// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <limits>
#include <vector>

#include "UniquePtr.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"

namespace art {

std::vector<Space*> Heap::spaces_;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

Class* Heap::java_lang_ref_FinalizerReference_ = NULL;
Class* Heap::java_lang_ref_ReferenceQueue_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

float Heap::target_utilization_ = 0.5;

Mutex* Heap::lock_ = NULL;

bool Heap::verify_objects_ = false;

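// Scoped locking helper: acquires the global heap lock for the enclosing
// scope and releases it when the scope exits.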
class ScopedHeapLock {
 public:
  ScopedHeapLock() {
    Heap::Lock();
  }

  ~ScopedHeapLock() {
    Heap::Unlock();
  }
};

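// Creates spaces for any boot images, creates the allocation space, and
// allocates live and mark bitmaps covering the combined address range of
// all spaces.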
void Heap::Init(size_t initial_size, size_t maximum_size,
                const std::vector<std::string>& image_file_names) {
  const Runtime* runtime = Runtime::Current();
  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init entering";
  }

  // Compute the bounds of all spaces for allocating the live and mark bitmaps.
  // There will be at least one space (the alloc space), so start with base at
  // the maximum address and limit at the minimum address.
  byte* base = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::max());
  byte* limit = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::min());

  byte* requested_base = NULL;
  std::vector<Space*> image_spaces;
  for (size_t i = 0; i < image_file_names.size(); i++) {
    Space* space = Space::CreateFromImage(image_file_names[i]);
    if (space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_names[i];
    }
    image_spaces.push_back(space);
    spaces_.push_back(space);
    byte* oat_limit_addr = space->GetImageHeader().GetOatLimitAddr();
    if (oat_limit_addr > requested_base) {
      requested_base = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_limit_addr),
                                                       kPageSize));
    }
    base = std::min(base, space->GetBase());
    limit = std::max(limit, space->GetLimit());
  }

  Space* space = Space::Create(initial_size, maximum_size, requested_base);
  if (space == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }
  base = std::min(base, space->GetBase());
  limit = std::max(limit, space->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  alloc_space_ = space;
  spaces_.push_back(space);
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // TODO: allocate the card table

  // Make image objects live (after live_bitmap_ is set).
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  Heap::EnableObjectValidation();

  // It's still too early to take a lock because there are no threads yet, but
  // we can create the heap lock now. We don't create it earlier to make it
  // clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock");

  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init exiting";
  }
}

void Heap::Destroy() {
  ScopedHeapLock lock;
  STLDeleteElements(&spaces_);
  if (mark_bitmap_ != NULL) {
    delete mark_bitmap_;
    mark_bitmap_ = NULL;
  }
  if (live_bitmap_ != NULL) {
    delete live_bitmap_;
    live_bitmap_ = NULL;
  }
}

Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
  ScopedHeapLock lock;
  DCHECK(klass == NULL
         || klass->GetDescriptor() == NULL
         || (klass->IsClassClass() && num_bytes >= sizeof(Class))
         || (klass->IsVariableSize() || klass->GetObjectSize() == num_bytes));
  DCHECK(num_bytes >= sizeof(Object));
  Object* obj = AllocateLocked(num_bytes);
  if (obj != NULL) {
    obj->SetClass(klass);
  }
  return obj;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything
  // that would require taking the lock.
  if (!IsAligned(obj, kObjectAlignment)) {
    return false;
  }
  // TODO
  return true;
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (!verify_objects_) {
    return;
  }
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL) {
    if (!IsAligned(obj, kObjectAlignment)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bit map
      // isn't changing if the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications.
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned(c, kObjectAlignment)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass().
      // NB we don't use the accessors here as they have internal sanity checks
      // that we don't want to run.
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

void Heap::RecordAllocationLocked(Space* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  live_bitmap_->Set(obj);
}

void Heap::RecordFreeLocked(Space* space, const Object* obj) {
  lock_->AssertHeld();
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  if (size < num_bytes_allocated_) {
    num_bytes_allocated_ -= size;
  } else {
    num_bytes_allocated_ = 0;
  }
  live_bitmap_->Clear(obj);
  if (num_objects_allocated_ > 0) {
    num_objects_allocated_ -= 1;
  }

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->freed_objects;
    ++thread_stats->freed_objects;
    global_stats->freed_bytes += size;
    thread_stats->freed_bytes += size;
  }
}

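// Walks the objects in an image space, starting just past the ImageHeader,
// and marks each one in the live bitmap. Must run before the runtime is
// started.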
void Heap::RecordImageAllocations(Space* space) {
  const Runtime* runtime = Runtime::Current();
  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations entering";
  }
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK(IsAligned(current, kObjectAlignment));
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations exiting";
  }
}

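// Allocates from the alloc space and, on success, records the allocation for
// accounting and live-bitmap bookkeeping.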
Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

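// Allocation slow path: tries a plain allocation, then waits for any running
// concurrent GC and retries, then forces a foreground GC, then grows the
// heap, and finally collects SoftReferences before giving up and returning
// NULL.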
Object* Heap::AllocateLocked(Space* space, size_t size) {
  lock_->AssertHeld();

  // Fail impossible allocations. TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  LOG(INFO) << "GC_FOR_ALLOC: TODO: test";
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }
  UNIMPLEMENTED(FATAL) << "No AllocWithGrowth, use larger -Xms -Xmx";

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->MaxAllowedFootprint();
    // TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
              << " for " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // TODO: wait for the finalizers from the previous GC to finish
  LOG(INFO) << "Forcing collection of SoftReferences for "
            << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetTotalMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetFreeMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

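// Live-bitmap visitor used by CountInstances to count objects of a given
// class.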
class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class == class_) {
        ++count_;
      }
    } else {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ScopedHeapLock lock;
  InstanceCounter counter(c, count_assignable);
  live_bitmap_->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

void Heap::CollectGarbage() {
  ScopedHeapLock lock;
  CollectGarbageInternal();
}

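// Stop-the-world collection: suspends all threads, runs a MarkSweep pass
// (mark roots, recursive mark, process references, sweep), calls
// GrowForUtilization(), resumes the threads, and enqueues any cleared
// references.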
void Heap::CollectGarbageInternal() {
  lock_->AssertHeld();

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    //   unlock heap
    //   thread_list->ResumeAll();

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    //   lock heap
    //   thread_list->SuspendAll();
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();

    cleared_references = mark_sweep.GetClearedReferences();
  }

  GrowForUtilization();
  thread_list->ResumeAll();

  EnqueueClearedReferences(&cleared_references);
}

void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
}

// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  lock_->AssertHeld();
  UNIMPLEMENTED(ERROR);
}

void Heap::Lock() {
  // TODO: grab the lock, but put ourselves into Thread::kVmWait if it looks
  // like we're going to have to wait on the mutex.
  lock_->Lock();
}

void Heap::Unlock() {
  lock_->Unlock();
}

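// Caches the java.lang.ref.FinalizerReference and java.lang.ref.ReferenceQueue
// classes needed for reference processing.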
void Heap::SetWellKnownClasses(Class* java_lang_ref_FinalizerReference,
                               Class* java_lang_ref_ReferenceQueue) {
  java_lang_ref_FinalizerReference_ = java_lang_ref_FinalizerReference;
  java_lang_ref_ReferenceQueue_ = java_lang_ref_ReferenceQueue;
  CHECK(java_lang_ref_FinalizerReference_ != NULL);
  CHECK(java_lang_ref_ReferenceQueue_ != NULL);
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

Object* Heap::GetReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
}

void Heap::ClearReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, NULL, true);
}

// Returns true if the reference object has not yet been enqueued.
bool Heap::IsEnqueuable(const Object* ref) {
  DCHECK(ref != NULL);
  const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
  const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
  return (queue != NULL) && (queue_next == NULL);
}

void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
  DCHECK(ref != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
  EnqueuePendingReference(ref, cleared_reference_list);
}

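// Pending references form a circular singly-linked list through the
// pendingNext field; *list points at the tail, and new references are
// inserted just after it, at the head of the list.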
void Heap::EnqueuePendingReference(Object* ref, Object** list) {
  DCHECK(ref != NULL);
  DCHECK(list != NULL);

  if (*list == NULL) {
    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
    *list = ref;
  } else {
    Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
  }
}

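// Removes and returns the head of the circular pending list, clearing its
// pendingNext link; *list becomes NULL when the last element is removed.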
Object* Heap::DequeuePendingReference(Object** list) {
  DCHECK(list != NULL);
  DCHECK(*list != NULL);
  Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
  Object* ref;
  if (*list == head) {
    ref = *list;
    *list = NULL;
  } else {
    Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
    ref = head;
  }
  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
  return ref;
}

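// Registers an object for finalization by calling the Java-side
// FinalizerReference.add(Object).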
void Heap::AddFinalizerReference(Object* object) {
  static Method* FinalizerReference_add =
      java_lang_ref_FinalizerReference_->FindDirectMethod("add", "(Ljava/lang/Object;)V");
  DCHECK(FinalizerReference_add != NULL);
  Object* args[] = { object };
  FinalizerReference_add->Invoke(Thread::Current(), NULL, reinterpret_cast<byte*>(&args), NULL);
}

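// Hands a list of cleared references to the Java side by calling
// ReferenceQueue.add(Reference), then resets the list.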
603void Heap::EnqueueClearedReferences(Object** cleared) {
604 DCHECK(cleared != NULL);
605 if (*cleared != NULL) {
606 static Method* ReferenceQueue_add =
607 java_lang_ref_ReferenceQueue_->FindDirectMethod("add", "(Ljava/lang/ref/Reference;)V");
608 DCHECK(ReferenceQueue_add != NULL);
609
610 Thread* self = Thread::Current();
611 ScopedThreadStateChange tsc(self, Thread::kRunnable);
612 Object* args[] = { *cleared };
613 ReferenceQueue_add->Invoke(self, NULL, reinterpret_cast<byte*>(&args), NULL);
614 *cleared = NULL;
615 }
616}
617
Carl Shapiro69759ea2011-07-21 18:13:35 -0700618} // namespace art