// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <vector>

#include "UniquePtr.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"

namespace art {

std::vector<Space*> Heap::spaces_;

Space* Heap::boot_space_ = NULL;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

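// Initializes the heap. If a boot image is given, it is mapped first and the
// allocation space is requested at the page-aligned address just past it, so
// that one pair of bitmaps spanning [base, limit) can cover both spaces.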
bool Heap::Init(size_t initial_size, size_t maximum_size, const char* boot_image_file_name) {
  Space* boot_space;
  byte* requested_base;
  if (boot_image_file_name == NULL) {
    boot_space = NULL;
    requested_base = NULL;
  } else {
    boot_space = Space::Create(boot_image_file_name);
    if (boot_space == NULL) {
      return false;
    }
    spaces_.push_back(boot_space);
    requested_base = boot_space->GetBase() + RoundUp(boot_space->Size(), kPageSize);
  }

  Space* space = Space::Create(initial_size, maximum_size, requested_base);
  if (space == NULL) {
    return false;
  }

  if (boot_space == NULL) {
    boot_space = space;
  }
  byte* base = std::min(boot_space->GetBase(), space->GetBase());
  byte* limit = std::max(boot_space->GetLimit(), space->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    return false;
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    return false;
  }

  alloc_space_ = space;
  spaces_.push_back(space);
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // TODO: allocate the card table

  // Make objects in boot_space live (after live_bitmap_ is set).
  if (boot_image_file_name != NULL) {
    boot_space_ = boot_space;
    RecordImageAllocations(boot_space);
  }

  return true;
}

void Heap::Destroy() {
  STLDeleteElements(&spaces_);
  delete mark_bitmap_;  // Deleting NULL is a no-op.
  mark_bitmap_ = NULL;
  delete live_bitmap_;
  live_bitmap_ = NULL;
}

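// Allocates num_bytes for an instance of klass and stamps the new object
// with its class pointer. The DCHECK cross-checks num_bytes against the
// class's instance size where a fixed size is known.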
Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
  DCHECK(klass == NULL
         || klass->GetDescriptor() == NULL
         || (klass->IsClassClass() && num_bytes >= sizeof(Class))
         || (klass->IsVariableSize() || klass->GetObjectSize() == num_bytes));
  DCHECK(num_bytes >= sizeof(Object));
  Object* obj = Allocate(num_bytes);
  if (obj != NULL) {
    obj->SetClass(klass);
  }
  return obj;
}

bool Heap::IsHeapAddress(const Object* obj) {
  if (!IsAligned(obj, kObjectAlignment)) {
    return false;
  }
  // TODO: also check that obj actually lies within a known space.
  return true;
}

bool Heap::verify_object_disabled_;

#if VERIFY_OBJECT_ENABLED
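// Sanity-checks an object pointer: alignment and liveness of the object
// itself and, once past the first few allocations, of its class pointer,
// including the Class-of-Class fixed point.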
void Heap::VerifyObject(const Object* obj) {
  if (obj != NULL && !verify_object_disabled_) {
    if (!IsAligned(obj, kObjectAlignment)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bitmap
      // isn't changing while the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications.
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned(c, kObjectAlignment)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass().
      // NB we don't use the accessors here as they have internal sanity checks
      // that we don't want to run.
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}
#endif

static void HeapVerifyCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObject(obj);
}

void Heap::VerifyHeap() {
  live_bitmap_->Walk(HeapVerifyCallback, NULL);
}

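// RecordAllocation and RecordFree keep the byte and object counters in sync
// with the live bitmap. RecordFree clamps at zero rather than letting the
// unsigned counters underflow.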
void Heap::RecordAllocation(Space* space, const Object* obj) {
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;
  live_bitmap_->Set(obj);
}

void Heap::RecordFree(Space* space, const Object* obj) {
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  if (size < num_bytes_allocated_) {
    num_bytes_allocated_ -= size;
  } else {
    num_bytes_allocated_ = 0;
  }
  live_bitmap_->Clear(obj);
  if (num_objects_allocated_ > 0) {
    num_objects_allocated_ -= 1;
  }
}

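// Marks every object in the boot image live. Objects are assumed to be
// densely packed in the image: each starts at the kObjectAlignment-rounded
// end of the previous one, beginning just past the ImageHeader.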
void Heap::RecordImageAllocations(Space* space) {
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK(IsAligned(current, kObjectAlignment));
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
}

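// Allocates size bytes from the default allocation space and records the
// allocation in the live bitmap and the heap counters.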
Object* Heap::Allocate(size_t size) {
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = Allocate(space, size);
  if (obj != NULL) {
    RecordAllocation(space, obj);
  }
  return obj;
}

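// Allocation slow path. Tries, in order: an allocation within the current
// footprint; waiting out a concurrent GC and retrying; a foreground GC and
// another retry; growing the heap; and a last-resort GC that collects
// SoftReferences, which the VM spec requires before an OutOfMemoryError.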
Object* Heap::Allocate(Space* space, size_t size) {
  // Fail impossible allocations. TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->MaxAllowedFootprint();
    // TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
              << " for " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // TODO: wait for the finalizers from the previous GC to finish
  LOG(INFO) << "Forcing collection of SoftReferences for "
            << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << "-byte allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetTotalMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetFreeMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

void Heap::CollectGarbage() {
  CollectGarbageInternal();
}

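// Stop-the-world mark-sweep collection. As the TODOs below sketch, a
// concurrent variant would release the heap lock and resume threads for the
// recursive mark, then re-mark the roots and scan dirty objects with the
// world stopped again before sweeping.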
void Heap::CollectGarbageInternal() {
  // TODO: check that heap lock is held

  // TODO: Suspend all threads
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    //   unlock heap
    //   resume threads

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    //   lock heap
    //   suspend threads
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();
  }

  GrowForUtilization();

  // TODO: Resume all threads
}

void Heap::WaitForConcurrentGcToComplete() {
}

// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  UNIMPLEMENTED(ERROR);
}

}  // namespace art