// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <vector>

#include "mark_sweep.h"
#include "object.h"
#include "scoped_ptr.h"
#include "space.h"
#include "stl_util.h"

namespace art {

std::vector<Space*> Heap::spaces_;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

bool Heap::Init(size_t initial_size, size_t maximum_size, const char* boot_image_file_name) {
  Space* boot_space;
  byte* requested_base;
  if (boot_image_file_name == NULL) {
    boot_space = NULL;
    requested_base = NULL;
  } else {
    boot_space = Space::Create(boot_image_file_name);
    if (boot_space == NULL) {
      return false;
    }
    spaces_.push_back(boot_space);
    // Ask for the allocation space to begin on the page following the boot
    // image, so the two spaces are contiguous and one bitmap pair covers both.
    requested_base = boot_space->GetBase() + RoundUp(boot_space->Size(), kPageSize);
  }

  Space* space = Space::Create(initial_size, maximum_size, requested_base);
  if (space == NULL) {
    return false;
  }

  if (boot_space == NULL) {
    boot_space = space;
  }
  byte* base = std::min(boot_space->GetBase(), space->GetBase());
  byte* limit = std::max(boot_space->GetLimit(), space->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  scoped_ptr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap == NULL) {
    return false;
  }

  // Allocate the initial mark bitmap.
  scoped_ptr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap == NULL) {
    return false;
  }

  alloc_space_ = space;
  spaces_.push_back(space);
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  // TODO: allocate the card table

  return true;
}

void Heap::Destroy() {
  STLDeleteElements(&spaces_);
  delete mark_bitmap_;
  mark_bitmap_ = NULL;
  delete live_bitmap_;
  live_bitmap_ = NULL;
}

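// klass can be NULL while the Class for java.lang.Class itself is being
// allocated at startup, and descriptor_ can be NULL before a class is fully
// loaded; otherwise the request must match the instance size, which is
// recorded as 0 for arrays.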
Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
  DCHECK((klass == NULL && num_bytes == sizeof(Class))
         || klass->descriptor_ == NULL
         || (klass->object_size_ == (klass->IsArray() ? 0 : num_bytes)));
  Object* obj = Allocate(num_bytes);
  if (obj != NULL) {
    obj->klass_ = klass;
  }
  return obj;
}

void Heap::RecordAllocation(Space* space, const Object* obj) {
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;
  live_bitmap_->Set(obj);
}

void Heap::RecordFree(Space* space, const Object* obj) {
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  // Clamp the counters at zero rather than let them underflow if the
  // bookkeeping ever gets out of sync.
  if (size < num_bytes_allocated_) {
    num_bytes_allocated_ -= size;
  } else {
    num_bytes_allocated_ = 0;
  }
  live_bitmap_->Clear(obj);
  if (num_objects_allocated_ > 0) {
    num_objects_allocated_ -= 1;
  }
}

Object* Heap::Allocate(size_t size) {
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = Allocate(space, size);
  if (obj != NULL) {
    RecordAllocation(space, obj);
  }
  return obj;
}

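// Allocation escalation: try a straight allocation first and, on failure,
// escalate step by step: wait out any concurrent GC, run a foreground GC,
// grow the heap, and finally GC once more with SoftReferences collected
// before giving up and reporting out-of-memory.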
Object* Heap::Allocate(Space* space, size_t size) {
  // Fail impossible allocations. TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->MaxAllowedFootprint();
    // TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
              << "MB for " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // TODO: wait for the finalizers from the previous GC to finish
  LOG(INFO) << "Forcing collection of SoftReferences for "
            << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << "-byte allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

void Heap::CollectGarbage() {
  CollectGarbageInternal();
}

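// A single stop-the-world mark-sweep pass. The TODOs below outline the
// intended concurrent variant: mutators run during the recursive mark, then
// are suspended again so roots can be re-marked and dirty objects rescanned.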
void Heap::CollectGarbageInternal() {
  // TODO: check that heap lock is held

  // TODO: Suspend all threads
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    //   unlock heap
    //   resume threads

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    //   lock heap
    //   suspend threads
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();
  }

  GrowForUtilization();

  // TODO: Resume all threads
}

void Heap::WaitForConcurrentGcToComplete() {
}
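
// A minimal sketch of what the wait above might look like, assuming a lock
// and condition variable guarding is_gc_running_ (both hypothetical; neither
// exists in this file yet):
//
//   void Heap::WaitForConcurrentGcToComplete() {
//     MutexLock mu(gc_complete_lock_);               // hypothetical lock
//     while (is_gc_running_) {
//       gc_complete_cond_.Wait(gc_complete_lock_);   // hypothetical condvar
//     }
//   }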

// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  UNIMPLEMENTED(ERROR);
}

}  // namespace art