// Copyright 2011 Google Inc. All Rights Reserved.
// Author: cshapiro@google.com (Carl Shapiro)

#include "heap.h"

#include <vector>

#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"

namespace art {

std::vector<Space*> Heap::spaces_;

size_t Heap::startup_size_ = 0;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

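// Creates the initial Space plus the live and mark bitmaps that cover it.
// A rough lifecycle sketch (the real call sites live in the runtime
// start-up code, not in this file):
//
//   if (!Heap::Init(startup_size, maximum_size)) { /* abort start-up */ }
//   Object* obj = Heap::AllocObject(klass);
//   ...
//   Heap::Destroy();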
bool Heap::Init(size_t startup_size, size_t maximum_size) {
  Space* space = Space::Create(startup_size, maximum_size);
  if (space == NULL) {
    return false;
  }

  byte* base = space->GetBase();
  size_t num_bytes = space->Size();

  // Allocate the initial live bitmap.
  scoped_ptr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap == NULL) {
    return false;
  }

  // Allocate the initial mark bitmap.
  scoped_ptr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap == NULL) {
    return false;
  }

  spaces_.push_back(space);
  startup_size_ = startup_size;
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  // TODO: allocate the card table

  return true;
}

void Heap::Destroy() {
  STLDeleteElements(&spaces_);
  delete mark_bitmap_;
  delete live_bitmap_;
}

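// Allocates an instance of the given class.  Only the klass_ field is set
// here; the remaining fields are left as returned by the Space allocator
// (presumably zero-filled memory).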
Object* Heap::AllocObject(Class* klass) {
  return AllocObject(klass, klass->object_size_);
}

Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
  Object* obj = Allocate(num_bytes);
  if (obj != NULL) {
    obj->klass_ = klass;
  }
  return obj;
}

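// RecordAllocation() and RecordFree() keep num_bytes_allocated_ and
// num_objects_allocated_ in sync with the live bitmap.  RecordFree() clamps
// both counters at zero instead of asserting, presumably to tolerate frees
// of objects whose allocation was never recorded.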
void Heap::RecordAllocation(Space* space, const Object* obj) {
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;
  live_bitmap_->Set(obj);
}

void Heap::RecordFree(Space* space, const Object* obj) {
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  if (size < num_bytes_allocated_) {
    num_bytes_allocated_ -= size;
  } else {
    num_bytes_allocated_ = 0;
  }
  live_bitmap_->Clear(obj);
  if (num_objects_allocated_ > 0) {
    num_objects_allocated_ -= 1;
  }
}

Object* Heap::Allocate(size_t size) {
  CHECK_EQ(spaces_.size(), 1u);
  Space* space = spaces_[0];
  Object* obj = Allocate(space, size);
  if (obj != NULL) {
    RecordAllocation(space, obj);
  }
  return obj;
}

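// Allocation slow path.  The escalation order below is: (1) allocate without
// growing the footprint, (2) if a concurrent GC is running, wait for it and
// retry, (3) run a foreground GC and retry, (4) retry with footprint growth
// allowed, and (5) run a final GC intended to clear SoftReferences before
// giving up and returning NULL.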
Object* Heap::Allocate(Space* space, size_t size) {
  // Fail impossible allocations.  TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed.  If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap.  Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure.  Our thread was starved or there may be too many
  // live objects.  Try a foreground GC.  This will have no effect if
  // the concurrent GC is already running.
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->MaxAllowedFootprint();
    // TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the utilization slop
    // for the new allocation.
    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
              << "MB for " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big.  Do another GC, collecting SoftReferences this time.  The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // TODO: wait for the finalizers from the previous GC to finish
  LOG(INFO) << "Forcing collection of SoftReferences for "
            << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

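// Creates a String and its backing CharArray.  Note that only the length is
// recorded here: the modified UTF-8 bytes are not decoded or copied into the
// array, and strlen() yields a byte count, which equals the character count
// only for ASCII data (presumably what the TODO below refers to).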
String* Heap::AllocStringFromModifiedUtf8(Class* java_lang_String,
                                          Class* char_array,
                                          const char* data) {
  String* string = AllocString(java_lang_String);
  uint32_t count = strlen(data);  // TODO
  CharArray* array = AllocCharArray(char_array, count);
  string->array_ = array;
  string->count_ = count;
  return string;
}

void Heap::CollectGarbage() {
  CollectGarbageInternal();
}

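// Runs a full mark-sweep pass: mark the roots, recursively mark everything
// reachable from them, process reference objects, sweep unmarked objects,
// and finally grow the footprint toward the target utilization.  Thread
// suspension and the concurrent variant are still TODOs below.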
void Heap::CollectGarbageInternal() {
  // TODO: check that heap lock is held

  // TODO: Suspend all threads
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    //   unlock heap
    //   resume threads

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    //   lock heap
    //   suspend threads
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();
  }

  GrowForUtilization();

  // TODO: Resume all threads
}

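// There is no concurrent collector yet (nothing in this file ever sets
// is_gc_running_), so waiting is currently a no-op placeholder.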
void Heap::WaitForConcurrentGcToComplete() {
}

// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio.  This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  LOG(ERROR) << "Unimplemented";
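  // A sketch of one possible implementation (hypothetical only; the
  // target_utilization_ ratio and the Space setter named below do not
  // exist in this file):
  //
  //   // Aim for num_bytes_allocated_ / footprint == target_utilization_.
  //   size_t target = static_cast<size_t>(num_bytes_allocated_ / target_utilization_);
  //   target = std::min(std::max(target, startup_size_), maximum_size_);
  //   spaces_[0]->SetMaxAllowedFootprint(target);  // hypothetical Space API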
}

}  // namespace art