// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <vector>

#include "UniquePtr.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"

namespace art {

std::vector<Space*> Heap::spaces_;

Space* Heap::boot_space_ = NULL;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

Mutex* Heap::lock_ = NULL;

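// RAII guard that holds the global heap lock for the duration of a scope.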
class ScopedHeapLock {
 public:
  ScopedHeapLock() {
    Heap::Lock();
  }

  ~ScopedHeapLock() {
    Heap::Unlock();
  }
};

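// Creates the initial spaces and heap bitmaps. The boot image (if any) is
// mapped first, then any additional images, then the allocation space; each
// space is requested directly after the previous one so that a single pair
// of bitmaps can cover the whole range.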
bool Heap::Init(size_t initial_size, size_t maximum_size,
                const char* boot_image_file_name,
                std::vector<const char*>& image_file_names) {
  Space* boot_space;
  byte* requested_base;
  if (boot_image_file_name == NULL) {
    boot_space = NULL;
    requested_base = NULL;
  } else {
    boot_space = Space::Create(boot_image_file_name);
    if (boot_space == NULL) {
      LOG(WARNING) << "Failed to create space from " << boot_image_file_name;
      return false;
    }
    spaces_.push_back(boot_space);
    requested_base = boot_space->GetBase() + RoundUp(boot_space->Size(), kPageSize);
  }

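  // Map any additional image files, bumping requested_base past each one so
  // the spaces stay contiguous.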
  std::vector<Space*> image_spaces;
  for (size_t i = 0; i < image_file_names.size(); i++) {
    Space* space = Space::Create(image_file_names[i]);
    if (space == NULL) {
      LOG(WARNING) << "Failed to create space from " << image_file_names[i];
      return false;
    }
    image_spaces.push_back(space);
    spaces_.push_back(space);
    requested_base = space->GetBase() + RoundUp(space->Size(), kPageSize);
  }

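  // Create the space that satisfies ordinary allocations, placed after the
  // last image space.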
  Space* space = Space::Create(initial_size, maximum_size, requested_base);
  if (space == NULL) {
    LOG(WARNING) << "Failed to create alloc space";
    return false;
  }

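  // The heap bitmaps must span the combined address range of every space,
  // from the lowest base to the highest limit.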
  if (boot_space == NULL) {
    boot_space = space;
  }
  byte* base = std::min(boot_space->GetBase(), space->GetBase());
  byte* limit = std::max(boot_space->GetLimit(), space->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(WARNING) << "Failed to create live bitmap";
    return false;
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(WARNING) << "Failed to create mark bitmap";
    return false;
  }

  alloc_space_ = space;
  spaces_.push_back(space);
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // TODO: allocate the card table

  // Make objects in boot_space live (after live_bitmap_ is set).
  if (boot_image_file_name != NULL) {
    boot_space_ = boot_space;
    RecordImageAllocations(boot_space);
  }
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_ = Mutex::Create("Heap lock");

  return true;
}

void Heap::Destroy() {
  ScopedHeapLock lock;
  STLDeleteElements(&spaces_);
  if (mark_bitmap_ != NULL) {
    delete mark_bitmap_;
    mark_bitmap_ = NULL;
  }
  if (live_bitmap_ != NULL) {
    delete live_bitmap_;
    live_bitmap_ = NULL;
  }
}

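// Allocates num_bytes from the allocation space and installs klass as the
// new object's class. The DCHECK permits a NULL klass or descriptor,
// presumably so that allocations made while the class hierarchy is still
// being bootstrapped can pass.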
Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
  ScopedHeapLock lock;
  DCHECK(klass == NULL
         || klass->GetDescriptor() == NULL
         || (klass->IsClassClass() && num_bytes >= sizeof(Class))
         || (klass->IsVariableSize() || klass->GetObjectSize() == num_bytes));
  DCHECK(num_bytes >= sizeof(Object));
  Object* obj = AllocateLocked(num_bytes);
  if (obj != NULL) {
    obj->SetClass(klass);
  }
  return obj;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything
  // that would require taking the lock.
  if (!IsAligned(obj, kObjectAlignment)) {
    return false;
  }
  // TODO
  return true;
}

bool Heap::verify_object_disabled_;

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

void Heap::VerifyObjectLocked(const Object* obj) {
  DCHECK_LOCK_HELD(lock_);
  if (obj != NULL && !verify_object_disabled_) {
    if (!IsAligned(obj, kObjectAlignment)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bit map
      // isn't changing if the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications.
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned(c, kObjectAlignment)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass().
      // NB we don't use the accessors here as they have internal sanity checks
      // that we don't want to run.
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

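// Bookkeeping for a successful allocation: update the allocation counters
// and mark the new object live in the live bitmap.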
void Heap::RecordAllocationLocked(Space* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    DCHECK_LOCK_HELD(lock_);
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;
  live_bitmap_->Set(obj);
}

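// The inverse of RecordAllocationLocked: clear the object's live bit and
// decrement the counters, clamping at zero to guard against underflow.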
void Heap::RecordFreeLocked(Space* space, const Object* obj) {
  DCHECK_LOCK_HELD(lock_);
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  if (size < num_bytes_allocated_) {
    num_bytes_allocated_ -= size;
  } else {
    num_bytes_allocated_ = 0;
  }
  live_bitmap_->Clear(obj);
  if (num_objects_allocated_ > 0) {
    num_objects_allocated_ -= 1;
  }
}

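// Walks the objects in an image space and marks each one live. Image objects
// are laid out contiguously after the ImageHeader, each rounded up to
// kObjectAlignment.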
void Heap::RecordImageAllocations(Space* space) {
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK(IsAligned(current, kObjectAlignment));
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
}

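// Allocation fast path once the heap lock is held: delegate to the
// allocation space and record the result.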
Object* Heap::AllocateLocked(size_t size) {
  DCHECK_LOCK_HELD(lock_);
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

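// Allocation slow path. Escalates through progressively more expensive
// attempts: allocate without growing, wait out any concurrent GC and retry,
// run a foreground GC, grow the heap, and finally GC again (collecting
// SoftReferences) before giving up.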
Object* Heap::AllocateLocked(Space* space, size_t size) {
  DCHECK_LOCK_HELD(lock_);

  // Fail impossible allocations. TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->MaxAllowedFootprint();
    // TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
              << "MB for " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // TODO: wait for the finalizers from the previous GC to finish.
  LOG(INFO) << "Forcing collection of SoftReferences for "
            << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state.
  // TODO: dump stack traces for all threads.

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetTotalMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetFreeMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

void Heap::CollectGarbage() {
  ScopedHeapLock lock;
  CollectGarbageInternal();
}

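// Runs a full mark-sweep collection. Thread suspension and the concurrent
// mode sketched in the TODOs below are not yet implemented.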
void Heap::CollectGarbageInternal() {
  DCHECK_LOCK_HELD(lock_);

  // TODO: Suspend all threads
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack.

    // TODO: if concurrent
    //   unlock heap
    //   resume threads

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    //   lock heap
    //   suspend threads
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();
  }

  GrowForUtilization();

  // TODO: Resume all threads
}

void Heap::WaitForConcurrentGcToComplete() {
  DCHECK_LOCK_HELD(lock_);
}

// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  DCHECK_LOCK_HELD(lock_);
  UNIMPLEMENTED(ERROR);
}

void Heap::Lock() {
  // TODO: grab the lock, but put ourselves into THREAD_VMWAIT if it looks
  // like we're going to have to wait on the mutex.
  lock_->Lock();
}

void Heap::Unlock() {
  lock_->Unlock();
}

}  // namespace art