// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <limits>
#include <vector>

#include "UniquePtr.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"

namespace art {

std::vector<Space*> Heap::spaces_;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

Class* Heap::java_lang_ref_FinalizerReference_ = NULL;
Class* Heap::java_lang_ref_ReferenceQueue_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

float Heap::target_utilization_ = 0.5;

Mutex* Heap::lock_ = NULL;

bool Heap::verify_objects_ = false;

class ScopedHeapLock {
 public:
  ScopedHeapLock() {
    Heap::Lock();
  }

  ~ScopedHeapLock() {
    Heap::Unlock();
  }
};
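
// Usage sketch (mirrors Heap::CollectGarbage below): public entry points take
// the heap lock for their full scope via RAII rather than pairing
// Lock()/Unlock() calls by hand:
//
//   void Heap::CollectGarbage() {
//     ScopedHeapLock lock;        // Heap::Lock() on construction
//     CollectGarbageInternal();   // runs with lock_ held
//   }                             // Heap::Unlock() on scope exit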

void Heap::Init(size_t initial_size, size_t maximum_size,
                const std::vector<std::string>& image_file_names) {
  const Runtime* runtime = Runtime::Current();
  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init entering";
  }

  // Compute the bounds of all spaces for allocating live and mark bitmaps.
  // There will be at least one space (the alloc space), so initialize base
  // to the max and limit to the min to start.
  byte* base = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::max());
  byte* limit = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::min());
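  // Worked example (illustrative addresses only): with an image space at
  // [0x40000000, 0x45000000) and an alloc space at [0x50000000, 0x60000000),
  // the min/max folds below leave base == 0x40000000 and limit == 0x60000000,
  // so a single bitmap pair covers every space.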

  byte* requested_base = NULL;
  std::vector<Space*> image_spaces;
  for (size_t i = 0; i < image_file_names.size(); i++) {
    Space* space = Space::CreateFromImage(image_file_names[i]);
    if (space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_names[i];
    }
    image_spaces.push_back(space);
    spaces_.push_back(space);
    byte* oat_limit_addr = space->GetImageHeader().GetOatLimitAddr();
    if (oat_limit_addr > requested_base) {
      requested_base = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_limit_addr),
                                                       kPageSize));
    }
    base = std::min(base, space->GetBase());
    limit = std::max(limit, space->GetLimit());
  }

  Space* space = Space::Create(initial_size, maximum_size, requested_base);
  if (space == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }
  base = std::min(base, space->GetBase());
  limit = std::max(limit, space->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  alloc_space_ = space;
  spaces_.push_back(space);
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // TODO: allocate the card table

  // Make image objects live (after live_bitmap_ is set).
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  Heap::EnableObjectValidation();

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock");

  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init exiting";
  }
}

void Heap::Destroy() {
  ScopedHeapLock lock;
  STLDeleteElements(&spaces_);
  if (mark_bitmap_ != NULL) {
    delete mark_bitmap_;
    mark_bitmap_ = NULL;
  }
  if (live_bitmap_ != NULL) {
    delete live_bitmap_;
    live_bitmap_ = NULL;
  }
}

Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
  ScopedHeapLock lock;
  DCHECK(klass == NULL
         || klass->GetDescriptor() == NULL
         || (klass->IsClassClass() && num_bytes >= sizeof(Class))
         || (klass->IsVariableSize() || klass->GetObjectSize() == num_bytes));
  DCHECK(num_bytes >= sizeof(Object));
  Object* obj = AllocateLocked(num_bytes);
  if (obj != NULL) {
    obj->SetClass(klass);
  }
  return obj;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything
  // that would require taking the lock.
  if (!IsAligned(obj, kObjectAlignment)) {
    return false;
  }
  // TODO
  return true;
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (!verify_objects_) {
    return;
  }
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL) {
    if (!IsAligned(obj, kObjectAlignment)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bit map
      // isn't changing if the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications.
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned(c, kObjectAlignment)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass().
      // NB we don't use the accessors here as they have internal sanity checks
      // that we don't want to run.
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

void Heap::RecordAllocationLocked(Space* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  live_bitmap_->Set(obj);
}

void Heap::RecordFreeLocked(Space* space, const Object* obj) {
  lock_->AssertHeld();
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  if (size < num_bytes_allocated_) {
    num_bytes_allocated_ -= size;
  } else {
    num_bytes_allocated_ = 0;
  }
  live_bitmap_->Clear(obj);
  if (num_objects_allocated_ > 0) {
    num_objects_allocated_ -= 1;
  }

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->freed_objects;
    ++thread_stats->freed_objects;
    global_stats->freed_bytes += size;
    thread_stats->freed_bytes += size;
  }
}

void Heap::RecordImageAllocations(Space* space) {
  const Runtime* runtime = Runtime::Current();
  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations entering";
  }
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
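  // Walk arithmetic for the loop below, assuming kObjectAlignment is 8 bytes:
  // an object whose SizeOf() is 12 advances current by RoundUp(12, 8) == 16,
  // so every object start stays aligned and no object is skipped.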
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK(IsAligned(current, kObjectAlignment));
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations exiting";
  }
}

Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

Object* Heap::AllocateLocked(Space* space, size_t size) {
  lock_->AssertHeld();

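  // The retry ladder below, in brief (each step falls through to the next on
  // failure): 1) plain allocation, 2) wait out a concurrent GC and retry,
  // 3) force a GC and retry, 4) retry with heap growth, 5) GC collecting
  // soft references, then one last growing retry before reporting OOM.
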
  // Fail impossible allocations. TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  LOG(INFO) << "GC_FOR_ALLOC: TODO: test";
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }
  UNIMPLEMENTED(FATAL) << "No AllocWithGrowth, use larger -Xms -Xmx";

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    // size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->MaxAllowedFootprint();
    // TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
              << "MB for " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // TODO: wait for the finalizers from the previous GC to finish.
  LOG(INFO) << "Forcing collection of SoftReferences for "
            << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state.
  // TODO: dump stack traces for all threads.

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetTotalMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetFreeMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class == class_) {
        ++count_;
      }
    } else {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ScopedHeapLock lock;
  InstanceCounter counter(c, count_assignable);
  live_bitmap_->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

void Heap::CollectGarbage() {
  ScopedHeapLock lock;
  CollectGarbageInternal();
}

void Heap::CollectGarbageInternal() {
  lock_->AssertHeld();

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack.

    // TODO: if concurrent
    //   unlock heap
    //   thread_list->ResumeAll();

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    //   lock heap
    //   thread_list->SuspendAll();
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();

    cleared_references = mark_sweep.GetClearedReferences();
  }

  GrowForUtilization();
  thread_list->ResumeAll();

  EnqueueClearedReferences(&cleared_references);
}

void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
}

// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  lock_->AssertHeld();
  UNIMPLEMENTED(ERROR);
}

void Heap::Lock() {
  // TODO: grab the lock, but put ourselves into Thread::kVmWait if it looks
  // like we're going to have to wait on the mutex.
  lock_->Lock();
}

void Heap::Unlock() {
  lock_->Unlock();
}

void Heap::SetWellKnownClasses(Class* java_lang_ref_FinalizerReference,
                               Class* java_lang_ref_ReferenceQueue) {
  java_lang_ref_FinalizerReference_ = java_lang_ref_FinalizerReference;
  java_lang_ref_ReferenceQueue_ = java_lang_ref_ReferenceQueue;
  CHECK(java_lang_ref_FinalizerReference_ != NULL);
  CHECK(java_lang_ref_ReferenceQueue_ != NULL);
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

Object* Heap::GetReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
}

void Heap::ClearReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, NULL, true);
}

// Returns true if the reference object is registered with a queue but has
// not yet been enqueued.
bool Heap::IsEnqueuable(const Object* ref) {
  DCHECK(ref != NULL);
  const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
  const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
  return (queue != NULL) && (queue_next == NULL);
}
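
// Example of the states this distinguishes (assuming standard java.lang.ref
// semantics): a Reference created without a queue has queue == NULL and is
// never enqueuable; one registered with a queue has queue != NULL and
// queueNext == NULL until it is enqueued, after which queueNext becomes
// non-null and IsEnqueuable() returns false.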

void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
  DCHECK(ref != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
  EnqueuePendingReference(ref, cleared_reference_list);
}

void Heap::EnqueuePendingReference(Object* ref, Object** list) {
  DCHECK(ref != NULL);
  DCHECK(list != NULL);

  if (*list == NULL) {
    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
    *list = ref;
  } else {
    Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
  }
}
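
// Shape of the pending list, traced by hand (a sketch of the invariant the
// code above maintains): *list keeps pointing at the first reference
// enqueued, and its pendingNext field points at the most recently enqueued
// one. After EnqueuePendingReference(A, &list) then
// EnqueuePendingReference(B, &list):
//
//   list == A,  A.pendingNext == B,  B.pendingNext == A   (circular)
//
// DequeuePendingReference below unlinks and returns the element at
// (*list)->pendingNext, i.e. B in this example.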

Object* Heap::DequeuePendingReference(Object** list) {
  DCHECK(list != NULL);
  DCHECK(*list != NULL);
  Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
  Object* ref;
  if (*list == head) {
    ref = *list;
    *list = NULL;
  } else {
    Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
    ref = head;
  }
  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
  return ref;
}

void Heap::AddFinalizerReference(Object* object) {
  static Method* FinalizerReference_add =
      java_lang_ref_FinalizerReference_->FindDirectMethod("add", "(Ljava/lang/Object;)V");
  DCHECK(FinalizerReference_add != NULL);
  Object* args[] = { object };
  FinalizerReference_add->Invoke(Thread::Current(), NULL, reinterpret_cast<byte*>(&args), NULL);
}

void Heap::EnqueueClearedReferences(Object** cleared) {
  DCHECK(cleared != NULL);
  if (*cleared != NULL) {
    static Method* ReferenceQueue_add =
        java_lang_ref_ReferenceQueue_->FindDirectMethod("add", "(Ljava/lang/ref/Reference;)V");
    DCHECK(ReferenceQueue_add != NULL);

    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, Thread::kRunnable);
    Object* args[] = { *cleared };
    ReferenceQueue_add->Invoke(self, NULL, reinterpret_cast<byte*>(&args), NULL);
    *cleared = NULL;
  }
}

}  // namespace art