// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <limits>
#include <vector>

#include "card_table.h"
#include "debugger.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
#include "timing_logger.h"
#include "UniquePtr.h"

namespace art {

bool Heap::is_verbose_heap_ = false;

bool Heap::is_verbose_gc_ = false;

std::vector<Space*> Heap::spaces_;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::growth_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

CardTable* Heap::card_table_ = NULL;

bool Heap::card_marking_disabled_ = false;

Class* Heap::java_lang_ref_FinalizerReference_ = NULL;
Class* Heap::java_lang_ref_ReferenceQueue_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

float Heap::target_utilization_ = 0.5;

Mutex* Heap::lock_ = NULL;

bool Heap::verify_objects_ = false;

void Heap::Init(bool is_verbose_heap, bool is_verbose_gc,
                size_t initial_size, size_t maximum_size, size_t growth_size,
                const std::vector<std::string>& image_file_names) {
  is_verbose_heap_ = is_verbose_heap;
  is_verbose_gc_ = is_verbose_gc;

  const Runtime* runtime = Runtime::Current();
  if (Heap::IsVerboseHeap() || runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init entering";
  }

  // Compute the bounds of all spaces for allocating the live and mark bitmaps.
  // There will be at least one space (the alloc space), so initialize base to
  // the maximum address and max/limit to the minimum address.
  byte* base = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::max());
  byte* max = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::min());
  byte* limit = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::min());

  byte* requested_base = NULL;
  std::vector<Space*> image_spaces;
  for (size_t i = 0; i < image_file_names.size(); i++) {
    Space* space = Space::CreateFromImage(image_file_names[i]);
    if (space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_names[i];
    }
    image_spaces.push_back(space);
    spaces_.push_back(space);
    byte* oat_limit_addr = space->GetImageHeader().GetOatLimitAddr();
    if (oat_limit_addr > requested_base) {
      requested_base = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_limit_addr),
                                                       kPageSize));
    }
    base = std::min(base, space->GetBase());
    max = std::max(max, space->GetMax());
    limit = std::max(limit, space->GetLimit());
  }

  alloc_space_ = Space::Create("alloc space", initial_size, maximum_size, growth_size, requested_base);
  if (alloc_space_ == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }
  base = std::min(base, alloc_space_->GetBase());
  max = std::max(max, alloc_space_->GetMax());
  limit = std::max(limit, alloc_space_->GetLimit());
  DCHECK_LT(base, max);
  DCHECK_LT(base, limit);
  size_t num_bytes = max - base;
  size_t limit_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  // Allocate the card table.
  UniquePtr<CardTable> card_table(CardTable::Create(base, num_bytes, limit_bytes));
  if (card_table.get() == NULL) {
    LOG(FATAL) << "Failed to create card table";
  }

  spaces_.push_back(alloc_space_);
  maximum_size_ = maximum_size;
  growth_size_ = growth_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();
  card_table_ = card_table.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // Make image objects live (after live_bitmap_ is set).
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  Heap::EnableObjectValidation();

  // It's still too early to take a lock because there are no threads yet, but
  // we can create the heap lock now. We don't create it earlier to make it
  // clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock");

  if (Heap::IsVerboseHeap() || runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init exiting";
  }
}

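// Tears down the heap: deletes all spaces and the live and mark bitmaps.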
void Heap::Destroy() {
  ScopedHeapLock lock;
  STLDeleteElements(&spaces_);
  if (mark_bitmap_ != NULL) {
    delete mark_bitmap_;
    mark_bitmap_ = NULL;
  }
  if (live_bitmap_ != NULL) {
    delete live_bitmap_;
    live_bitmap_ = NULL;
  }
}

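// Allocates an object of the given class and size. On failure, throws an
// OutOfMemoryError and returns NULL.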
Object* Heap::AllocObject(Class* klass, size_t byte_count) {
  {
    ScopedHeapLock lock;
    DCHECK(klass == NULL || klass->GetDescriptor() == NULL ||
           (klass->IsClassClass() && byte_count >= sizeof(Class)) ||
           (klass->IsVariableSize() || klass->GetObjectSize() == byte_count));
    DCHECK_GE(byte_count, sizeof(Object));
    Object* obj = AllocateLocked(byte_count);
    if (obj != NULL) {
      obj->SetClass(klass);
      return obj;
    }
  }

  Thread::Current()->ThrowOutOfMemoryError(klass, byte_count);
  return NULL;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL || !IsAligned<kObjectAlignment>(obj)) {
    return false;
  }
  // TODO
  return true;
}

bool Heap::IsLiveObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  return IsHeapAddress(obj) && live_bitmap_->Test(obj);
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (!verify_objects_) {
    return;
  }
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL) {
    if (!IsAligned<kObjectAlignment>(obj)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bitmap
      // is not changing while the mutators are running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications.
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned<kObjectAlignment>(c)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass().
      // Note: we don't use the accessors here as they have internal sanity checks
      // that we don't want to run.
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

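// Verifies every object in the live bitmap.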
void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

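// Records a successful allocation: updates the allocation counters and any
// enabled runtime statistics, and marks the object in the live bitmap.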
void Heap::RecordAllocationLocked(Space* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  live_bitmap_->Set(obj);
}

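// Records the result of a sweep: decrements the allocation counters, clamping
// at zero rather than underflowing, and updates any enabled runtime statistics.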
void Heap::RecordFreeLocked(size_t freed_objects, size_t freed_bytes) {
  lock_->AssertHeld();

  if (freed_objects < num_objects_allocated_) {
    num_objects_allocated_ -= freed_objects;
  } else {
    num_objects_allocated_ = 0;
  }
  if (freed_bytes < num_bytes_allocated_) {
    num_bytes_allocated_ -= freed_bytes;
  } else {
    num_bytes_allocated_ = 0;
  }

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->freed_objects;
    ++thread_stats->freed_objects;
    global_stats->freed_bytes += freed_bytes;
    thread_stats->freed_bytes += freed_bytes;
  }
}

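// Marks every object in the given image space as live, walking the space
// object by object from the end of the image header to the space limit.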
void Heap::RecordImageAllocations(Space* space) {
  const Runtime* runtime = Runtime::Current();
  if (Heap::IsVerboseHeap() || runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations entering";
  }
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK_ALIGNED(current, kObjectAlignment);
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
  if (Heap::IsVerboseHeap() || runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations exiting";
  }
}

Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

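// Allocation slow path. Tries progressively more expensive strategies: a
// plain allocation, waiting for a concurrent GC and retrying, a foreground
// GC, growing the heap, and finally a GC that also collects SoftReferences.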
Object* Heap::AllocateLocked(Space* space, size_t size) {
  lock_->AssertHeld();

  // Since allocation can cause a GC which will need to SuspendAll,
  // make sure all allocators are in the kRunnable state.
  DCHECK_EQ(Thread::Current()->GetState(), Thread::kRunnable);

  // Fail impossible allocations. TODO: collect soft references.
  if (size > growth_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    size_t new_footprint = space->GetMaxAllowedFootprint();
    // OLD-TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    if (Heap::IsVerboseGc()) {
      LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
                << " for " << size << "-byte allocation";
    }
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  if (Heap::IsVerboseGc()) {
    LOG(INFO) << "Forcing collection of SoftReferences for "
              << size << "-byte allocation";
  }
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

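// Heap size accessors, in bytes; note that GetMaxMemory reports the current
// growth limit rather than the absolute maximum size.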
int64_t Heap::GetMaxMemory() {
  return growth_size_;
}

int64_t Heap::GetTotalMemory() {
  return alloc_space_->Size();
}

int64_t Heap::GetFreeMemory() {
  return alloc_space_->Size() - num_bytes_allocated_;
}

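// Bitmap-walk visitor that counts instances of a single class; when
// count_assignable is true, instances of assignable subtypes are counted too.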
class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      // Count instances of class_ and of any assignable subtype.
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    } else {
      // Count only exact class matches.
      if (instance_class == class_) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ScopedHeapLock lock;
  InstanceCounter counter(c, count_assignable);
  live_bitmap_->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

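// Explicit GC entry point: takes the heap lock and runs a stop-the-world
// mark-sweep collection.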
void Heap::CollectGarbage() {
  ScopedHeapLock lock;
  CollectGarbageInternal();
}

void Heap::CollectGarbageInternal() {
  lock_->AssertHeld();

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();

  size_t initial_size = num_bytes_allocated_;
  TimingLogger timings("CollectGarbageInternal");
  uint64_t t0 = NanoTime();
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep;
    timings.AddSplit("ctor");

    mark_sweep.Init();
    timings.AddSplit("Init");

    mark_sweep.MarkRoots();
    timings.AddSplit("MarkRoots");

    mark_sweep.ScanDirtyImageRoots();
    timings.AddSplit("DirtyImageRoots");

    // Roots are marked on the bitmap and the mark stack is empty.
    DCHECK(mark_sweep.IsMarkStackEmpty());

    // TODO: if concurrent
    //   unlock heap
    //   thread_list->ResumeAll();

    // Recursively mark all bits set in the non-image mark bitmap.
    mark_sweep.RecursiveMark();
    timings.AddSplit("RecursiveMark");

    // TODO: if concurrent
    //   lock heap
    //   thread_list->SuspendAll();
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);
    timings.AddSplit("ProcessReferences");

    // TODO: if concurrent
    //   swap bitmaps

    mark_sweep.Sweep();
    timings.AddSplit("Sweep");

    cleared_references = mark_sweep.GetClearedReferences();
  }

  GrowForUtilization();
  timings.AddSplit("GrowForUtilization");
  uint64_t t1 = NanoTime();
  thread_list->ResumeAll();

  EnqueueClearedReferences(&cleared_references);

  // TODO: somehow make the specific GC implementation (here MarkSweep) responsible for logging.
  size_t bytes_freed = initial_size - num_bytes_allocated_;
  bool is_small = (bytes_freed > 0 && bytes_freed < 1024);
  size_t kib_freed = (bytes_freed > 0 ? std::max(bytes_freed/1024, 1U) : 0);

  size_t total = GetTotalMemory();
  size_t percent_free = 100 - static_cast<size_t>(100.0f * float(num_bytes_allocated_) / total);

  uint32_t duration = (t1 - t0)/1000/1000;
  bool gc_was_particularly_slow = (duration > 100);  // TODO: crank this down for concurrent.
  if (Heap::IsVerboseGc() || gc_was_particularly_slow) {
    LOG(INFO) << "GC freed " << (is_small ? "<" : "") << kib_freed << "KiB, "
              << percent_free << "% free "
              << (num_bytes_allocated_/1024) << "KiB/" << (total/1024) << "KiB, "
              << "paused " << duration << "ms";
  }
  Dbg::GcDidFinish();
  if (Heap::IsVerboseHeap()) {
    timings.Dump();
  }
}

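// No concurrent collector exists yet, so there is nothing to wait for; this
// currently just asserts that the heap lock is held.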
void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
}

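// Walks every space in the heap, forwarding the callback to Space::Walk.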
void Heap::WalkHeap(void(*callback)(const void*, size_t, const void*, size_t, void*), void* arg) {
  typedef std::vector<Space*>::iterator It;  // C++0x auto.
  for (It it = spaces_.begin(); it != spaces_.end(); ++it) {
    (*it)->Walk(callback, arg);
  }
}

/* Terminology:
 * 1. Footprint: Capacity we allocate from system.
 * 2. Active space: a.k.a. alloc_space_.
 * 3. Soft footprint: external allocation + spaces footprint + active space footprint
 * 4. Overhead: soft footprint excluding active.
 *
 * Layout: (The spaces below might not be contiguous, but are lumped together to depict size.)
 * |----------------------spaces footprint--------- --------------|----active space footprint----|
 *                                                                 |--active space allocated--|
 * |--------------------soft footprint (include active)--------------------------------------|
 * |----------------soft footprint excluding active---------------|
 *                                                                 |------------soft limit-------...|
 * |------------------------------------ideal footprint-----------------------------------------...|
 */

// Sets the maximum number of bytes that the heap is allowed to
// allocate from the system. Clamps to the appropriate maximum
// value.
// Old spaces will count against the ideal size.
void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  if (max_allowed_footprint > Heap::growth_size_) {
    if (Heap::IsVerboseGc()) {
      LOG(INFO) << "Clamp target GC heap from " << max_allowed_footprint
                << " to " << Heap::growth_size_;
    }
    max_allowed_footprint = Heap::growth_size_;
  }

  alloc_space_->SetMaxAllowedFootprint(max_allowed_footprint);
}

// kHeapIdealFree is the ideal maximum free size when we grow the heap for
// utilization.
static const size_t kHeapIdealFree = 2 * MB;
// kHeapMinFree guarantees that you always have at least 512 KB free when you
// grow for utilization, regardless of the target utilization ratio.
static const size_t kHeapMinFree = kHeapIdealFree / 4;

// Given the current contents of the active space, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  lock_->AssertHeld();

  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more
  // when necessary.
  size_t target_size(num_bytes_allocated_ / Heap::GetTargetHeapUtilization());

  if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
    target_size = num_bytes_allocated_ + kHeapIdealFree;
  } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
    target_size = num_bytes_allocated_ + kHeapMinFree;
  }

  SetIdealFootprint(target_size);
}

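// Lifts the growth limit, allowing the alloc space and the card table to
// expand to the heap's full maximum size.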
void Heap::ClearGrowthLimit() {
  ScopedHeapLock lock;
  WaitForConcurrentGcToComplete();
  CHECK_GE(maximum_size_, growth_size_);
  growth_size_ = maximum_size_;
  alloc_space_->ClearGrowthLimit();
  card_table_->ClearGrowthLimit();
}

pid_t Heap::GetLockOwner() {
  return lock_->GetOwner();
}

void Heap::Lock() {
  // Grab the lock, but put ourselves into Thread::kVmWait if it looks
  // like we're going to have to wait on the mutex. This prevents
  // deadlock if another thread is calling CollectGarbageInternal,
  // since they will have the heap lock and be waiting for mutators to
  // suspend.
  if (!lock_->TryLock()) {
    ScopedThreadStateChange tsc(Thread::Current(), Thread::kVmWait);
    lock_->Lock();
  }
}

void Heap::Unlock() {
  lock_->Unlock();
}

void Heap::SetWellKnownClasses(Class* java_lang_ref_FinalizerReference,
                               Class* java_lang_ref_ReferenceQueue) {
  java_lang_ref_FinalizerReference_ = java_lang_ref_FinalizerReference;
  java_lang_ref_ReferenceQueue_ = java_lang_ref_ReferenceQueue;
  CHECK(java_lang_ref_FinalizerReference_ != NULL);
  CHECK(java_lang_ref_ReferenceQueue_ != NULL);
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

Object* Heap::GetReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
}

void Heap::ClearReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, NULL, true);
}

// Returns true if the reference object has not yet been enqueued.
bool Heap::IsEnqueuable(const Object* ref) {
  DCHECK(ref != NULL);
  const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
  const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
  return (queue != NULL) && (queue_next == NULL);
}

void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
  DCHECK(ref != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
  EnqueuePendingReference(ref, cleared_reference_list);
}

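// Pending reference lists are circular singly-linked lists threaded through
// the pendingNext field: *list points at the tail, and the tail's pendingNext
// points back at the head.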
void Heap::EnqueuePendingReference(Object* ref, Object** list) {
  DCHECK(ref != NULL);
  DCHECK(list != NULL);

  if (*list == NULL) {
    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
    *list = ref;
  } else {
    Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
  }
}

Object* Heap::DequeuePendingReference(Object** list) {
  DCHECK(list != NULL);
  DCHECK(*list != NULL);
  Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
  Object* ref;
  if (*list == head) {
    ref = *list;
    *list = NULL;
  } else {
    Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
    ref = head;
  }
  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
  return ref;
}

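// Registers a finalizable object by calling into the managed
// FinalizerReference.add method.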
void Heap::AddFinalizerReference(Thread* self, Object* object) {
  ScopedThreadStateChange tsc(self, Thread::kRunnable);
  static Method* FinalizerReference_add =
      java_lang_ref_FinalizerReference_->FindDirectMethod("add", "(Ljava/lang/Object;)V");
  DCHECK(FinalizerReference_add != NULL);
  Object* args[] = { object };
  FinalizerReference_add->Invoke(self, NULL, reinterpret_cast<byte*>(&args), NULL);
}

void Heap::EnqueueClearedReferences(Object** cleared) {
  DCHECK(cleared != NULL);
  if (*cleared != NULL) {
    static Method* ReferenceQueue_add =
        java_lang_ref_ReferenceQueue_->FindDirectMethod("add", "(Ljava/lang/ref/Reference;)V");
    DCHECK(ReferenceQueue_add != NULL);

    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, Thread::kRunnable);
    Object* args[] = { *cleared };
    ReferenceQueue_add->Invoke(self, NULL, reinterpret_cast<byte*>(&args), NULL);
    *cleared = NULL;
  }
}

}  // namespace art