/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <sys/types.h>
#include <sys/wait.h>

#include <limits>
#include <vector>

#include "card_table.h"
#include "debugger.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "object_utils.h"
#include "os.h"
#include "scoped_heap_lock.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
#include "timing_logger.h"
#include "UniquePtr.h"

namespace art {

static void UpdateFirstAndLastSpace(Space** first_space, Space** last_space, Space* space) {
  if (*first_space == NULL) {
    *first_space = space;
    *last_space = space;
  } else {
    if ((*first_space)->Begin() > space->Begin()) {
      *first_space = space;
    } else if (space->Begin() > (*last_space)->Begin()) {
      *last_space = space;
    }
  }
}

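// Generates the boot image by forking and exec'ing dex2oat over the boot class path.
// The argv built below looks roughly like this (illustrative only; actual paths depend
// on ANDROID_ROOT and the requested image location):
//   /system/bin/dex2oat --image=<image> --runtime-arg -Xms64m --runtime-arg -Xmx64m
//       --dex-file=<each boot class path entry> --oat-file=<image, ".art" -> ".oat">
//       --base=0x60000000
// Returns true only if dex2oat was reaped and exited successfully.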
static bool GenerateImage(const std::string image_file_name) {
  const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
  std::vector<std::string> boot_class_path;
  Split(boot_class_path_string, ':', boot_class_path);
  if (boot_class_path.empty()) {
    LOG(FATAL) << "Failed to generate image because no boot class path specified";
  }

  std::vector<char*> arg_vector;

  std::string dex2oat_string(GetAndroidRoot());
  dex2oat_string += "/bin/dex2oat";
#ifndef NDEBUG
  dex2oat_string += 'd';
#endif
  const char* dex2oat = dex2oat_string.c_str();
  arg_vector.push_back(strdup(dex2oat));

  std::string image_option_string("--image=");
  image_option_string += image_file_name;
  const char* image_option = image_option_string.c_str();
  arg_vector.push_back(strdup(image_option));

  arg_vector.push_back(strdup("--runtime-arg"));
  arg_vector.push_back(strdup("-Xms64m"));

  arg_vector.push_back(strdup("--runtime-arg"));
  arg_vector.push_back(strdup("-Xmx64m"));

  for (size_t i = 0; i < boot_class_path.size(); i++) {
    std::string dex_file_option_string("--dex-file=");
    dex_file_option_string += boot_class_path[i];
    const char* dex_file_option = dex_file_option_string.c_str();
    arg_vector.push_back(strdup(dex_file_option));
  }

  // Derive the oat file name from the image file name: replace the ".art" suffix with ".oat".
  std::string oat_file_option_string("--oat-file=");
  oat_file_option_string += image_file_name;
  oat_file_option_string.erase(oat_file_option_string.size() - 3);
  oat_file_option_string += "oat";
  const char* oat_file_option = oat_file_option_string.c_str();
  arg_vector.push_back(strdup(oat_file_option));

  arg_vector.push_back(strdup("--base=0x60000000"));

  std::string command_line(Join(arg_vector, ' '));
  LOG(INFO) << command_line;

  arg_vector.push_back(NULL);
  char** argv = &arg_vector[0];

  // Fork and exec dex2oat.
  pid_t pid = fork();
  if (pid == 0) {
    // No allocation is allowed between fork and exec.

    // Change process groups, so we don't get reaped by ProcessManager.
    setpgid(0, 0);

    execv(dex2oat, argv);

    PLOG(FATAL) << "execv(" << dex2oat << ") failed";
    return false;
  } else {
    STLDeleteElements(&arg_vector);

    // Wait for dex2oat to finish.
    int status;
    pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
    if (got_pid != pid) {
      PLOG(ERROR) << "waitpid failed: wanted " << pid << ", got " << got_pid;
      return false;
    }
    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
      LOG(ERROR) << dex2oat << " failed: " << command_line;
      return false;
    }
  }
  return true;
}

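// Heap construction: map the boot image (generating it with dex2oat if necessary), place
// the alloc space after the image's oat file, then allocate live/mark bitmaps and a card
// table spanning the combined address range of all spaces.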
Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
           const std::string& original_image_file_name)
    : lock_(NULL),
      image_space_(NULL),
      alloc_space_(NULL),
      mark_bitmap_(NULL),
      live_bitmap_(NULL),
      card_table_(NULL),
      card_marking_disabled_(false),
      is_gc_running_(false),
      num_bytes_allocated_(0),
      num_objects_allocated_(0),
      java_lang_ref_FinalizerReference_(NULL),
      java_lang_ref_ReferenceQueue_(NULL),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      target_utilization_(0.5),
      verify_objects_(false) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }

  // Compute the bounds of all spaces for allocating live and mark bitmaps;
  // there will be at least one space (the alloc space).
  Space* first_space = NULL;
  Space* last_space = NULL;

  // Requested begin for the alloc space, to follow the mapped image and oat files.
  byte* requested_begin = NULL;
  std::string image_file_name(original_image_file_name);
  if (!image_file_name.empty()) {
    if (OS::FileExists(image_file_name.c_str())) {
      // If the /system file exists, it should be up-to-date; don't try to generate it.
      image_space_ = Space::CreateImageSpace(image_file_name);
    } else {
      // If the /system file didn't exist, we need to use one from the art-cache.
      // If the cache file exists, try to open it, but if that fails, regenerate.
      // If it does not exist, generate it.
      image_file_name = GetArtCacheFilenameOrDie(image_file_name);
      if (OS::FileExists(image_file_name.c_str())) {
        image_space_ = Space::CreateImageSpace(image_file_name);
      }
      if (image_space_ == NULL) {
        if (!GenerateImage(image_file_name)) {
          LOG(FATAL) << "Failed to generate image: " << image_file_name;
        }
        image_space_ = Space::CreateImageSpace(image_file_name);
      }
    }
    if (image_space_ == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_name;
    }

    AddSpace(image_space_);
    UpdateFirstAndLastSpace(&first_space, &last_space, image_space_);
    // Oat files referenced by image files immediately follow them in memory; ensure
    // the alloc space isn't going to land in the middle.
    byte* oat_end_addr = image_space_->GetImageHeader().GetOatEnd();
    CHECK(oat_end_addr > image_space_->End());
    if (oat_end_addr > requested_begin) {
      requested_begin = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_end_addr),
                                                        kPageSize));
    }
  }

  alloc_space_ = Space::CreateAllocSpace("alloc space", initial_size, growth_limit, capacity,
                                         requested_begin);
  if (alloc_space_ == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }
  AddSpace(alloc_space_);
  UpdateFirstAndLastSpace(&first_space, &last_space, alloc_space_);
  byte* heap_begin = first_space->Begin();
  size_t heap_capacity = (last_space->Begin() - first_space->Begin()) + last_space->NonGrowthLimitCapacity();

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create("dalvik-bitmap-1", heap_begin, heap_capacity));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Mark image objects in the live bitmap.
  for (size_t i = 0; i < spaces_.size(); ++i) {
    Space* space = spaces_[i];
    if (space->IsImageSpace()) {
      space->AsImageSpace()->RecordImageAllocations(live_bitmap.get());
    }
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create("dalvik-bitmap-2", heap_begin, heap_capacity));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  // Allocate the card table.
  UniquePtr<CardTable> card_table(CardTable::Create(heap_begin, heap_capacity));
  if (card_table.get() == NULL) {
    LOG(FATAL) << "Failed to create card table";
  }

  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();
  card_table_ = card_table.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock", kHeapLock);

  Heap::EnableObjectValidation();

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

void Heap::AddSpace(Space* space) {
  spaces_.push_back(space);
}

Heap::~Heap() {
  VLOG(heap) << "~Heap()";
  // We can't take the heap lock here because there might be a daemon thread suspended with the
  // heap lock held. We know though that no non-daemon threads are executing, and we know that
  // all daemon threads are suspended, and we also know that the threads list has been deleted, so
  // those threads can't resume. We're the only running thread, and we can do whatever we like...
  STLDeleteElements(&spaces_);
  delete mark_bitmap_;
  delete live_bitmap_;
  delete card_table_;
  delete lock_;
}

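// Allocates byte_count bytes for an instance of klass under the heap lock; on success the
// object's class pointer is set and the allocation may be reported to the debugger. On
// failure the lock is released (the scope ends) before OutOfMemoryError is thrown.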
Object* Heap::AllocObject(Class* klass, size_t byte_count) {
  {
    ScopedHeapLock heap_lock;
    DCHECK(klass == NULL || (klass->IsClassClass() && byte_count >= sizeof(Class)) ||
           (klass->IsVariableSize() || klass->GetObjectSize() == byte_count) ||
           strlen(ClassHelper(klass).GetDescriptor()) == 0);
    DCHECK_GE(byte_count, sizeof(Object));
    Object* obj = AllocateLocked(byte_count);
    if (obj != NULL) {
      obj->SetClass(klass);
      if (Dbg::IsAllocTrackingEnabled()) {
        Dbg::RecordAllocation(klass, byte_count);
      }
      return obj;
    }
  }

  Thread::Current()->ThrowOutOfMemoryError(klass, byte_count);
  return NULL;
}

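// Note: NULL is treated as a valid heap address here (the function returns true for it),
// so callers can verify possibly-null references without a separate null check.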
bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL) {
    return true;
  }
  if (!IsAligned<kObjectAlignment>(obj)) {
    return false;
  }
  for (size_t i = 0; i < spaces_.size(); ++i) {
    if (spaces_[i]->Contains(obj)) {
      return true;
    }
  }
  return false;
}

bool Heap::IsLiveObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  return IsHeapAddress(obj) && live_bitmap_->Test(obj);
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (!verify_objects_) {
    return;
  }
  ScopedHeapLock heap_lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL) {
    if (!IsAligned<kObjectAlignment>(obj)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early-dawn-of-the-universe verifications.
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned<kObjectAlignment>(c)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass().
      // Note: we don't use the accessors here as they have internal sanity checks
      // that we don't want to run.
      raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock heap_lock;
  live_bitmap_->Walk(Heap::VerificationCallback, this);
}

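// Allocation bookkeeping: bumps the heap's byte/object counters and (if stats are
// enabled) the global and per-thread runtime stats, then marks the new object live.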
void Heap::RecordAllocationLocked(AllocSpace* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_GT(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  live_bitmap_->Set(obj);
}

void Heap::RecordFreeLocked(size_t freed_objects, size_t freed_bytes) {
  lock_->AssertHeld();

  if (freed_objects < num_objects_allocated_) {
    num_objects_allocated_ -= freed_objects;
  } else {
    num_objects_allocated_ = 0;
  }
  if (freed_bytes < num_bytes_allocated_) {
    num_bytes_allocated_ -= freed_bytes;
  } else {
    num_bytes_allocated_ = 0;
  }

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->freed_objects;
    ++thread_stats->freed_objects;
    global_stats->freed_bytes += freed_bytes;
    thread_stats->freed_bytes += freed_bytes;
  }
}

Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  AllocSpace* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

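// Allocation slow path, an escalation ladder: (1) try a plain allocation; (2) if a GC is
// running, wait for it to complete and retry; (3) run a GC that doesn't clear
// SoftReferences, then retry; (4) retry allowing the footprint limit to grow; (5) as a
// last resort, run a GC that clears SoftReferences and retry with growth. Returns NULL
// only after all of these fail.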
Object* Heap::AllocateLocked(AllocSpace* space, size_t alloc_size) {
  lock_->AssertHeld();

  // Since allocation can cause a GC which will need to SuspendAll,
  // make sure all allocators are in the kRunnable state.
  CHECK_EQ(Thread::Current()->GetState(), Thread::kRunnable);

  // Fail impossible allocations.
  if (alloc_size > space->Capacity()) {
    // On failure, collect soft references.
    CollectGarbageInternal(true);
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap lock, wait for the GC to
    // complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(alloc_size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  CollectGarbageInternal(false);
  ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(alloc_size);
  if (ptr != NULL) {
    size_t new_footprint = space->GetFootprintLimit();
    // OLD-TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    VLOG(gc) << "Grow heap (frag case) to " << PrettySize(new_footprint)
             << " for a " << PrettySize(alloc_size) << " allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish.
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size) << " allocation";
  CollectGarbageInternal(true);
  ptr = space->AllocWithGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << PrettySize(alloc_size) << " allocation";

  // TODO: tell the HeapSource to dump its state.
  // TODO: dump stack traces for all threads.

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  return alloc_space_->Capacity();
}

int64_t Heap::GetTotalMemory() {
  return alloc_space_->Capacity();
}

int64_t Heap::GetFreeMemory() {
  return alloc_space_->Capacity() - num_bytes_allocated_;
}

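// Visitor used by CountInstances: walked over the live bitmap, it counts objects of a
// given class, matching either by exact class or by assignability depending on the
// count_assignable flag (see VisitInstance below).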
class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class == class_) {
        ++count_;
      }
    } else {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ScopedHeapLock heap_lock;
  InstanceCounter counter(c, count_assignable);
  live_bitmap_->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

void Heap::CollectGarbage(bool clear_soft_references) {
  ScopedHeapLock heap_lock;
  CollectGarbageInternal(clear_soft_references);
}

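// Stop-the-world mark-sweep: suspend all threads, mark roots and dirty image roots,
// recursively mark reachable objects, process references (clearing SoftReferences only
// when requested), sweep, and grow the footprint toward the target utilization; then
// resume threads, enqueue cleared references, and request a heap trim.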
void Heap::CollectGarbageInternal(bool clear_soft_references) {
  lock_->AssertHeld();

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();

  size_t initial_size = num_bytes_allocated_;
  TimingLogger timings("CollectGarbageInternal");
  uint64_t t0 = NanoTime();
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep;
    timings.AddSplit("ctor");

    mark_sweep.Init();
    timings.AddSplit("Init");

    mark_sweep.MarkRoots();
    timings.AddSplit("MarkRoots");

    mark_sweep.ScanDirtyImageRoots();
    timings.AddSplit("DirtyImageRoots");

    // Roots are marked on the bitmap and the mark_stack is empty.
    DCHECK(mark_sweep.IsMarkStackEmpty());

    // TODO: if concurrent
    // unlock heap
    // thread_list->ResumeAll();

    // Recursively mark all bits set in the non-image mark bitmap.
    mark_sweep.RecursiveMark();
    timings.AddSplit("RecursiveMark");

    // TODO: if concurrent
    // lock heap
    // thread_list->SuspendAll();
    // re-mark root set
    // scan dirty objects

    mark_sweep.ProcessReferences(clear_soft_references);
    timings.AddSplit("ProcessReferences");

    // TODO: if concurrent
    // swap bitmaps

    mark_sweep.Sweep();
    timings.AddSplit("Sweep");

    cleared_references = mark_sweep.GetClearedReferences();
  }

  GrowForUtilization();
  timings.AddSplit("GrowForUtilization");
  uint64_t t1 = NanoTime();
  thread_list->ResumeAll();

  EnqueueClearedReferences(&cleared_references);
  RequestHeapTrim();

  uint64_t duration_ns = t1 - t0;
  bool gc_was_particularly_slow = duration_ns > MsToNs(50);  // TODO: crank this down for concurrent.
  if (VLOG_IS_ON(gc) || gc_was_particularly_slow) {
    // TODO: somehow make the specific GC implementation (here MarkSweep) responsible for logging.
    size_t bytes_freed = initial_size - num_bytes_allocated_;
    if (bytes_freed > KB) {  // Round freed bytes down to the nearest KB in the output if > 1KB.
      bytes_freed = RoundDown(bytes_freed, KB);
    }
    size_t bytes_allocated = RoundUp(num_bytes_allocated_, KB);
    // Lose the low nanoseconds in the duration. TODO: make this part of PrettyDuration.
    duration_ns = (duration_ns / 1000) * 1000;
    size_t total = GetTotalMemory();
    size_t percentFree = 100 - static_cast<size_t>(100.0f * static_cast<float>(num_bytes_allocated_) / total);
    LOG(INFO) << "GC freed " << PrettySize(bytes_freed) << ", " << percentFree << "% free, "
              << PrettySize(bytes_allocated) << "/" << PrettySize(total) << ", "
              << "paused " << PrettyDuration(duration_ns);
  }
  Dbg::GcDidFinish();
  if (VLOG_IS_ON(heap)) {
    timings.Dump();
  }
}

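// There is no concurrent collector yet (see the "TODO: if concurrent" notes above), so
// there is nothing to wait for; this currently just asserts that the heap lock is held.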
void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
}

void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  size_t alloc_space_capacity = alloc_space_->Capacity();
  if (max_allowed_footprint > alloc_space_capacity) {
    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint)
             << " to " << PrettySize(alloc_space_capacity);
    max_allowed_footprint = alloc_space_capacity;
  }
  alloc_space_->SetFootprintLimit(max_allowed_footprint);
}

// kHeapIdealFree is the ideal maximum free size when we grow the heap for utilization.
static const size_t kHeapIdealFree = 2 * MB;
// kHeapMinFree guarantees that you always have at least 512KB free when you grow for
// utilization, regardless of the target utilization ratio.
static const size_t kHeapMinFree = kHeapIdealFree / 4;

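// Example (illustrative): with 8MB allocated and the default target utilization of 0.5,
// the raw target is 16MB; that exceeds allocated + kHeapIdealFree (10MB), so the ideal
// footprint is clamped to 10MB. The two clamps keep the free headroom between
// kHeapMinFree (512KB) and kHeapIdealFree (2MB).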
void Heap::GrowForUtilization() {
  lock_->AssertHeld();

  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more
  // when necessary.
  size_t target_size(num_bytes_allocated_ / Heap::GetTargetHeapUtilization());

  if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
    target_size = num_bytes_allocated_ + kHeapIdealFree;
  } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
    target_size = num_bytes_allocated_ + kHeapMinFree;
  }

  SetIdealFootprint(target_size);
}

void Heap::ClearGrowthLimit() {
  ScopedHeapLock heap_lock;
  WaitForConcurrentGcToComplete();
  alloc_space_->ClearGrowthLimit();
}

pid_t Heap::GetLockOwner() {
  return lock_->GetOwner();
}

void Heap::Lock() {
  // Grab the lock, but put ourselves into Thread::kVmWait if it looks
  // like we're going to have to wait on the mutex. This prevents
  // deadlock if another thread is calling CollectGarbageInternal,
  // since that thread will hold the heap lock while waiting for
  // mutators to suspend.
  if (!lock_->TryLock()) {
    ScopedThreadStateChange tsc(Thread::Current(), Thread::kVmWait);
    lock_->Lock();
  }
}

void Heap::Unlock() {
  lock_->Unlock();
}

void Heap::SetWellKnownClasses(Class* java_lang_ref_FinalizerReference,
                               Class* java_lang_ref_ReferenceQueue) {
  java_lang_ref_FinalizerReference_ = java_lang_ref_FinalizerReference;
  java_lang_ref_ReferenceQueue_ = java_lang_ref_ReferenceQueue;
  CHECK(java_lang_ref_FinalizerReference_ != NULL);
  CHECK(java_lang_ref_ReferenceQueue_ != NULL);
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

Object* Heap::GetReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
}

void Heap::ClearReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, NULL, true);
}

// Returns true if the reference object has not yet been enqueued.
bool Heap::IsEnqueuable(const Object* ref) {
  DCHECK(ref != NULL);
  const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
  const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
  return (queue != NULL) && (queue_next == NULL);
}

void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
  DCHECK(ref != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
  EnqueuePendingReference(ref, cleared_reference_list);
}

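// Pending/cleared references form an intrusive circular singly-linked list threaded
// through the pendingNext field: '*list' stays pointing at the first reference enqueued,
// and its pendingNext points at the most recently enqueued one, so DequeuePendingReference
// pops references in most-recently-enqueued-first order.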
void Heap::EnqueuePendingReference(Object* ref, Object** list) {
  DCHECK(ref != NULL);
  DCHECK(list != NULL);

  if (*list == NULL) {
    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
    *list = ref;
  } else {
    Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
  }
}

Object* Heap::DequeuePendingReference(Object** list) {
  DCHECK(list != NULL);
  DCHECK(*list != NULL);
  Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
  Object* ref;
  if (*list == head) {
    ref = *list;
    *list = NULL;
  } else {
    Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
    ref = head;
  }
  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
  return ref;
}

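// Registers 'object' with the finalizer machinery by invoking the managed method
// java.lang.ref.FinalizerReference.add(Object); the thread must be kRunnable while
// executing managed code, hence the scoped state change.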
void Heap::AddFinalizerReference(Thread* self, Object* object) {
  ScopedThreadStateChange tsc(self, Thread::kRunnable);
  static Method* FinalizerReference_add =
      java_lang_ref_FinalizerReference_->FindDirectMethod("add", "(Ljava/lang/Object;)V");
  DCHECK(FinalizerReference_add != NULL);
  JValue args[1];
  args[0].l = object;
  FinalizerReference_add->Invoke(self, NULL, args, NULL);
}

void Heap::EnqueueClearedReferences(Object** cleared) {
  DCHECK(cleared != NULL);
  if (*cleared != NULL) {
    static Method* ReferenceQueue_add =
        java_lang_ref_ReferenceQueue_->FindDirectMethod("add", "(Ljava/lang/ref/Reference;)V");
    DCHECK(ReferenceQueue_add != NULL);

    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, Thread::kRunnable);
    JValue args[1];
    args[0].l = *cleared;
    ReferenceQueue_add->Invoke(self, NULL, args, NULL);
    *cleared = NULL;
  }
}

void Heap::RequestHeapTrim() {
  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
  // because that only marks object heads, so a large array looks like lots of empty space. We
  // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
  // to utilization (which is probably inversely proportional to how much benefit we can expect).
  // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
  // not how much use we're making of those pages.
  float utilization = static_cast<float>(num_bytes_allocated_) / alloc_space_->Size();
  if (utilization > 0.75f) {
    // Don't bother trimming the heap if it's more than 75% utilized.
    // (This percentage was picked arbitrarily.)
    return;
  }
  if (!Runtime::Current()->IsStarted()) {
    // Heap trimming isn't supported without a Java runtime (such as at dex2oat time).
    return;
  }
  JNIEnv* env = Thread::Current()->GetJniEnv();
  static jclass Daemons_class = CacheClass(env, "java/lang/Daemons");
  static jmethodID Daemons_requestHeapTrim = env->GetStaticMethodID(Daemons_class, "requestHeapTrim", "()V");
  env->CallStaticVoidMethod(Daemons_class, Daemons_requestHeapTrim);
  CHECK(!env->ExceptionCheck());
}

}  // namespace art