/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <sys/types.h>
#include <sys/wait.h>

#include <limits>
#include <vector>

#include "atomic.h"
#include "card_table.h"
#include "debugger.h"
#include "heap_bitmap.h"
#include "image.h"
#include "mark_sweep.h"
#include "mod_union_table.h"
#include "object.h"
#include "object_utils.h"
#include "os.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
#include "timing_logger.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

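// Helper that tracks the lowest- and highest-addressed spaces seen so far; the constructor uses
// the resulting bounds to size heap-wide structures such as the card table, which must cover
// every space with one contiguous range.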
static void UpdateFirstAndLastSpace(Space** first_space, Space** last_space, Space* space) {
  if (*first_space == NULL) {
    *first_space = space;
    *last_space = space;
  } else {
    if ((*first_space)->Begin() > space->Begin()) {
      *first_space = space;
    } else if (space->Begin() > (*last_space)->Begin()) {
      *last_space = space;
    }
  }
}

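// Fork and exec dex2oat to compile the boot class path into an image (.art) file and the matching
// .oat file. For illustration only (paths and jars below are examples, not literal values), the
// generated command line looks roughly like:
//
//   /system/bin/dex2oat --image=/data/art-cache/boot.art \
//       --runtime-arg -Xms64m --runtime-arg -Xmx64m \
//       --dex-file=/system/framework/core.jar ... \
//       --oat-file=/data/art-cache/boot.oat --base=0x60000000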
static bool GenerateImage(const std::string& image_file_name) {
  const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
  std::vector<std::string> boot_class_path;
  Split(boot_class_path_string, ':', boot_class_path);
  if (boot_class_path.empty()) {
    LOG(FATAL) << "Failed to generate image because no boot class path specified";
  }

  std::vector<char*> arg_vector;

  std::string dex2oat_string(GetAndroidRoot());
  dex2oat_string += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
  const char* dex2oat = dex2oat_string.c_str();
  arg_vector.push_back(strdup(dex2oat));

  std::string image_option_string("--image=");
  image_option_string += image_file_name;
  const char* image_option = image_option_string.c_str();
  arg_vector.push_back(strdup(image_option));

  arg_vector.push_back(strdup("--runtime-arg"));
  arg_vector.push_back(strdup("-Xms64m"));

  arg_vector.push_back(strdup("--runtime-arg"));
  arg_vector.push_back(strdup("-Xmx64m"));

  for (size_t i = 0; i < boot_class_path.size(); i++) {
    std::string dex_file_option_string("--dex-file=");
    dex_file_option_string += boot_class_path[i];
    const char* dex_file_option = dex_file_option_string.c_str();
    arg_vector.push_back(strdup(dex_file_option));
  }

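  // Derive the oat file name from the image file name by replacing the trailing "art" with
  // "oat", e.g. ".../boot.art" becomes ".../boot.oat".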
  std::string oat_file_option_string("--oat-file=");
  oat_file_option_string += image_file_name;
  oat_file_option_string.erase(oat_file_option_string.size() - 3);
  oat_file_option_string += "oat";
  const char* oat_file_option = oat_file_option_string.c_str();
  arg_vector.push_back(strdup(oat_file_option));

  arg_vector.push_back(strdup("--base=0x60000000"));

  std::string command_line(Join(arg_vector, ' '));
  LOG(INFO) << command_line;

  arg_vector.push_back(NULL);
  char** argv = &arg_vector[0];

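  // Note that fork() duplicates only the calling thread, so another runtime thread could hold the
  // malloc lock at fork time; the child must therefore do no allocation (nothing that isn't
  // async-signal-safe) between fork() and execv(), hence the arguments were strdup'ed above.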
  // fork and exec dex2oat
  pid_t pid = fork();
  if (pid == 0) {
    // no allocation allowed between fork and exec

    // change process groups, so we don't get reaped by ProcessManager
    setpgid(0, 0);

    execv(dex2oat, argv);

    PLOG(FATAL) << "execv(" << dex2oat << ") failed";
    return false;
  } else {
    STLDeleteElements(&arg_vector);

    // wait for dex2oat to finish
    int status;
    pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
    if (got_pid != pid) {
      PLOG(ERROR) << "waitpid failed: wanted " << pid << ", got " << got_pid;
      return false;
    }
    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
      LOG(ERROR) << dex2oat << " failed: " << command_line;
      return false;
    }
  }
  return true;
}

Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
           const std::string& original_image_file_name, bool concurrent_gc)
    : alloc_space_(NULL),
      card_table_(NULL),
      concurrent_gc_(concurrent_gc),
      have_zygote_space_(false),
      card_marking_disabled_(false),
      is_gc_running_(false),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      concurrent_start_size_(128 * KB),
      concurrent_min_free_(256 * KB),
      sticky_gc_count_(0),
      num_bytes_allocated_(0),
      num_objects_allocated_(0),
      last_trim_time_(0),
      try_running_gc_(false),
      requesting_gc_(false),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      target_utilization_(0.5),
      verify_objects_(false) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }

  // Compute the bounds of all spaces for allocating live and mark bitmaps;
  // there will be at least one space (the alloc space).
  Space* first_space = NULL;
  Space* last_space = NULL;

  live_bitmap_.reset(new HeapBitmap(this));
  mark_bitmap_.reset(new HeapBitmap(this));

  // Requested begin for the alloc space, to follow the mapped image and oat files.
  byte* requested_begin = NULL;
  std::string image_file_name(original_image_file_name);
  if (!image_file_name.empty()) {
    Space* image_space = NULL;

    if (OS::FileExists(image_file_name.c_str())) {
      // If the /system file exists, it should be up-to-date; don't try to generate it.
      image_space = Space::CreateImageSpace(image_file_name);
    } else {
      // If the /system file didn't exist, we need to use one from the art-cache.
      // If the cache file exists, try to open it, but if that fails, regenerate.
      // If it does not exist, generate it.
      image_file_name = GetArtCacheFilenameOrDie(image_file_name);
      if (OS::FileExists(image_file_name.c_str())) {
        image_space = Space::CreateImageSpace(image_file_name);
      }
      if (image_space == NULL) {
        if (!GenerateImage(image_file_name)) {
          LOG(FATAL) << "Failed to generate image: " << image_file_name;
        }
        image_space = Space::CreateImageSpace(image_file_name);
      }
    }
    if (image_space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_name;
    }

    AddSpace(image_space);
    UpdateFirstAndLastSpace(&first_space, &last_space, image_space);
    // Oat files referenced by image files immediately follow them in memory; ensure the alloc
    // space isn't going to get in the middle.
    byte* oat_end_addr = GetImageSpace()->GetImageHeader().GetOatEnd();
    CHECK(oat_end_addr > GetImageSpace()->End());
    if (oat_end_addr > requested_begin) {
      requested_begin = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_end_addr),
                                                        kPageSize));
    }
  }

  UniquePtr<AllocSpace> alloc_space(Space::CreateAllocSpace(
      "alloc space", initial_size, growth_limit, capacity, requested_begin));
  alloc_space_ = alloc_space.release();
  CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
  AddSpace(alloc_space_);

  UpdateFirstAndLastSpace(&first_space, &last_space, alloc_space_);
  byte* heap_begin = first_space->Begin();
  size_t heap_capacity = (last_space->Begin() - first_space->Begin()) + last_space->NonGrowthLimitCapacity();

  // Mark image objects in the live bitmap.
  for (size_t i = 0; i < spaces_.size(); ++i) {
    Space* space = spaces_[i];
    if (space->IsImageSpace()) {
      space->AsImageSpace()->RecordImageAllocations(space->GetLiveBitmap());
    }
  }

  // Allocate the card table.
  card_table_.reset(CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

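  // Mod-union tables record, per card, references that point from a rarely collected space
  // (image or zygote) into the alloc space. During partial and sticky collections they stand in
  // for re-scanning those spaces; see the Update()/MarkReferences() calls in the collection
  // plans below.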
  mod_union_table_.reset(new ModUnionTableToZygoteAllocspace<ModUnionTableReferenceCache>(this));
  CHECK(mod_union_table_.get() != NULL) << "Failed to create mod-union table";

  zygote_mod_union_table_.reset(new ModUnionTableCardCache(this));
  CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";

  num_bytes_allocated_ = 0;
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsImageSpace()) {
      num_bytes_allocated_ += (*it)->AsImageSpace()->Size();
    }
  }

  // TODO: Count objects in the image space here.
  num_objects_allocated_ = 0;

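  // Sizing rationale: each stack slot holds one Object*. In the worst case the heap is packed
  // with minimum-size (SpaceBitmap::kAlignment-byte) objects, i.e. capacity / kAlignment of them,
  // each needing a kWordSize slot, which is the bound computed below.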
  // Max stack size in bytes.
  static const size_t max_stack_size = capacity / SpaceBitmap::kAlignment * kWordSize;

  // TODO: Rename MarkStack to a more generic name?
  mark_stack_.reset(MarkStack::Create("dalvik-mark-stack", max_stack_size));
  allocation_stack_.reset(MarkStack::Create("dalvik-allocation-stack", max_stack_size));
  live_stack_.reset(MarkStack::Create("dalvik-live-stack", max_stack_size));

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable"));

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

// Sort spaces based on begin address.
class SpaceSorter {
 public:
  bool operator () (const Space* a, const Space* b) const {
    return a->Begin() < b->Begin();
  }
};

void Heap::AddSpace(Space* space) {
  WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
  DCHECK(space != NULL);
  DCHECK(space->GetLiveBitmap() != NULL);
  live_bitmap_->AddSpaceBitmap(space->GetLiveBitmap());
  DCHECK(space->GetMarkBitmap() != NULL);
  mark_bitmap_->AddSpaceBitmap(space->GetMarkBitmap());
  spaces_.push_back(space);
  if (space->IsAllocSpace()) {
    alloc_space_ = space->AsAllocSpace();
  }

  // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger).
  std::sort(spaces_.begin(), spaces_.end(), SpaceSorter());

  // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
  // avoid redundant marking.
  bool seen_zygote = false, seen_alloc = false;
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    if (space->IsImageSpace()) {
      DCHECK(!seen_zygote);
      DCHECK(!seen_alloc);
    } else if (space->IsZygoteSpace()) {
      DCHECK(!seen_alloc);
      seen_zygote = true;
    } else if (space->IsAllocSpace()) {
      seen_alloc = true;
    }
  }
}

Heap::~Heap() {
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();

  VLOG(heap) << "~Heap()";
  // We can't take the heap lock here because there might be a daemon thread suspended with the
  // heap lock held. We know though that no non-daemon threads are executing, and we know that
  // all daemon threads are suspended, and we also know that the threads list has been deleted, so
  // those threads can't resume. We're the only running thread, and we can do whatever we like...
  STLDeleteElements(&spaces_);
  delete gc_complete_lock_;
}

Space* Heap::FindSpaceFromObject(const Object* obj) const {
  // TODO: C++0x auto
  for (Spaces::const_iterator cur = spaces_.begin(); cur != spaces_.end(); ++cur) {
    if ((*cur)->Contains(obj)) {
      return *cur;
    }
  }
  LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  return NULL;
}

ImageSpace* Heap::GetImageSpace() {
  // TODO: C++0x auto
  for (Spaces::const_iterator cur = spaces_.begin(); cur != spaces_.end(); ++cur) {
    if ((*cur)->IsImageSpace()) {
      return (*cur)->AsImageSpace();
    }
  }
  return NULL;
}

AllocSpace* Heap::GetAllocSpace() {
  return alloc_space_;
}

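// Callback for AllocSpace::Walk (a dlmalloc-style chunk inspection pass): computes each chunk's
// unused bytes and keeps the maximum, i.e. the largest contiguous allocation that could currently
// succeed. Only used to build OutOfMemoryError detail messages.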
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);

  size_t chunk_size = static_cast<size_t>(reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start));
  size_t chunk_free_bytes = 0;
  if (used_bytes < chunk_size) {
    chunk_free_bytes = chunk_size - used_bytes;
  }

  if (chunk_free_bytes > max_contiguous_allocation) {
    max_contiguous_allocation = chunk_free_bytes;
  }
}

Object* Heap::AllocObject(Class* c, size_t byte_count) {
  // Used in the detail message if we throw an OOME.
  int64_t total_bytes_free;
  size_t max_contiguous_allocation;

  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(Object));
  Object* obj = Allocate(byte_count);
  if (obj != NULL) {
    obj->SetClass(c);
    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(c, byte_count);
    }
    const bool request_concurrent_gc = num_bytes_allocated_ >= concurrent_start_bytes_;
    if (request_concurrent_gc) {
      // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
      SirtRef<Object> ref(obj);
      RequestConcurrentGC();
    }
    VerifyObject(obj);

    // Additional verification to ensure that we did not allocate into a zygote space.
    DCHECK(!have_zygote_space_ || !FindSpaceFromObject(obj)->IsZygoteSpace());

    return obj;
  }
  total_bytes_free = GetFreeMemory();
  max_contiguous_allocation = 0;
  // TODO: C++0x auto
  for (Spaces::const_iterator cur = spaces_.begin(); cur != spaces_.end(); ++cur) {
    if ((*cur)->IsAllocSpace()) {
      (*cur)->AsAllocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
    }
  }

  std::string msg(StringPrintf("Failed to allocate a %zd-byte %s (%lld total bytes free; largest possible contiguous allocation %zd bytes)",
                               byte_count,
                               PrettyDescriptor(c).c_str(),
                               total_bytes_free, max_contiguous_allocation));
  Thread::Current()->ThrowOutOfMemoryError(msg.c_str());
  return NULL;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL) {
    return true;
  }
  if (!IsAligned<kObjectAlignment>(obj)) {
    return false;
  }
  for (size_t i = 0; i < spaces_.size(); ++i) {
    if (spaces_[i]->Contains(obj)) {
      return true;
    }
  }
  return false;
}

bool Heap::IsLiveObjectLocked(const Object* obj) {
  GlobalSynchronization::heap_bitmap_lock_->AssertReaderHeld();
  return IsHeapAddress(obj) && GetLiveBitmap()->Test(obj);
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (obj == NULL || this == NULL || !verify_objects_ || Runtime::Current()->IsShuttingDown() ||
      Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}
#endif

void Heap::DumpSpaces() {
  // TODO: C++0x auto
  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    LOG(INFO) << *space;
    LOG(INFO) << *space->GetLiveBitmap();
    LOG(INFO) << *space->GetMarkBitmap();
  }
}

// We want to avoid bit rotting.
void Heap::VerifyObjectBody(const Object* obj) {
  if (!IsAligned<kObjectAlignment>(obj)) {
    LOG(FATAL) << "Object isn't aligned: " << obj;
  } else if (!GetLiveBitmap()->Test(obj)) {
    DumpSpaces();
    LOG(FATAL) << "Object is dead: " << obj;
  }

  // Ignore early dawn of the universe verifications.
  if (!VERIFY_OBJECT_FAST && num_objects_allocated_ > 10) {
    const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
        Object::ClassOffset().Int32Value();
    const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
    if (c == NULL) {
      LOG(FATAL) << "Null class in object: " << obj;
    } else if (!IsAligned<kObjectAlignment>(c)) {
      LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
    } else if (!GetLiveBitmap()->Test(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
    // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
    // Note: we don't use the accessors here as they have internal sanity checks
    // that we don't want to run.
    raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
    const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
    raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
    const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
    CHECK_EQ(c_c, c_c_c);
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

void Heap::RecordAllocation(AllocSpace* space, const Object* obj) {
  {
    size_t size = space->AllocationSize(obj);
    DCHECK_GT(size, 0u);
    COMPILE_ASSERT(sizeof(size_t) == sizeof(int32_t),
                   int32_t_must_be_same_size_as_size_t_for_used_atomic_operations);
    android_atomic_add(size, reinterpret_cast<volatile int32_t*>(&num_bytes_allocated_));
    android_atomic_add(1, reinterpret_cast<volatile int32_t*>(&num_objects_allocated_));

    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      RuntimeStats* thread_stats = Thread::Current()->GetStats();
      ++global_stats->allocated_objects;
      ++thread_stats->allocated_objects;
      global_stats->allocated_bytes += size;
      thread_stats->allocated_bytes += size;
    }
  }

  DCHECK(obj);

  allocation_stack_->AtomicPush(obj);
#if VERIFY_OBJECT_ENABLED
  WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
  // VerifyObject doesn't like objects in the allocation stack that aren't marked as live.
  live_bitmap_->Set(obj);
#endif
}

void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
  COMPILE_ASSERT(sizeof(size_t) == sizeof(int32_t),
                 int32_t_must_be_same_size_as_size_t_for_used_atomic_operations);
  DCHECK_LE(freed_objects, num_objects_allocated_);
  android_atomic_add(-static_cast<int32_t>(freed_objects),
                     reinterpret_cast<volatile int32_t*>(&num_objects_allocated_));

  DCHECK_LE(freed_bytes, num_bytes_allocated_);
  android_atomic_add(-static_cast<int32_t>(freed_bytes),
                     reinterpret_cast<volatile int32_t*>(&num_bytes_allocated_));

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    thread_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
    thread_stats->freed_bytes += freed_bytes;
  }
}

Object* Heap::Allocate(size_t size) {
  Object* obj = Allocate(alloc_space_, size);
  if (obj != NULL) {
    RecordAllocation(alloc_space_, obj);
    return obj;
  }

  return NULL;
}

Object* Heap::Allocate(AllocSpace* space, size_t alloc_size) {
  Thread* self = Thread::Current();
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
#ifndef NDEBUG
  {
    MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_);
    CHECK_EQ(self->GetState(), kRunnable);
  }
  self->AssertThreadSuspensionIsAllowable();
#endif

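  // The attempts below escalate: (1) allocate without growing, (2) wait for the concurrent GC or
  // run a foreground partial/full GC and retry, (3) sticky GC and retry, (4) partial GC and
  // retry, (5) allocate with growth, (6) full GC clearing SoftReferences, then one last
  // allocation with growth before returning NULL (which becomes an OutOfMemoryError upstream).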
  Object* ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it completes; otherwise request a
  // foreground partial collection.
  if (!WaitForConcurrentGcToComplete()) {
    // No concurrent GC, so perform a foreground collection.
    if (Runtime::Current()->HasStatsEnabled()) {
      ++Runtime::Current()->GetStats()->gc_for_alloc_count;
      ++Thread::Current()->GetStats()->gc_for_alloc_count;
    }
    self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
    CollectGarbageInternal(have_zygote_space_ ? GC_PARTIAL : GC_FULL, false);
    self->TransitionFromSuspendedToRunnable();
  }

  ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  const size_t alloc_space_size = alloc_space_->Size();
  if (alloc_space_size > kMinAllocSpaceSizeForStickyGC &&
      alloc_space_->Capacity() - alloc_space_size < kMinRemainingSpaceForStickyGC) {
    // The GC didn't free enough memory; try a sticky GC, which only examines objects allocated
    // since the last collection.
    if (Runtime::Current()->HasStatsEnabled()) {
      ++Runtime::Current()->GetStats()->gc_for_alloc_count;
      ++Thread::Current()->GetStats()->gc_for_alloc_count;
    }

    // Don't bother trying a young GC unless we have a few MB AllocSpace.
    self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
    CollectGarbageInternal(GC_STICKY, false);
    self->TransitionFromSuspendedToRunnable();

    ptr = space->AllocWithoutGrowth(alloc_size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  if (!have_zygote_space_) {
    // Partial GC didn't free enough memory, try a full GC.
    if (Runtime::Current()->HasStatsEnabled()) {
      ++Runtime::Current()->GetStats()->gc_for_alloc_count;
      ++Thread::Current()->GetStats()->gc_for_alloc_count;
    }
    self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
    CollectGarbageInternal(GC_PARTIAL, false);
    self->TransitionFromSuspendedToRunnable();

    ptr = space->AllocWithoutGrowth(alloc_size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Allocations have failed after GCs; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(alloc_size);
  if (ptr != NULL) {
    size_t new_footprint = space->GetFootprintLimit();
    // OLD-TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    VLOG(gc) << "Grow heap (frag case) to " << PrettySize(new_footprint)
             << " for a " << PrettySize(alloc_size) << " allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
  // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size) << " allocation";

  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  // We don't need a WaitForConcurrentGcToComplete here either.
  self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
  CollectGarbageInternal(GC_FULL, true);
  self->TransitionFromSuspendedToRunnable();
  return space->AllocWithGrowth(alloc_size);
}

int64_t Heap::GetMaxMemory() {
  size_t total = 0;
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    if (space->IsAllocSpace()) {
      total += space->AsAllocSpace()->Capacity();
    }
  }
  return total;
}

int64_t Heap::GetTotalMemory() {
  return GetMaxMemory();
}

int64_t Heap::GetFreeMemory() {
  return GetMaxMemory() - num_bytes_allocated_;
}

class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class == class_) {
        ++count_;
      }
    } else {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
  InstanceCounter counter(c, count_assignable);
  GetLiveBitmap()->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

void Heap::CollectGarbage(bool clear_soft_references) {
  // If we just waited for a GC to complete then we do not need to do another
  // GC unless we clear soft references.
  if (!WaitForConcurrentGcToComplete() || clear_soft_references) {
    ScopedThreadStateChange tsc(Thread::Current(), kWaitingPerformingGc);
    CollectGarbageInternal(have_zygote_space_ ? GC_PARTIAL : GC_FULL, clear_soft_references);
  }
}

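// Called on the zygote side before the first application fork. Everything allocated so far is
// sealed into a rarely collected "zygote space" so its pages stay clean and can be shared
// copy-on-write by every forked app process, while a fresh alloc space takes over new allocations.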
void Heap::PreZygoteFork() {
  static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
  MutexLock mu(zygote_creation_lock_);

  // Check whether we already have a zygote space.
  if (have_zygote_space_) {
    return;
  }

  VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(alloc_space_->Size());

  {
    // Flush the alloc stack.
    WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
    FlushAllocStack();
  }

  // Replace the first alloc space we find with a zygote space.
  // TODO: C++0x auto
  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsAllocSpace()) {
      AllocSpace* zygote_space = (*it)->AsAllocSpace();

      // Turn the current alloc space into a zygote space and obtain the new alloc space, composed
      // of the remaining available heap memory.
      alloc_space_ = zygote_space->CreateZygoteSpace();

      // Change the GC retention policy of the zygote space to only collect when full.
      zygote_space->SetGcRetentionPolicy(GCRP_FULL_COLLECT);
      AddSpace(alloc_space_);
      have_zygote_space_ = true;
      break;
    }
  }
}

void Heap::FlushAllocStack() {
  MarkStackAsLive(allocation_stack_.get());
  allocation_stack_->Reset();
}

void Heap::MarkStackAsLive(MarkStack* alloc_stack) {
  // We can just assume everything is inside the alloc_space_'s bitmap since we should only have
  // fresh allocations.
  SpaceBitmap* live_bitmap = alloc_space_->GetLiveBitmap();

  // Empty the allocation stack.
  const size_t count = alloc_stack->Size();
  for (size_t i = 0; i < count; ++i) {
    const Object* obj = alloc_stack->Get(i);
    DCHECK(obj != NULL);
    live_bitmap->Set(obj);
  }
}

void Heap::UnMarkStack(MarkStack* alloc_stack) {
  SpaceBitmap* mark_bitmap = alloc_space_->GetMarkBitmap();

  // Clear all of the things in the AllocStack.
  size_t count = alloc_stack->Size();
  for (size_t i = 0; i < count; ++i) {
    const Object* obj = alloc_stack->Get(i);
    DCHECK(obj != NULL);
    if (mark_bitmap->Test(obj)) {
      mark_bitmap->Clear(obj);
    }
  }
}

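// Informal recap of the GC types used below: GC_FULL collects all spaces except the image,
// GC_PARTIAL additionally skips the zygote space, and GC_STICKY only considers objects allocated
// since the previous GC (found via the allocation stack and dirty cards), trading collection
// thoroughness for much shorter pauses.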
void Heap::CollectGarbageInternal(GcType gc_type, bool clear_soft_references) {
  GlobalSynchronization::mutator_lock_->AssertNotHeld();
#ifndef NDEBUG
  {
    MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_);
    CHECK_EQ(Thread::Current()->GetState(), kWaitingPerformingGc);
  }
#endif

  // Ensure there is only one GC at a time.
  bool start_collect = false;
  while (!start_collect) {
    {
      MutexLock mu(*gc_complete_lock_);
      if (!is_gc_running_) {
        is_gc_running_ = true;
        start_collect = true;
      }
    }
    if (!start_collect) {
      WaitForConcurrentGcToComplete();
      // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
      // Not doing so at the moment to ensure soft references are cleared.
    }
  }
  gc_complete_lock_->AssertNotHeld();

  // We need to do partial GCs every now and then to avoid the heap growing too much and
  // fragmenting.
  if (gc_type == GC_STICKY && ++sticky_gc_count_ > kPartialGCFrequency) {
    gc_type = GC_PARTIAL;
  }
  if (gc_type != GC_STICKY) {
    sticky_gc_count_ = 0;
  }

  if (concurrent_gc_) {
    CollectGarbageConcurrentMarkSweepPlan(gc_type, clear_soft_references);
  } else {
    CollectGarbageMarkSweepPlan(gc_type, clear_soft_references);
  }

  gc_complete_lock_->AssertNotHeld();
  MutexLock mu(*gc_complete_lock_);
  is_gc_running_ = false;
  // Wake anyone who may have been waiting for the GC to complete.
  gc_complete_cond_->Broadcast();
}

void Heap::CollectGarbageMarkSweepPlan(GcType gc_type, bool clear_soft_references) {
  TimingLogger timings("CollectGarbageInternal", true);

  // Suspend all threads and get exclusive access to the heap.
  uint64_t start_time = NanoTime();
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();
  timings.AddSplit("SuspendAll");
  GlobalSynchronization::mutator_lock_->AssertExclusiveHeld();

  size_t bytes_freed = 0;
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep(mark_stack_.get());

    mark_sweep.Init();
    timings.AddSplit("Init");

    // Make sure that the tables have the correct pointer for the mark sweep.
    mod_union_table_->Init(&mark_sweep);
    zygote_mod_union_table_->Init(&mark_sweep);

    // Swap allocation stack and live stack, enabling us to have new allocations during this GC.
    MarkStack* temp = allocation_stack_.release();
    allocation_stack_.reset(live_stack_.release());
    live_stack_.reset(temp);

    // We will need to know which cards were dirty for doing concurrent processing of dirty cards.
    // TODO: Investigate using a mark stack instead of a vector.
    std::vector<byte*> dirty_cards;
    if (gc_type == GC_STICKY) {
      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
        card_table_->GetDirtyCards(*it, dirty_cards);
      }
    }

    // Clear image space cards and keep track of cards we cleared in the mod-union table.
    for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
      Space* space = *it;
      if (space->IsImageSpace()) {
        mod_union_table_->ClearCards(*it);
        timings.AddSplit("ClearModUnionCards");
      } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
        zygote_mod_union_table_->ClearCards(space);
        timings.AddSplit("ClearZygoteCards");
      } else {
        card_table_->ClearSpaceCards(space);
        timings.AddSplit("ClearCards");
      }
    }

#if VERIFY_MOD_UNION
    mod_union_table_->Verify();
    zygote_mod_union_table_->Verify();
#endif

    WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
    if (gc_type == GC_PARTIAL) {
      // Copy the mark bits over from the live bits; do this as early as possible or else we can
      // accidentally un-mark roots.
      // Needed for scanning dirty objects.
      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
        if ((*it)->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
          mark_sweep.CopyMarkBits(*it);
        }
      }
      timings.AddSplit("CopyMarkBits");

      // We can assume that everything < alloc_space_ start is marked at this point.
      mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
    } else if (gc_type == GC_STICKY) {
      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
        if ((*it)->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
          mark_sweep.CopyMarkBits(*it);
        }
      }
      timings.AddSplit("CopyMarkBits");

      if (VERIFY_OBJECT_ENABLED) {
        UnMarkStack(live_stack_.get());
      }

      mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
    }

    MarkStackAsLive(live_stack_.get());

    mark_sweep.MarkRoots();
    timings.AddSplit("MarkRoots");

    // Roots are marked on the bitmap and the mark_stack is empty.
    DCHECK(mark_sweep.IsMarkStackEmpty());

    // Update zygote mod union table.
    zygote_mod_union_table_->Update();
    timings.AddSplit("UpdateZygoteModUnionTable");

    zygote_mod_union_table_->MarkReferences();
    timings.AddSplit("ZygoteMarkReferences");

    // Processes the cards we cleared earlier and adds their objects into the mod-union table.
    mod_union_table_->Update();
    timings.AddSplit("UpdateModUnionTable");

    // Scans all objects in the mod-union table.
    mod_union_table_->MarkReferences();
    timings.AddSplit("MarkImageToAllocSpaceReferences");

    // Recursively mark all the non-image bits set in the mark bitmap.
    if (gc_type != GC_STICKY) {
      live_stack_->Reset();
      mark_sweep.RecursiveMark(gc_type == GC_PARTIAL, timings);
    } else {
      mark_sweep.RecursiveMarkCards(card_table_.get(), dirty_cards, timings);
    }

    // Need to process references before the swap since it uses IsMarked.
    mark_sweep.ProcessReferences(clear_soft_references);
    timings.AddSplit("ProcessReferences");

    // This doesn't work with mutators unpaused for some reason. TODO: Fix.
    mark_sweep.SweepSystemWeaks(false);
    timings.AddSplit("SweepSystemWeaks");

    // Need to swap for VERIFY_OBJECT_ENABLED since we put things in the live bitmap after they
    // have been allocated.
    const bool swap = true;

    if (swap) {
      // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
      // these bitmaps. Doing this enables us to sweep with the heap unlocked since new allocations
      // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark
      // bit instead, resulting in no new allocated objects being incorrectly freed by sweep.
      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
        Space* space = *it;
        // We only allocate into AllocSpace, so we only need to swap AllocSpaces.
        if (space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT) {
          live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
          mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap());
          space->AsAllocSpace()->SwapBitmaps();
        }
      }
    }

#ifndef NDEBUG
    // Verify that we only reach marked objects from the image space.
    mark_sweep.VerifyImageRoots();
    timings.AddSplit("VerifyImageRoots");
#endif

    if (gc_type != GC_STICKY) {
      mark_sweep.Sweep(gc_type == GC_PARTIAL, swap);
    } else {
      mark_sweep.SweepArray(timings, live_stack_.get(), swap);
    }
    timings.AddSplit("Sweep");

    cleared_references = mark_sweep.GetClearedReferences();
    bytes_freed = mark_sweep.GetFreedBytes();
  }

  GrowForUtilization();
  timings.AddSplit("GrowForUtilization");

  thread_list->ResumeAll();
  timings.AddSplit("ResumeAll");

  EnqueueClearedReferences(&cleared_references);
  RequestHeapTrim();
  timings.AddSplit("Finish");

  // If the GC was slow, then print timings in the log.
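  // (The divide-then-multiply below intentionally truncates the duration to whole microseconds.)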
  uint64_t duration = (NanoTime() - start_time) / 1000 * 1000;
  if (duration > MsToNs(50)) {
    const size_t percent_free = GetPercentFree();
    const size_t num_bytes_allocated = num_bytes_allocated_;
    const size_t total_memory = GetTotalMemory();
    LOG(INFO) << (gc_type == GC_PARTIAL ? "Partial " : (gc_type == GC_STICKY ? "Sticky " : ""))
              << "GC freed " << PrettySize(bytes_freed) << ", " << percent_free << "% free, "
              << PrettySize(num_bytes_allocated) << "/" << PrettySize(total_memory) << ", "
              << "paused " << PrettyDuration(duration);
  }

  if (VLOG_IS_ON(heap)) {
    timings.Dump();
  }
}

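// The concurrent plan differs from the stop-the-world plan above by pausing the mutators twice
// instead of once: a short pause to mark the roots (timed by root_begin/root_end), a concurrent
// marking phase with the world resumed (mutator writes are caught by the card table), then a
// second short pause (dirty_begin/dirty_end) to re-scan whatever was dirtied before sweeping.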
void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft_references) {
  TimingLogger timings("ConcurrentCollectGarbageInternal", true);
  uint64_t root_begin = NanoTime(), root_end = 0, dirty_begin = 0, dirty_end = 0;

  // Suspend all threads and get exclusive access to the heap.
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();
  timings.AddSplit("SuspendAll");
  GlobalSynchronization::mutator_lock_->AssertExclusiveHeld();

  size_t bytes_freed = 0;
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep(mark_stack_.get());
    timings.AddSplit("ctor");

    mark_sweep.Init();
    timings.AddSplit("Init");

    // Swap the stacks; this is safe since all the mutators are suspended at this point.
    MarkStack* temp = allocation_stack_.release();
    allocation_stack_.reset(live_stack_.release());
    live_stack_.reset(temp);

    // We will need to know which cards were dirty for doing concurrent processing of dirty cards.
    // TODO: Investigate using a mark stack instead of a vector.
    std::vector<byte*> dirty_cards;
    if (gc_type == GC_STICKY) {
      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
        card_table_->GetDirtyCards(*it, dirty_cards);
      }
    }

    // Make sure that the tables have the correct pointer for the mark sweep.
    mod_union_table_->Init(&mark_sweep);
    zygote_mod_union_table_->Init(&mark_sweep);

    // Clear image space cards and keep track of cards we cleared in the mod-union table.
    for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
      Space* space = *it;
      if (space->IsImageSpace()) {
        mod_union_table_->ClearCards(*it);
        timings.AddSplit("ModUnionClearCards");
      } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
        zygote_mod_union_table_->ClearCards(space);
        timings.AddSplit("ZygoteModUnionClearCards");
      } else {
        card_table_->ClearSpaceCards(space);
        timings.AddSplit("ClearCards");
      }
    }

#if VERIFY_MOD_UNION
    mod_union_table_->Verify();
    zygote_mod_union_table_->Verify();
#endif

Ian Rogers00f7d0e2012-07-19 15:28:27 -07001090
1091 {
1092 WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001093
1094 if (gc_type == GC_PARTIAL) {
1095      // Copy the mark bits over from the live bits; do this as early as possible or else we can
1096 // accidentally un-mark roots.
1097 // Needed for scanning dirty objects.
1098      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
1099 if ((*it)->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
1100 mark_sweep.CopyMarkBits(*it);
1101 }
1102 }
1103 timings.AddSplit("CopyMarkBits");
1104 mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
1105 } else if (gc_type == GC_STICKY) {
1106      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
1107 if ((*it)->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
1108 mark_sweep.CopyMarkBits(*it);
1109 }
1110 }
1111 timings.AddSplit("CopyMarkBits");
1112      // We need to unmark the new objects, since we marked them as live earlier only to keep
1113      // object verification from failing.
1114 if (VERIFY_OBJECT_ENABLED) {
1115 UnMarkStack(live_stack_.get());
1116 }
1117 mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
1118 }
1119
1120 // TODO: Investigate whether or not this is really necessary for sticky mark bits.
1121 MarkStackAsLive(live_stack_.get());
1122
1123 if (gc_type != GC_STICKY) {
1124 live_stack_->Reset();
1125 mark_sweep.MarkRoots();
1126 timings.AddSplit("MarkRoots");
1127 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001128 }
1129
1130 // Roots are marked on the bitmap and the mark_stack is empty.
1131 DCHECK(mark_sweep.IsMarkStackEmpty());
1132
1133    // Allow mutators to go again; acquire a share of the mutator_lock_ to continue.
1134 thread_list->ResumeAll();
1135 {
1136 ReaderMutexLock reader_lock(*GlobalSynchronization::mutator_lock_);
1137 root_end = NanoTime();
1138 timings.AddSplit("RootEnd");
1139
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001140 WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
1141 if (gc_type != GC_STICKY) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001142 // Update zygote mod union table.
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001143 if (gc_type == GC_PARTIAL) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001144 zygote_mod_union_table_->Update();
1145 timings.AddSplit("UpdateZygoteModUnionTable");
1146
1147 zygote_mod_union_table_->MarkReferences();
1148 timings.AddSplit("ZygoteMarkReferences");
1149 }
1150
1151 // Processes the cards we cleared earlier and adds their objects into the mod-union table.
1152 mod_union_table_->Update();
1153 timings.AddSplit("UpdateModUnionTable");
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001154
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001155 // Scans all objects in the mod-union table.
1156 mod_union_table_->MarkReferences();
1157 timings.AddSplit("MarkImageToAllocSpaceReferences");
1158
1159 // Recursively mark all the non-image bits set in the mark bitmap.
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001160 mark_sweep.RecursiveMark(gc_type == GC_PARTIAL, timings);
1161 } else {
1162 mark_sweep.RecursiveMarkCards(card_table_.get(), dirty_cards, timings);
1163 mark_sweep.DisableFinger();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001164 }
1165 }
1166 // Release share on mutator_lock_ and then get exclusive access.
1167 dirty_begin = NanoTime();
1168 thread_list->SuspendAll();
1169 timings.AddSplit("ReSuspend");
1170 GlobalSynchronization::mutator_lock_->AssertExclusiveHeld();
1171
1172 {
1173 WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001174
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001175 // Re-mark root set.
1176 mark_sweep.ReMarkRoots();
1177 timings.AddSplit("ReMarkRoots");
1178
1179      // Scan the objects dirtied while marking ran concurrently with the mutators.
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001180 mark_sweep.RecursiveMarkDirtyObjects(false);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001181 timings.AddSplit("RecursiveMarkDirtyObjects");
1182 }
1183 {
1184 ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
1185 mark_sweep.ProcessReferences(clear_soft_references);
1186 timings.AddSplit("ProcessReferences");
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001187
1188      // This doesn't work with mutators unpaused for some reason. TODO: Fix.
1189 mark_sweep.SweepSystemWeaks(false);
1190 timings.AddSplit("SweepSystemWeaks");
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001191 }
1192    // Swap the live and mark bitmaps for each alloc space. This is needed because sweep
1193    // re-swaps these bitmaps. It lets us sweep with the heap unlocked: new allocations set
1194    // the live bit, but since the bitmaps are reversed at this point they actually set the
1195    // mark bit, so no newly allocated object can be incorrectly freed by the sweep.
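    // Note: swap is currently always true; the flag is threaded through to Sweep and
    // SweepArray below so they know the live and mark bitmaps have been exchanged.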
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001196 bool swap = true;
1197 if (swap) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001198 WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
1199 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
1200 Space* space = *it;
1201 // We never allocate into zygote spaces.
1202 if (space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT) {
1203 live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
1204 mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap());
1205 space->AsAllocSpace()->SwapBitmaps();
1206 }
1207 }
1208 }
1209
1210 if (kIsDebugBuild) {
1211 // Verify that we only reach marked objects from the image space.
1212 ReaderMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
1213 mark_sweep.VerifyImageRoots();
1214 timings.AddSplit("VerifyImageRoots");
1215 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001216
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001217 thread_list->ResumeAll();
1218 dirty_end = NanoTime();
1219 GlobalSynchronization::mutator_lock_->AssertNotHeld();
1220
1221 {
1222 // TODO: this lock shouldn't be necessary (it's why we did the bitmap flip above).
1223 WriterMutexLock mu(*GlobalSynchronization::heap_bitmap_lock_);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001224 if (gc_type != GC_STICKY) {
1225 mark_sweep.Sweep(gc_type == GC_PARTIAL, swap);
1226 } else {
1227 mark_sweep.SweepArray(timings, live_stack_.get(), swap);
1228 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001229 timings.AddSplit("Sweep");
1230 }
1231
1232 cleared_references = mark_sweep.GetClearedReferences();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001233 bytes_freed = mark_sweep.GetFreedBytes();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001234 }
1235
1236 GrowForUtilization();
1237 timings.AddSplit("GrowForUtilization");
1238
1239 EnqueueClearedReferences(&cleared_references);
1240 RequestHeapTrim();
1241 timings.AddSplit("Finish");
1242
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001243 // If the GC was slow, then print timings in the log.
1244 uint64_t pause_roots = (root_end - root_begin) / 1000 * 1000;
1245 uint64_t pause_dirty = (dirty_end - dirty_begin) / 1000 * 1000;
Mathieu Chartier637e3482012-08-17 10:41:32 -07001246 uint64_t duration = (NanoTime() - root_begin) / 1000 * 1000;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001247 if (pause_roots > MsToNs(5) || pause_dirty > MsToNs(5)) {
Mathieu Chartier637e3482012-08-17 10:41:32 -07001248 const size_t percent_free = GetPercentFree();
1249 const size_t num_bytes_allocated = num_bytes_allocated_;
1250 const size_t total_memory = GetTotalMemory();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001251 LOG(INFO) << (gc_type == GC_PARTIAL ? "Partial " : (gc_type == GC_STICKY ? "Sticky " : ""))
Mathieu Chartier637e3482012-08-17 10:41:32 -07001252 << "Concurrent GC freed " << PrettySize(bytes_freed) << ", " << percent_free
1253 << "% free, " << PrettySize(num_bytes_allocated) << "/"
1254 << PrettySize(total_memory) << ", " << "paused " << PrettyDuration(pause_roots)
1255 << "+" << PrettyDuration(pause_dirty) << " total " << PrettyDuration(duration);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001256 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001257
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001258 if (VLOG_IS_ON(heap)) {
1259 timings.Dump();
1260 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001261}
1262
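// Returns true if a concurrent GC was running and we waited for it to complete; returns
// false immediately when concurrent GC is disabled or no GC is in progress.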
Mathieu Chartierfc8cfac2012-06-19 11:56:36 -07001263bool Heap::WaitForConcurrentGcToComplete() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001264 if (concurrent_gc_) {
1265 bool do_wait = false;
1266 uint64_t wait_start;
1267 {
1268      // Check whether a GC is running, while holding gc_complete_lock_.
1269 MutexLock mu(*gc_complete_lock_);
1270 if (is_gc_running_) {
1271 wait_start = NanoTime();
1272 do_wait = true;
1273 }
Mathieu Chartiera6399032012-06-11 18:49:50 -07001274 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001275 if (do_wait) {
1276      // We must wait; change the thread state, then sleep on gc_complete_cond_.
1277 ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete);
1278 {
1279 MutexLock mu(*gc_complete_lock_);
1280 while (is_gc_running_) {
1281 gc_complete_cond_->Wait(*gc_complete_lock_);
1282 }
1283 }
1284 uint64_t wait_time = NanoTime() - wait_start;
1285 if (wait_time > MsToNs(5)) {
1286 LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time);
1287 }
1288 return true;
1289 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001290 }
Mathieu Chartierfc8cfac2012-06-19 11:56:36 -07001291 return false;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001292}
1293
Elliott Hughesc967f782012-04-16 10:23:15 -07001294void Heap::DumpForSigQuit(std::ostream& os) {
1295 os << "Heap: " << GetPercentFree() << "% free, "
1296 << PrettySize(num_bytes_allocated_) << "/" << PrettySize(GetTotalMemory())
Elliott Hughesae80b492012-04-24 10:43:17 -07001297 << "; " << num_objects_allocated_ << " objects\n";
Elliott Hughesc967f782012-04-16 10:23:15 -07001298}
1299
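// Illustrative numbers: with 24 MB allocated against a 96 MB total footprint, this
// returns 100 - 25 = 75.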
1300size_t Heap::GetPercentFree() {
1301 size_t total = GetTotalMemory();
1302 return 100 - static_cast<size_t>(100.0f * static_cast<float>(num_bytes_allocated_) / total);
1303}
1304
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08001305void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001306 AllocSpace* alloc_space = alloc_space_;
1307 // TODO: Behavior for multiple alloc spaces?
1308 size_t alloc_space_capacity = alloc_space->Capacity();
1309 if (max_allowed_footprint > alloc_space_capacity) {
1310 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint)
1311 << " to " << PrettySize(alloc_space_capacity);
1312 max_allowed_footprint = alloc_space_capacity;
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001313 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001314 alloc_space->SetFootprintLimit(max_allowed_footprint);
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001315}
1316
Ian Rogers3bb17a62012-01-27 23:56:44 -08001317// kHeapIdealFree is the ideal maximum amount of free space when we grow the heap for utilization.
Shih-wei Liao7f1caab2011-10-06 12:11:04 -07001318static const size_t kHeapIdealFree = 2 * MB;
Ian Rogers3bb17a62012-01-27 23:56:44 -08001319// kHeapMinFree guarantees that you always have at least 512 KB free when you grow for
1320// utilization, regardless of the target utilization ratio.
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001321static const size_t kHeapMinFree = kHeapIdealFree / 4;
1322
Carl Shapiro69759ea2011-07-21 18:13:35 -07001323void Heap::GrowForUtilization() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001324 size_t target_size;
1325 bool use_footprint_limit = false;
1326 {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001327 // We know what our utilization is at this moment.
1328 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
1329 target_size = num_bytes_allocated_ / Heap::GetTargetHeapUtilization();
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001330
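    // Illustrative numbers: with 8 MB allocated and a target utilization of 0.5, the raw
    // target is 16 MB; that would leave more than kHeapIdealFree (2 MB) free, so it is
    // clamped below to 8 MB + 2 MB = 10 MB.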
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001331 if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
1332 target_size = num_bytes_allocated_ + kHeapIdealFree;
1333 } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
1334 target_size = num_bytes_allocated_ + kHeapMinFree;
1335 }
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001336
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001337 // Calculate when to perform the next ConcurrentGC.
1338 if (GetTotalMemory() - num_bytes_allocated_ < concurrent_min_free_) {
1339 // Not enough free memory to perform concurrent GC.
1340 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
1341 } else {
1342      // Computed below, to avoid holding both the statistics lock and the alloc space lock.
1343 use_footprint_limit = true;
1344 }
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001345 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001346
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001347 if (use_footprint_limit) {
1348 size_t foot_print_limit = alloc_space_->GetFootprintLimit();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001349 concurrent_start_bytes_ = foot_print_limit - concurrent_start_size_;
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001350 }
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001351 SetIdealFootprint(target_size);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001352}
1353
jeffhaoc1160702011-10-27 15:48:45 -07001354void Heap::ClearGrowthLimit() {
jeffhaoc1160702011-10-27 15:48:45 -07001355 WaitForConcurrentGcToComplete();
jeffhaoc1160702011-10-27 15:48:45 -07001356 alloc_space_->ClearGrowthLimit();
1357}
1358
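// Caches the field offsets of java.lang.ref.Reference (referent, queue, queueNext,
// pendingNext) and of FinalizerReference.zombie, so the GC can read and clear referents
// directly instead of calling into managed code.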
Elliott Hughesadb460d2011-10-05 17:02:34 -07001359void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
1360 MemberOffset reference_queue_offset,
1361 MemberOffset reference_queueNext_offset,
1362 MemberOffset reference_pendingNext_offset,
1363 MemberOffset finalizer_reference_zombie_offset) {
1364 reference_referent_offset_ = reference_referent_offset;
1365 reference_queue_offset_ = reference_queue_offset;
1366 reference_queueNext_offset_ = reference_queueNext_offset;
1367 reference_pendingNext_offset_ = reference_pendingNext_offset;
1368 finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
1369 CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1370 CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
1371 CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
1372 CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
1373 CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
1374}
1375
1376Object* Heap::GetReferenceReferent(Object* reference) {
1377 DCHECK(reference != NULL);
1378 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1379 return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
1380}
1381
1382void Heap::ClearReferenceReferent(Object* reference) {
1383 DCHECK(reference != NULL);
1384 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1385 reference->SetFieldObject(reference_referent_offset_, NULL, true);
1386}
1387
1388// Returns true if the reference object has not yet been enqueued.
1389bool Heap::IsEnqueuable(const Object* ref) {
1390 DCHECK(ref != NULL);
1391 const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
1392 const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
1393 return (queue != NULL) && (queue_next == NULL);
1394}
1395
1396void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
1397 DCHECK(ref != NULL);
1398 CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
1399 CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
1400 EnqueuePendingReference(ref, cleared_reference_list);
1401}
1402
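// The pending list is a circular singly-linked list threaded through pendingNext;
// *list points at an element whose pendingNext field is the head. Enqueuing inserts
// at the head, and DequeuePendingReference below removes the head, so order is LIFO.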
1403void Heap::EnqueuePendingReference(Object* ref, Object** list) {
1404 DCHECK(ref != NULL);
1405 DCHECK(list != NULL);
1406
1407 if (*list == NULL) {
1408 ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
1409 *list = ref;
1410 } else {
1411 Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
1412 ref->SetFieldObject(reference_pendingNext_offset_, head, false);
1413 (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
1414 }
1415}
1416
1417Object* Heap::DequeuePendingReference(Object** list) {
1418 DCHECK(list != NULL);
1419 DCHECK(*list != NULL);
1420 Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
1421 Object* ref;
1422 if (*list == head) {
1423 ref = *list;
1424 *list = NULL;
1425 } else {
1426 Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
1427 (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
1428 ref = head;
1429 }
1430 ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
1431 return ref;
1432}
1433
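// Registers the object with the managed runtime by calling FinalizerReference.add(Object),
// so that a FinalizerReference exists to run the finalizer once the object dies.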
Ian Rogers5d4bdc22011-11-02 22:15:43 -07001434void Heap::AddFinalizerReference(Thread* self, Object* object) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001435 ScopedObjectAccess soa(self);
Elliott Hughes77405792012-03-15 15:22:12 -07001436 JValue args[1];
Elliott Hughesf24d3ce2012-04-11 17:43:37 -07001437 args[0].SetL(object);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001438 soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self,
1439 NULL, args, NULL);
1440}
1441
1442size_t Heap::GetBytesAllocated() const {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001443 return num_bytes_allocated_;
1444}
1445
1446size_t Heap::GetObjectsAllocated() const {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001447 return num_objects_allocated_;
1448}
1449
1450size_t Heap::GetConcurrentStartSize() const {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001451 return concurrent_start_size_;
1452}
1453
1454size_t Heap::GetConcurrentMinFree() const {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001455 return concurrent_min_free_;
Elliott Hughesadb460d2011-10-05 17:02:34 -07001456}
1457
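// Hands the chain of cleared references to the managed ReferenceQueue.add(), which
// distributes each reference onto the queue it was registered with.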
1458void Heap::EnqueueClearedReferences(Object** cleared) {
1459 DCHECK(cleared != NULL);
1460 if (*cleared != NULL) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001461 ScopedObjectAccess soa(Thread::Current());
Elliott Hughes77405792012-03-15 15:22:12 -07001462 JValue args[1];
Elliott Hughesf24d3ce2012-04-11 17:43:37 -07001463 args[0].SetL(*cleared);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001464 soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
1465 NULL, args, NULL);
Elliott Hughesadb460d2011-10-05 17:02:34 -07001466 *cleared = NULL;
1467 }
1468}
1469
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001470void Heap::RequestConcurrentGC() {
Mathieu Chartier069387a2012-06-18 12:01:01 -07001471 // Make sure that we can do a concurrent GC.
1472 if (requesting_gc_ ||
1473 !Runtime::Current()->IsFinishedStarting() ||
1474 Runtime::Current()->IsShuttingDown() ||
1475 !Runtime::Current()->IsConcurrentGcEnabled()) {
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001476 return;
1477 }
1478
1479 requesting_gc_ = true;
1480 JNIEnv* env = Thread::Current()->GetJniEnv();
Mathieu Chartiera6399032012-06-11 18:49:50 -07001481 DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
1482 DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001483 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
1484 WellKnownClasses::java_lang_Daemons_requestGC);
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001485 CHECK(!env->ExceptionCheck());
1486 requesting_gc_ = false;
1487}
1488
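// Entry point for the GC daemon thread, called in response to RequestConcurrentGC().
// Prefers a sticky GC once the alloc space is large enough for one to pay off;
// otherwise falls back to a partial GC.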
1489void Heap::ConcurrentGC() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001490 if (Runtime::Current()->IsShuttingDown() || !concurrent_gc_) {
Mathieu Chartier2542d662012-06-21 17:14:11 -07001491 return;
1492 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001493
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001494  // TODO: We shouldn't need a WaitForConcurrentGcToComplete here, since only the
1495  // concurrent GC resumes threads before the GC is completed, and this function
1496  // is only called from within the GC daemon thread.
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001497 if (!WaitForConcurrentGcToComplete()) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001498    // Start a concurrent GC, as one wasn't already in progress.
1499 ScopedThreadStateChange tsc(Thread::Current(), kWaitingPerformingGc);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001500 if (alloc_space_->Size() > kMinAllocSpaceSizeForStickyGC) {
1501 CollectGarbageInternal(GC_STICKY, false);
1502 } else {
1503 CollectGarbageInternal(GC_PARTIAL, false);
1504 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001505 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001506}
1507
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001508void Heap::Trim(AllocSpace* alloc_space) {
Mathieu Chartiera6399032012-06-11 18:49:50 -07001509 WaitForConcurrentGcToComplete();
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001510 alloc_space->Trim();
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001511}
1512
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001513void Heap::RequestHeapTrim() {
1514 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
1515 // because that only marks object heads, so a large array looks like lots of empty space. We
1516 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
1517 // to utilization (which is probably inversely proportional to how much benefit we can expect).
1518 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
1519 // not how much use we're making of those pages.
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001520 uint64_t ms_time = NsToMs(NanoTime());
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001521 {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001522 float utilization = static_cast<float>(num_bytes_allocated_) / alloc_space_->Size();
1523 if ((utilization > 0.75f) || ((ms_time - last_trim_time_) < 2 * 1000)) {
1524 // Don't bother trimming the heap if it's more than 75% utilized, or if a
1525 // heap trim occurred in the last two seconds.
1526 return;
1527 }
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001528 }
Mathieu Chartiera6399032012-06-11 18:49:50 -07001529 if (!Runtime::Current()->IsFinishedStarting() || Runtime::Current()->IsShuttingDown()) {
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001530    // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
Mathieu Chartiera6399032012-06-11 18:49:50 -07001531 // Also: we do not wish to start a heap trim if the runtime is shutting down.
Ian Rogerse1d490c2012-02-03 09:09:07 -08001532 return;
1533 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001534 last_trim_time_ = ms_time;
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001535 JNIEnv* env = Thread::Current()->GetJniEnv();
Mathieu Chartiera6399032012-06-11 18:49:50 -07001536 DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
1537 DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001538 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
1539 WellKnownClasses::java_lang_Daemons_requestHeapTrim);
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001540 CHECK(!env->ExceptionCheck());
1541}
1542
Carl Shapiro69759ea2011-07-21 18:13:35 -07001543} // namespace art