/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <sys/types.h>
#include <sys/wait.h>

#include <limits>
#include <vector>

#include "atomic.h"
#include "card_table.h"
#include "debugger.h"
#include "heap_bitmap.h"
#include "image.h"
#include "mark_sweep.h"
#include "mod_union_table.h"
#include "object.h"
#include "object_utils.h"
#include "os.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
#include "timing_logger.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

static void UpdateFirstAndLastSpace(Space** first_space, Space** last_space, Space* space) {
  if (*first_space == NULL) {
    *first_space = space;
    *last_space = space;
  } else {
    if ((*first_space)->Begin() > space->Begin()) {
      *first_space = space;
    } else if (space->Begin() > (*last_space)->Begin()) {
      *last_space = space;
    }
  }
}
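
// Worked example (hypothetical addresses): starting from NULL/NULL, adding spaces that begin at
// 0x70000000, 0x60000000 and 0x80000000 leaves *first_space at the 0x60000000 space and
// *last_space at the 0x80000000 space, i.e. the lowest and highest Begin() seen so far.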

static bool GenerateImage(const std::string& image_file_name) {
  const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
  std::vector<std::string> boot_class_path;
  Split(boot_class_path_string, ':', boot_class_path);
  if (boot_class_path.empty()) {
    LOG(FATAL) << "Failed to generate image because no boot class path specified";
  }

  std::vector<char*> arg_vector;

  std::string dex2oat_string(GetAndroidRoot());
  dex2oat_string += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
  const char* dex2oat = dex2oat_string.c_str();
  arg_vector.push_back(strdup(dex2oat));

  std::string image_option_string("--image=");
  image_option_string += image_file_name;
  const char* image_option = image_option_string.c_str();
  arg_vector.push_back(strdup(image_option));

  arg_vector.push_back(strdup("--runtime-arg"));
  arg_vector.push_back(strdup("-Xms64m"));

  arg_vector.push_back(strdup("--runtime-arg"));
  arg_vector.push_back(strdup("-Xmx64m"));

  for (size_t i = 0; i < boot_class_path.size(); i++) {
    std::string dex_file_option_string("--dex-file=");
    dex_file_option_string += boot_class_path[i];
    const char* dex_file_option = dex_file_option_string.c_str();
    arg_vector.push_back(strdup(dex_file_option));
  }

  std::string oat_file_option_string("--oat-file=");
  oat_file_option_string += image_file_name;
  oat_file_option_string.erase(oat_file_option_string.size() - 3);
  oat_file_option_string += "oat";
  const char* oat_file_option = oat_file_option_string.c_str();
  arg_vector.push_back(strdup(oat_file_option));

  arg_vector.push_back(strdup("--base=0x60000000"));

  std::string command_line(Join(arg_vector, ' '));
  LOG(INFO) << command_line;

  arg_vector.push_back(NULL);
  char** argv = &arg_vector[0];

  // fork and exec dex2oat
  pid_t pid = fork();
  if (pid == 0) {
    // no allocation allowed between fork and exec

    // change process groups, so we don't get reaped by ProcessManager
    setpgid(0, 0);

    execv(dex2oat, argv);

    PLOG(FATAL) << "execv(" << dex2oat << ") failed";
    return false;
  } else {
    STLDeleteElements(&arg_vector);

    // wait for dex2oat to finish
    int status;
    pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
    if (got_pid != pid) {
      PLOG(ERROR) << "waitpid failed: wanted " << pid << ", got " << got_pid;
      return false;
    }
    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
      LOG(ERROR) << dex2oat << " failed: " << command_line;
      return false;
    }
  }
  return true;
}
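
// For illustration only (all paths hypothetical), the command line logged above looks like:
//   /system/bin/dex2oat --image=/data/art-cache/boot.art --runtime-arg -Xms64m
//   --runtime-arg -Xmx64m --dex-file=/system/framework/core.jar
//   --oat-file=/data/art-cache/boot.oat --base=0x60000000
// Note how the --oat-file name is derived from --image by replacing the trailing "art" with
// "oat".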

Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
           const std::string& original_image_file_name, bool concurrent_gc)
    : alloc_space_(NULL),
      card_table_(NULL),
      concurrent_gc_(concurrent_gc),
      have_zygote_space_(false),
      card_marking_disabled_(false),
      is_gc_running_(false),
      last_gc_type_(kGcTypeNone),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      concurrent_start_size_(128 * KB),
      concurrent_min_free_(256 * KB),
      sticky_gc_count_(0),
      num_bytes_allocated_(0),
      num_objects_allocated_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(false),
      verify_post_gc_heap_(false),
      verify_mod_union_table_(false),
      partial_gc_frequency_(10),
      min_alloc_space_size_for_sticky_gc_(4 * MB),
      min_remaining_space_for_sticky_gc_(1 * MB),
      last_trim_time_(0),
      try_running_gc_(false),
      requesting_gc_(false),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      target_utilization_(0.5),
      verify_objects_(false) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }

  // Compute the bounds of all spaces for allocating live and mark bitmaps;
  // there will be at least one space (the alloc space).
  Space* first_space = NULL;
  Space* last_space = NULL;

  live_bitmap_.reset(new HeapBitmap(this));
  mark_bitmap_.reset(new HeapBitmap(this));

  // Requested begin for the alloc space, to follow the mapped image and oat files.
  byte* requested_begin = NULL;
  std::string image_file_name(original_image_file_name);
  if (!image_file_name.empty()) {
    Space* image_space = NULL;

    if (OS::FileExists(image_file_name.c_str())) {
      // If the /system file exists, it should be up-to-date; don't try to generate it.
      image_space = Space::CreateImageSpace(image_file_name);
    } else {
      // If the /system file didn't exist, we need to use one from the art-cache.
      // If the cache file exists, try to open it, but if that fails, regenerate.
      // If it does not exist, generate.
      image_file_name = GetArtCacheFilenameOrDie(image_file_name);
      if (OS::FileExists(image_file_name.c_str())) {
        image_space = Space::CreateImageSpace(image_file_name);
      }
      if (image_space == NULL) {
        if (!GenerateImage(image_file_name)) {
          LOG(FATAL) << "Failed to generate image: " << image_file_name;
        }
        image_space = Space::CreateImageSpace(image_file_name);
      }
    }
    if (image_space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_name;
    }

    AddSpace(image_space);
    UpdateFirstAndLastSpace(&first_space, &last_space, image_space);
    // Oat files referenced by image files immediately follow them in memory; ensure the alloc
    // space isn't going to get in the middle.
    byte* oat_end_addr = GetImageSpace()->GetImageHeader().GetOatEnd();
    CHECK(oat_end_addr > GetImageSpace()->End());
    if (oat_end_addr > requested_begin) {
      requested_begin = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_end_addr),
                                                        kPageSize));
    }
  }

  UniquePtr<AllocSpace> alloc_space(Space::CreateAllocSpace(
      "alloc space", initial_size, growth_limit, capacity, requested_begin));
  alloc_space_ = alloc_space.release();
  CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
  AddSpace(alloc_space_);

  UpdateFirstAndLastSpace(&first_space, &last_space, alloc_space_);
  byte* heap_begin = first_space->Begin();
  size_t heap_capacity = (last_space->Begin() - first_space->Begin()) + last_space->NonGrowthLimitCapacity();
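  // Worked example (hypothetical addresses): with an image space beginning at 0x60000000 and an
  // alloc space beginning at 0x70000000 with a 256 MB NonGrowthLimitCapacity(), heap_begin is
  // 0x60000000 and heap_capacity is 0x10000000 + 256 MB: the range the card table below must
  // cover.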

  // Mark image objects in the live bitmap.
  for (size_t i = 0; i < spaces_.size(); ++i) {
    Space* space = spaces_[i];
    if (space->IsImageSpace()) {
      space->AsImageSpace()->RecordImageAllocations(space->GetLiveBitmap());
    }
  }

  // Allocate the card table.
  card_table_.reset(CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  mod_union_table_.reset(new ModUnionTableToZygoteAllocspace<ModUnionTableReferenceCache>(this));
  CHECK(mod_union_table_.get() != NULL) << "Failed to create mod-union table";

  zygote_mod_union_table_.reset(new ModUnionTableCardCache(this));
  CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // Max stack size in bytes.
  static const size_t max_stack_size = capacity / SpaceBitmap::kAlignment * kWordSize;

  // TODO: Rename MarkStack to a more generic name?
  mark_stack_.reset(MarkStack::Create("dalvik-mark-stack", max_stack_size));
  allocation_stack_.reset(MarkStack::Create("dalvik-allocation-stack", max_stack_size));
  live_stack_.reset(MarkStack::Create("dalvik-live-stack", max_stack_size));
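  // Sizing sketch (illustrative, assuming an 8-byte SpaceBitmap::kAlignment and a 4-byte word):
  // a 256 MB capacity yields a 128 MB max_stack_size, i.e. one word-sized slot for every
  // minimally aligned object the heap could possibly hold.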

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable"));

  // Set up the cumulative timing loggers.
  for (size_t i = static_cast<size_t>(kGcTypeSticky); i < static_cast<size_t>(kGcTypeMax);
       ++i) {
    std::ostringstream name;
    name << static_cast<GcType>(i);
    cumulative_timings_.Put(static_cast<GcType>(i),
                            new CumulativeLogger(name.str().c_str(), true));
  }

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

// Sort spaces based on begin address.
class SpaceSorter {
 public:
  bool operator ()(const Space* a, const Space* b) const {
    return a->Begin() < b->Begin();
  }
};

void Heap::AddSpace(Space* space) {
  WriterMutexLock mu(*Locks::heap_bitmap_lock_);
  DCHECK(space != NULL);
  DCHECK(space->GetLiveBitmap() != NULL);
  live_bitmap_->AddSpaceBitmap(space->GetLiveBitmap());
  DCHECK(space->GetMarkBitmap() != NULL);
  mark_bitmap_->AddSpaceBitmap(space->GetMarkBitmap());
  spaces_.push_back(space);
  if (space->IsAllocSpace()) {
    alloc_space_ = space->AsAllocSpace();
  }

  // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger).
  std::sort(spaces_.begin(), spaces_.end(), SpaceSorter());

  // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
  // avoid redundant marking.
  bool seen_zygote = false, seen_alloc = false;
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    if (space->IsImageSpace()) {
      DCHECK(!seen_zygote);
      DCHECK(!seen_alloc);
    } else if (space->IsZygoteSpace()) {
      DCHECK(!seen_alloc);
      seen_zygote = true;
    } else if (space->IsAllocSpace()) {
      seen_alloc = true;
    }
  }
}

Heap::~Heap() {
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();

  VLOG(heap) << "~Heap()";
  // We can't take the heap lock here because there might be a daemon thread suspended with the
  // heap lock held. We know though that no non-daemon threads are executing, and we know that
  // all daemon threads are suspended, and we also know that the thread list has been deleted, so
  // those threads can't resume. We're the only running thread, and we can do whatever we like...
  STLDeleteElements(&spaces_);
  delete gc_complete_lock_;

  STLDeleteValues(&cumulative_timings_);
}

Space* Heap::FindSpaceFromObject(const Object* obj) const {
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->Contains(obj)) {
      return *it;
    }
  }
  LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  return NULL;
}

ImageSpace* Heap::GetImageSpace() {
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsImageSpace()) {
      return (*it)->AsImageSpace();
    }
  }
  return NULL;
}

AllocSpace* Heap::GetAllocSpace() {
  return alloc_space_;
}
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);

  size_t chunk_size = static_cast<size_t>(reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start));
  size_t chunk_free_bytes = 0;
  if (used_bytes < chunk_size) {
    chunk_free_bytes = chunk_size - used_bytes;
  }

  if (chunk_free_bytes > max_contiguous_allocation) {
    max_contiguous_allocation = chunk_free_bytes;
  }
}
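
// MSpaceChunkCallback is driven by AllocSpace::Walk in the out-of-memory path of AllocObject
// below: each mspace chunk reports its extent and used bytes, and the largest free remainder
// seen so far accumulates into *arg as the largest possible contiguous allocation.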

Object* Heap::AllocObject(Class* c, size_t byte_count) {
  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(Object));
  Object* obj = Allocate(alloc_space_, byte_count);
  if (LIKELY(obj != NULL)) {
    obj->SetClass(c);

    // Record the allocation after the SetClass so that the atomic add acts as a fence; we do
    // not want the class to appear NULL in another thread.
    RecordAllocation(alloc_space_, obj);

    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(c, byte_count);
    }
    const bool request_concurrent_gc = num_bytes_allocated_ >= concurrent_start_bytes_;
    if (request_concurrent_gc) {
      // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
      SirtRef<Object> ref(obj);
      RequestConcurrentGC();
    }
    VerifyObject(obj);

    // Additional verification to ensure that we did not allocate into a zygote space.
    DCHECK(!have_zygote_space_ || !FindSpaceFromObject(obj)->IsZygoteSpace());

    return obj;
  }
  int64_t total_bytes_free = GetFreeMemory();
  size_t max_contiguous_allocation = 0;
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsAllocSpace()) {
      (*it)->AsAllocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
    }
  }

  std::string msg(StringPrintf("Failed to allocate a %zd-byte %s (%lld total bytes free; largest possible contiguous allocation %zd bytes)",
                               byte_count, PrettyDescriptor(c).c_str(), total_bytes_free, max_contiguous_allocation));
  Thread::Current()->ThrowOutOfMemoryError(msg.c_str());
  return NULL;
}
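
// For illustration, the OutOfMemoryError thrown above carries a message of roughly this shape
// (all numbers hypothetical):
//   Failed to allocate a 24-byte java.lang.Object (1048576 total bytes free; largest
//   possible contiguous allocation 65536 bytes)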

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL) {
    return true;
  }
  if (!IsAligned<kObjectAlignment>(obj)) {
    return false;
  }
  for (size_t i = 0; i < spaces_.size(); ++i) {
    if (spaces_[i]->Contains(obj)) {
      return true;
    }
  }
  return false;
}

bool Heap::IsLiveObjectLocked(const Object* obj) {
  Locks::heap_bitmap_lock_->AssertReaderHeld();
  return IsHeapAddress(obj) && GetLiveBitmap()->Test(obj);
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (obj == NULL || this == NULL || !verify_objects_ || Runtime::Current()->IsShuttingDown() ||
      Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}
#endif

void Heap::DumpSpaces() {
  // TODO: C++0x auto
  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    LOG(INFO) << *space << "\n"
              << *space->GetLiveBitmap() << "\n"
              << *space->GetMarkBitmap();
  }
}

// We want to avoid bit rotting.
void Heap::VerifyObjectBody(const Object* obj) {
  if (!IsAligned<kObjectAlignment>(obj)) {
    LOG(FATAL) << "Object isn't aligned: " << obj;
  }

  if (!GetLiveBitmap()->Test(obj)) {
    // Check the allocation stack / live stack.
    if (!std::binary_search(live_stack_->Begin(), live_stack_->End(), obj) &&
        std::find(allocation_stack_->Begin(), allocation_stack_->End(), obj) ==
            allocation_stack_->End()) {
      DumpSpaces();
      LOG(FATAL) << "Object is dead: " << obj;
    }
  }

  // Ignore early dawn of the universe verifications.
  if (!VERIFY_OBJECT_FAST && num_objects_allocated_ > 10) {
    const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
        Object::ClassOffset().Int32Value();
    const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
    if (c == NULL) {
      LOG(FATAL) << "Null class in object: " << obj;
    } else if (!IsAligned<kObjectAlignment>(c)) {
      LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
    } else if (!GetLiveBitmap()->Test(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
    // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
    // Note: we don't use the accessors here as they have internal sanity checks
    // that we don't want to run.
    raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
    const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
    raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
    const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
    CHECK_EQ(c_c, c_c_c);
  }
}
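
// The final CHECK_EQ encodes the invariant that java.lang.Class is its own class: following the
// raw klass word from obj's class, and again from that class, must converge on the same Class
// pointer, which catches a corrupted class word without running the checked accessors.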

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

void Heap::RecordAllocation(AllocSpace* space, const Object* obj) {
  DCHECK(obj != NULL);

  size_t size = space->AllocationSize(obj);
  DCHECK_GT(size, 0u);
  COMPILE_ASSERT(sizeof(size_t) == sizeof(int32_t),
                 int32_t_must_be_same_size_as_size_t_for_used_atomic_operations);
  android_atomic_add(
      size, reinterpret_cast<volatile int32_t*>(reinterpret_cast<size_t>(&num_bytes_allocated_)));
  android_atomic_add(
      1, reinterpret_cast<volatile int32_t*>(reinterpret_cast<size_t>(&num_objects_allocated_)));

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  allocation_stack_->AtomicPush(obj);
}

void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
  COMPILE_ASSERT(sizeof(size_t) == sizeof(int32_t),
                 int32_t_must_be_same_size_as_size_t_for_used_atomic_operations);
  DCHECK_LE(freed_objects, num_objects_allocated_);
  android_atomic_add(-static_cast<int32_t>(freed_objects),
                     reinterpret_cast<volatile int32_t*>(
                         reinterpret_cast<size_t>(&num_objects_allocated_)));

  DCHECK_LE(freed_bytes, num_bytes_allocated_);
  android_atomic_add(-static_cast<int32_t>(freed_bytes),
                     reinterpret_cast<volatile int32_t*>(
                         reinterpret_cast<size_t>(&num_bytes_allocated_)));

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    thread_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
    thread_stats->freed_bytes += freed_bytes;
  }
}
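
// Counter discipline: num_bytes_allocated_ and num_objects_allocated_ are only ever adjusted
// via android_atomic_add (negative deltas here, positive ones in RecordAllocation), so mutator
// threads and the GC may update them without holding a lock; readers such as GetFreeMemory()
// tolerate momentarily stale values.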

Object* Heap::Allocate(AllocSpace* space, size_t alloc_size) {
  Thread* self = Thread::Current();
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
#ifndef NDEBUG
  {
    MutexLock mu(*Locks::thread_suspend_count_lock_);
    CHECK_EQ(self->GetState(), kRunnable);
  }
  self->AssertThreadSuspensionIsAllowable();
#endif

  Object* ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  GcType last_gc = WaitForConcurrentGcToComplete();
  if (last_gc != kGcTypeNone) {
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    Object* ptr = space->AllocWithoutGrowth(alloc_size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Loop through our different GC types and try to GC until we get enough free memory.
  for (size_t i = static_cast<size_t>(last_gc) + 1; i < static_cast<size_t>(kGcTypeMax); ++i) {
    bool run_gc = false;
    GcType gc_type = static_cast<GcType>(i);
    switch (gc_type) {
      case kGcTypeSticky: {
        const size_t alloc_space_size = alloc_space_->Size();
        run_gc = alloc_space_size > min_alloc_space_size_for_sticky_gc_ &&
            alloc_space_->Capacity() - alloc_space_size >= min_remaining_space_for_sticky_gc_;
        break;
      }
      case kGcTypePartial:
        run_gc = have_zygote_space_;
        break;
      case kGcTypeFull:
        run_gc = true;
        break;
      default:
        break;
    }

    if (run_gc) {
      if (Runtime::Current()->HasStatsEnabled()) {
        ++Runtime::Current()->GetStats()->gc_for_alloc_count;
        ++Thread::Current()->GetStats()->gc_for_alloc_count;
      }
      self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);

      // If we actually ran a different type of GC than requested, we can skip the index forwards.
      GcType gc_type_ran = CollectGarbageInternal(gc_type, false);
      DCHECK(static_cast<size_t>(gc_type_ran) >= i);
      i = static_cast<size_t>(gc_type_ran);
      self->TransitionFromSuspendedToRunnable();

      // Did we free sufficient memory for the allocation to succeed?
      ptr = space->AllocWithoutGrowth(alloc_size);
      if (ptr != NULL) {
        return ptr;
      }
    }
  }

  // Allocations have failed after GCs; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(alloc_size);
  if (ptr != NULL) {
    size_t new_footprint = space->GetFootprintLimit();
    // OLD-TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    VLOG(gc) << "Grow heap (frag case) to " << PrettySize(new_footprint)
             << " for a " << PrettySize(alloc_size) << " allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
  // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
           << " allocation";

  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  // We don't need a WaitForConcurrentGcToComplete here either.
  self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
  CollectGarbageInternal(kGcTypeFull, true);
  self->TransitionFromSuspendedToRunnable();
  return space->AllocWithGrowth(alloc_size);
}
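
// Summary of the escalation ladder above: plain allocation, then a retry after any in-flight
// GC, then progressively stronger collections (sticky, partial, full), then allocation with
// heap growth, and finally a full GC that clears SoftReferences before the caller (AllocObject)
// reports OutOfMemoryError.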

int64_t Heap::GetMaxMemory() {
  size_t total = 0;
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    if (space->IsAllocSpace()) {
      total += space->AsAllocSpace()->Capacity();
    }
  }
  return total;
}

int64_t Heap::GetTotalMemory() {
  return GetMaxMemory();
}

int64_t Heap::GetFreeMemory() {
  return GetMaxMemory() - num_bytes_allocated_;
}

class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class == class_) {
        ++count_;
      }
    } else {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
  InstanceCounter counter(c, count_assignable);
  GetLiveBitmap()->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}
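
// Note the flag semantics as written: count_assignable == true counts only objects whose class
// is exactly the given class, while false counts every live instance assignable to it; e.g.
// passing the java.lang.Object class with count_assignable == false would count essentially
// every live object.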

void Heap::CollectGarbage(bool clear_soft_references) {
  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
  // last GC will not have necessarily been cleared.
  WaitForConcurrentGcToComplete();
  ScopedThreadStateChange tsc(Thread::Current(), kWaitingPerformingGc);
  CollectGarbageInternal(have_zygote_space_ ? kGcTypePartial : kGcTypeFull, clear_soft_references);
}

void Heap::PreZygoteFork() {
  static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
  MutexLock mu(zygote_creation_lock_);

  // Check whether we already have a Zygote space.
  if (have_zygote_space_) {
    return;
  }

  VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(alloc_space_->Size());

  {
    // Flush the alloc stack.
    WriterMutexLock mu(*Locks::heap_bitmap_lock_);
    FlushAllocStack();
  }

  // Replace the first alloc space we find with a zygote space.
  // TODO: C++0x auto
  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsAllocSpace()) {
      AllocSpace* zygote_space = (*it)->AsAllocSpace();

      // Turn the current alloc space into a Zygote space and obtain the new alloc space composed
      // of the remaining available heap memory.
      alloc_space_ = zygote_space->CreateZygoteSpace();

      // Change the GC retention policy of the zygote space to only collect when full.
      zygote_space->SetGcRetentionPolicy(GCRP_FULL_COLLECT);
      AddSpace(alloc_space_);
      have_zygote_space_ = true;
      break;
    }
  }

  // Reset the cumulative loggers since we now have a few additional timing phases.
  // TODO: C++0x
  for (CumulativeTimings::iterator it = cumulative_timings_.begin();
       it != cumulative_timings_.end(); ++it) {
    it->second->Reset();
  }

  // Reset this since we now count the ZygoteSpace in the total heap size.
  num_bytes_allocated_ = 0;
}
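
// Context (based on how the zygote uses this): PreZygoteFork runs once before the first app is
// forked, freezing boot-time allocations into a zygote space with GCRP_FULL_COLLECT so that
// later partial GCs skip it and track its outgoing references through zygote_mod_union_table_
// instead of rescanning it.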

void Heap::FlushAllocStack() {
  MarkAllocStack(alloc_space_->GetLiveBitmap(), allocation_stack_.get());
  allocation_stack_->Reset();
}

size_t Heap::GetUsedMemorySize() const {
  size_t total = num_bytes_allocated_;
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsZygoteSpace()) {
      total += (*it)->AsAllocSpace()->Size();
    }
  }
  return total;
}

void Heap::MarkAllocStack(SpaceBitmap* bitmap, MarkStack* stack) {
  // Mark every object in the stack in the given bitmap.
  const size_t count = stack->Size();
  for (size_t i = 0; i < count; ++i) {
    const Object* obj = stack->Get(i);
    DCHECK(obj != NULL);
    bitmap->Set(obj);
  }
}

void Heap::UnMarkAllocStack(SpaceBitmap* bitmap, MarkStack* stack) {
  // Clear every object in the stack from the given bitmap.
  size_t count = stack->Size();
  for (size_t i = 0; i < count; ++i) {
    const Object* obj = stack->Get(i);
    DCHECK(obj != NULL);
    bitmap->Clear(obj);
  }
}
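
// MarkAllocStack and UnMarkAllocStack are duals: one sets and the other clears every stack
// entry in the given bitmap, letting the GC temporarily treat objects allocated since the last
// collection as live (see the sticky GC path in CollectGarbageMarkSweepPlan) without rebuilding
// the bitmaps from scratch.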

GcType Heap::CollectGarbageInternal(GcType gc_type, bool clear_soft_references) {
  Locks::mutator_lock_->AssertNotHeld();
#ifndef NDEBUG
  {
    MutexLock mu(*Locks::thread_suspend_count_lock_);
    CHECK_EQ(Thread::Current()->GetState(), kWaitingPerformingGc);
  }
#endif

  // Ensure there is only one GC at a time.
  bool start_collect = false;
  while (!start_collect) {
    {
      MutexLock mu(*gc_complete_lock_);
      if (!is_gc_running_) {
        is_gc_running_ = true;
        start_collect = true;
      }
    }
    if (!start_collect) {
      WaitForConcurrentGcToComplete();
      // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
      // Not doing at the moment to ensure soft references are cleared.
    }
  }
  gc_complete_lock_->AssertNotHeld();

  // We need to do partial GCs every now and then to avoid the heap growing too much and
  // fragmenting.
  if (gc_type == kGcTypeSticky && ++sticky_gc_count_ > partial_gc_frequency_) {
    gc_type = kGcTypePartial;
  }
  if (gc_type != kGcTypeSticky) {
    sticky_gc_count_ = 0;
  }

  if (concurrent_gc_) {
    CollectGarbageConcurrentMarkSweepPlan(gc_type, clear_soft_references);
  } else {
    CollectGarbageMarkSweepPlan(gc_type, clear_soft_references);
  }

  {
    MutexLock mu(*gc_complete_lock_);
    is_gc_running_ = false;
    last_gc_type_ = gc_type;
    // Wake anyone who may have been waiting for the GC to complete.
    gc_complete_cond_->Broadcast();
  }
  // Inform DDMS that a GC completed.
  Dbg::GcDidFinish();
  return gc_type;
}
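
// is_gc_running_ plus gc_complete_cond_ form a simple hand-rolled barrier:
// WaitForConcurrentGcToComplete blocks on the condition variable while the flag is set, and the
// Broadcast above releases all waiters once last_gc_type_ is published, which is how Allocate
// learns what kind of collection it just waited out.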
Mathieu Chartiera6399032012-06-11 18:49:50 -0700867
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700868void Heap::CollectGarbageMarkSweepPlan(GcType gc_type, bool clear_soft_references) {
869 TimingLogger timings("CollectGarbageInternal", true);
Mathieu Chartier662618f2012-06-06 12:01:47 -0700870
Mathieu Chartierfd678be2012-08-30 14:50:54 -0700871 std::stringstream gc_type_str;
872 gc_type_str << gc_type << " ";
873
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700874 // Suspend all threads are get exclusive access to the heap.
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700875 uint64_t start_time = NanoTime();
Elliott Hughes8d768a92011-09-14 16:35:25 -0700876 ThreadList* thread_list = Runtime::Current()->GetThreadList();
877 thread_list->SuspendAll();
Mathieu Chartier662618f2012-06-06 12:01:47 -0700878 timings.AddSplit("SuspendAll");
Ian Rogersb726dcb2012-09-05 08:57:23 -0700879 Locks::mutator_lock_->AssertExclusiveHeld();
Elliott Hughes83df2ac2011-10-11 16:37:54 -0700880
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700881 size_t bytes_freed = 0;
Elliott Hughesadb460d2011-10-05 17:02:34 -0700882 Object* cleared_references = NULL;
Carl Shapiro58551df2011-07-24 03:09:51 -0700883 {
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700884 MarkSweep mark_sweep(mark_stack_.get());
Carl Shapiro58551df2011-07-24 03:09:51 -0700885
886 mark_sweep.Init();
Elliott Hughes307f75d2011-10-12 18:04:40 -0700887 timings.AddSplit("Init");
Carl Shapiro58551df2011-07-24 03:09:51 -0700888
Mathieu Chartierc7b83a02012-09-11 18:07:39 -0700889 if (verify_pre_gc_heap_) {
Ian Rogersb726dcb2012-09-05 08:57:23 -0700890 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -0700891 if (!VerifyHeapReferences()) {
892 LOG(FATAL) << "Pre " << gc_type_str.str() << "Gc verification failed";
893 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -0700894 timings.AddSplit("VerifyHeapReferencesPreGC");
895 }
896
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700897 // Make sure that the tables have the correct pointer for the mark sweep.
898 mod_union_table_->Init(&mark_sweep);
899 zygote_mod_union_table_->Init(&mark_sweep);
Mathieu Chartier7664f5c2012-06-08 18:15:32 -0700900
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700901 // Swap allocation stack and live stack, enabling us to have new allocations during this GC.
Mathieu Chartierc7b83a02012-09-11 18:07:39 -0700902 SwapStacks();
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700903
904 // We will need to know which cards were dirty for doing concurrent processing of dirty cards.
905 // TODO: Investigate using a mark stack instead of a vector.
906 std::vector<byte*> dirty_cards;
Mathieu Chartier0325e622012-09-05 14:22:51 -0700907 if (gc_type == kGcTypeSticky) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700908 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
909 card_table_->GetDirtyCards(*it, dirty_cards);
910 }
911 }
912
Mathieu Chartierb43b7d42012-06-19 13:15:09 -0700913 // Clear image space cards and keep track of cards we cleared in the mod-union table.
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700914 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
915 Space* space = *it;
916 if (space->IsImageSpace()) {
917 mod_union_table_->ClearCards(*it);
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700918 timings.AddSplit("ClearModUnionCards");
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700919 } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
920 zygote_mod_union_table_->ClearCards(space);
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700921 timings.AddSplit("ClearZygoteCards");
922 } else {
923 card_table_->ClearSpaceCards(space);
924 timings.AddSplit("ClearCards");
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700925 }
926 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700927
Ian Rogersb726dcb2012-09-05 08:57:23 -0700928 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartier0325e622012-09-05 14:22:51 -0700929 if (gc_type == kGcTypePartial) {
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700930 // Copy the mark bits over from the live bits, do this as early as possible or else we can
931 // accidentally un-mark roots.
932 // Needed for scanning dirty objects.
Mathieu Chartierfd678be2012-08-30 14:50:54 -0700933 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700934 if ((*it)->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
935 mark_sweep.CopyMarkBits(*it);
936 }
937 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700938 timings.AddSplit("CopyMarkBits");
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700939
940 // We can assume that everything < alloc_space_ start is marked at this point.
941 mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
Mathieu Chartier0325e622012-09-05 14:22:51 -0700942 } else if (gc_type == kGcTypeSticky) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -0700943 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700944 if ((*it)->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
945 mark_sweep.CopyMarkBits(*it);
946 }
947 }
948 timings.AddSplit("CopyMarkBits");
949
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700950 mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700951 }
Mathieu Chartierb43b7d42012-06-19 13:15:09 -0700952
Mathieu Chartierc7b83a02012-09-11 18:07:39 -0700953 MarkAllocStack(alloc_space_->GetLiveBitmap(), live_stack_.get());
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700954
Mathieu Chartier0325e622012-09-05 14:22:51 -0700955 if (gc_type != kGcTypeSticky) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -0700956 live_stack_->Reset();
957 }
958
Carl Shapiro58551df2011-07-24 03:09:51 -0700959 mark_sweep.MarkRoots();
Elliott Hughes307f75d2011-10-12 18:04:40 -0700960 timings.AddSplit("MarkRoots");
Carl Shapiro58551df2011-07-24 03:09:51 -0700961
Mathieu Chartierb43b7d42012-06-19 13:15:09 -0700962 // Roots are marked on the bitmap and the mark_stack is empty.
Ian Rogers5d76c432011-10-31 21:42:49 -0700963 DCHECK(mark_sweep.IsMarkStackEmpty());
Carl Shapiro58551df2011-07-24 03:09:51 -0700964
Mathieu Chartierfd678be2012-08-30 14:50:54 -0700965 UpdateAndMarkModUnion(timings, gc_type);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700966
Mathieu Chartierfd678be2012-08-30 14:50:54 -0700967 if (verify_mod_union_table_) {
968 zygote_mod_union_table_->Update();
969 zygote_mod_union_table_->Verify();
970 mod_union_table_->Update();
971 mod_union_table_->Verify();
972 }
Mathieu Chartierb43b7d42012-06-19 13:15:09 -0700973
974 // Recursively mark all the non-image bits set in the mark bitmap.
Mathieu Chartier0325e622012-09-05 14:22:51 -0700975 if (gc_type != kGcTypeSticky) {
      live_stack_->Reset();
      mark_sweep.RecursiveMark(gc_type == kGcTypePartial, timings);
    } else {
      mark_sweep.RecursiveMarkCards(card_table_.get(), dirty_cards, timings);
    }
    mark_sweep.DisableFinger();

    // Need to process references before the swap since it uses IsMarked.
    mark_sweep.ProcessReferences(clear_soft_references);
    timings.AddSplit("ProcessReferences");

    // This doesn't work with mutators unpaused for some reason; TODO: Fix.
    mark_sweep.SweepSystemWeaks(false);
    timings.AddSplit("SweepSystemWeaks");

    // Need to swap for VERIFY_OBJECT_ENABLED since we put things in the live bitmap after they
    // have been allocated.
    const bool swap = true;
    if (swap) {
      SwapBitmaps();
    }

#ifndef NDEBUG
    // Verify that we only reach marked objects from the image space.
    mark_sweep.VerifyImageRoots();
    timings.AddSplit("VerifyImageRoots");
#endif

    if (gc_type != kGcTypeSticky) {
      mark_sweep.Sweep(gc_type == kGcTypePartial, swap);
    } else {
      mark_sweep.SweepArray(timings, live_stack_.get(), swap);
    }
    timings.AddSplit("Sweep");

    if (verify_system_weaks_) {
      mark_sweep.VerifySystemWeaks();
      timings.AddSplit("VerifySystemWeaks");
    }

    cleared_references = mark_sweep.GetClearedReferences();
    bytes_freed = mark_sweep.GetFreedBytes();
  }

  if (verify_post_gc_heap_) {
    WriterMutexLock mu(*Locks::heap_bitmap_lock_);
    if (!VerifyHeapReferences()) {
      LOG(FATAL) << "Post " << gc_type_str.str() << "Gc verification failed";
    }
    timings.AddSplit("VerifyHeapReferencesPostGC");
  }

  GrowForUtilization();
  timings.AddSplit("GrowForUtilization");

  thread_list->ResumeAll();
  timings.AddSplit("ResumeAll");

  EnqueueClearedReferences(&cleared_references);
  RequestHeapTrim();
  timings.AddSplit("Finish");

  // If the GC was slow, then print timings in the log.
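  // Note: dividing and re-multiplying by 1000 truncates the duration to whole microseconds.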
  uint64_t duration = (NanoTime() - start_time) / 1000 * 1000;
  if (duration > MsToNs(50)) {
    const size_t percent_free = GetPercentFree();
    const size_t current_heap_size = GetUsedMemorySize();
    const size_t total_memory = GetTotalMemory();
    LOG(INFO) << gc_type_str.str() << " "
              << "GC freed " << PrettySize(bytes_freed) << ", " << percent_free << "% free, "
              << PrettySize(current_heap_size) << "/" << PrettySize(total_memory) << ", "
              << "paused " << PrettyDuration(duration);
    if (VLOG_IS_ON(heap)) {
      timings.Dump();
    }
  }

  CumulativeLogger* logger = cumulative_timings_.Get(gc_type);
  logger->Start();
  logger->AddLogger(timings);
  logger->End(); // Next iteration.
}

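// Mod-union tables track references from spaces this GC will not collect (the image space, and
// the zygote space during partial collections) into the spaces it will, letting those references
// be treated as roots without rescanning the uncollected spaces.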
void Heap::UpdateAndMarkModUnion(TimingLogger& timings, GcType gc_type) {
  if (gc_type == kGcTypeSticky) {
    // Don't need to do anything for the mod union table in this case since we are only scanning
    // dirty cards.
    return;
  }

  // Update zygote mod union table.
  if (gc_type == kGcTypePartial) {
    zygote_mod_union_table_->Update();
    timings.AddSplit("UpdateZygoteModUnionTable");

    zygote_mod_union_table_->MarkReferences();
    timings.AddSplit("ZygoteMarkReferences");
  }

  // Processes the cards we cleared earlier and adds their objects into the mod-union table.
  mod_union_table_->Update();
  timings.AddSplit("UpdateModUnionTable");

  // Scans all objects in the mod-union table.
  mod_union_table_->MarkReferences();
  timings.AddSplit("MarkImageToAllocSpaceReferences");
}

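// Root visitor used by the heap verification code below: logs when one of the runtime's GC roots
// is exactly the object under investigation.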
void Heap::RootMatchesObjectVisitor(const Object* root, void* arg) {
  Object* obj = reinterpret_cast<Object*>(arg);
  if (root == obj) {
    LOG(INFO) << "Object " << obj << " is a root";
  }
}

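// Diagnostic visitor that logs each object a card-table rescan would have visited.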
class ScanVisitor {
 public:
  void operator ()(const Object* obj) const {
    LOG(INFO) << "Would have rescanned object " << obj;
  }
};

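// Verifies that every reference held by a visited object points at a live object, dumping
// extensive diagnostics (card state, bitmap and stack membership, rescan results) on failure.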
class VerifyReferenceVisitor {
 public:
  VerifyReferenceVisitor(Heap* heap, bool* failed)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                            Locks::heap_bitmap_lock_)
      : heap_(heap),
        failed_(failed) {
  }

  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS; requires support for smarter
  // analysis.
  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
                   bool /* is_static */) const NO_THREAD_SAFETY_ANALYSIS {
    // Verify that the reference is live.
    if (ref != NULL && !IsLive(ref)) {
      CardTable* card_table = heap_->GetCardTable();
      MarkStack* alloc_stack = heap_->allocation_stack_.get();
      MarkStack* live_stack = heap_->live_stack_.get();

      byte* card_addr = card_table->CardFromAddr(obj);
      LOG(ERROR) << "Object " << obj << " references dead object " << ref << "; card IsDirty = "
                 << (*card_addr == GC_CARD_DIRTY);
      LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
      LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
      card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
      void* cover_begin = card_table->AddrFromCard(card_addr);
      void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
          GC_CARD_SIZE);
      LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
                 << "-" << cover_end;
      SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetSpaceBitmap(obj);

      // Print out how the object is live.
      if (bitmap->Test(obj)) {
        LOG(ERROR) << "Object " << obj << " found in live bitmap";
      }

      if (std::binary_search(alloc_stack->Begin(), alloc_stack->End(), obj)) {
        LOG(ERROR) << "Object " << obj << " found in allocation stack";
      }

      if (std::binary_search(live_stack->Begin(), live_stack->End(), obj)) {
        LOG(ERROR) << "Object " << obj << " found in live stack";
      }

      if (std::binary_search(live_stack->Begin(), live_stack->End(), ref)) {
        LOG(ERROR) << "Reference " << ref << " found in live stack!";
      }

      // Attempt to see if the card table missed the reference.
      ScanVisitor scan_visitor;
      byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
      card_table->Scan(bitmap, byte_cover_begin, byte_cover_begin + GC_CARD_SIZE, scan_visitor,
                       IdentityFunctor());

      // Try and see if a mark sweep collector scans the reference.
      MarkStack* mark_stack = heap_->mark_stack_.get();
      MarkSweep ms(mark_stack);
      ms.Init();
      mark_stack->Reset();
      ms.SetFinger(reinterpret_cast<Object*>(~size_t(0)));

      // All the references should end up in the mark stack.
      ms.ScanRoot(obj);
      if (std::find(mark_stack->Begin(), mark_stack->End(), ref) != mark_stack->End()) {
        LOG(ERROR) << "Ref found in the mark_stack when rescanning the object!";
      } else {
        LOG(ERROR) << "Dumping mark stack contents";
        for (Object** it = mark_stack->Begin(); it != mark_stack->End(); ++it) {
          LOG(ERROR) << *it;
        }
      }
      mark_stack->Reset();

      // Search to see if any of the roots reference our object.
      void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
      Runtime::Current()->VisitRoots(&Heap::RootMatchesObjectVisitor, arg);
      *failed_ = true;
    }
  }

  bool IsLive(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetSpaceBitmap(obj);
    if (bitmap != NULL) {
      if (bitmap->Test(obj)) {
        return true;
      }
    } else {
      heap_->DumpSpaces();
      LOG(ERROR) << "Object " << obj << " not found in any spaces";
    }
    MarkStack* alloc_stack = heap_->allocation_stack_.get();
    // At this point we need to search the allocation stack since things in the live stack may
    // get swept.
    if (std::binary_search(alloc_stack->Begin(), alloc_stack->End(), const_cast<Object*>(obj))) {
      return true;
    }
    // Neither in the live bitmap nor the allocation stack, so the object must be dead.
    return false;
  }

 private:
  Heap* heap_;
  bool* failed_;
};

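// Applies VerifyReferenceVisitor to every reference held by each visited object.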
class VerifyObjectVisitor {
 public:
  VerifyObjectVisitor(Heap* heap)
      : heap_(heap),
        failed_(false) {
  }

  void operator ()(const Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    VerifyReferenceVisitor visitor(heap_, const_cast<bool*>(&failed_));
    MarkSweep::VisitObjectReferences(obj, visitor);
  }

  bool Failed() const {
    return failed_;
  }

 private:
  Heap* heap_;
  bool failed_;
};

// Must do this with mutators suspended since we are directly accessing the allocation stacks.
bool Heap::VerifyHeapReferences() {
  Locks::mutator_lock_->AssertExclusiveHeld();
  // Sort the allocation stacks so that we can efficiently binary search them.
  std::sort(allocation_stack_->Begin(), allocation_stack_->End());
  std::sort(live_stack_->Begin(), live_stack_->End());
  // Perform the verification.
  VerifyObjectVisitor visitor(this);
  GetLiveBitmap()->Visit(visitor);
  // We don't want to verify the objects in the allocation stack since they themselves may be
  // pointing to dead objects if they are not reachable.
  if (visitor.Failed()) {
    DumpSpaces();
    return false;
  }
  return true;
}

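// Verifier for missing card marks: an object that references something in the live stack (other
// than a class) must sit on a dirty card, so a clean card here indicates a missed card mark.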
class VerifyReferenceCardVisitor {
 public:
  VerifyReferenceCardVisitor(Heap* heap, bool* failed)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                            Locks::heap_bitmap_lock_)
      : heap_(heap),
        failed_(failed) {
  }

  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS; requires support for smarter
  // analysis.
  void operator ()(const Object* obj, const Object* ref, const MemberOffset& offset,
                   bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
    if (ref != NULL) {
      CardTable* card_table = heap_->GetCardTable();
      // If the object references something in the live stack other than a class, it must be on a
      // dirty card; a clean card means a card mark was missed.
      if (!card_table->IsDirty(obj)) {
        MarkStack* live_stack = heap_->live_stack_.get();
        if (std::binary_search(live_stack->Begin(), live_stack->End(), ref) && !ref->IsClass()) {
          if (std::binary_search(live_stack->Begin(), live_stack->End(), obj)) {
            LOG(ERROR) << "Object " << obj << " found in live stack";
          }
          if (heap_->GetLiveBitmap()->Test(obj)) {
            LOG(ERROR) << "Object " << obj << " found in live bitmap";
          }
          LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
                     << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";

          // Print which field of the object holds the offending reference.
          if (!obj->IsObjectArray()) {
            const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
            CHECK(klass != NULL);
            const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
            CHECK(fields != NULL);
            for (int32_t i = 0; i < fields->GetLength(); ++i) {
              const Field* cur = fields->Get(i);
              if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
                LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
                           << PrettyField(cur);
                break;
              }
            }
          } else {
            const ObjectArray<Object>* object_array = obj->AsObjectArray<Object>();
            for (int32_t i = 0; i < object_array->GetLength(); ++i) {
              if (object_array->Get(i) == ref) {
                LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
              }
            }
          }

          *failed_ = true;
        }
      }
    }
  }

 private:
  Heap* heap_;
  bool* failed_;
};

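// Runs VerifyReferenceCardVisitor over every reference of each visited object.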
class VerifyLiveStackReferences {
 public:
  VerifyLiveStackReferences(Heap* heap)
      : heap_(heap),
        failed_(false) {
  }

  void operator ()(const Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
    MarkSweep::VisitObjectReferences(obj, visitor);
  }

  bool Failed() const {
    return failed_;
  }

 private:
  Heap* heap_;
  bool failed_;
};

bool Heap::VerifyMissingCardMarks() {
  Locks::mutator_lock_->AssertExclusiveHeld();

  VerifyLiveStackReferences visitor(this);
  GetLiveBitmap()->Visit(visitor);

  // We can verify objects in the live stack since none of these should reference dead objects.
  for (Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
    visitor(*it);
  }

  if (visitor.Failed()) {
    DumpSpaces();
    return false;
  }
  return true;
}

void Heap::SwapBitmaps() {
  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
  // these bitmaps. Doing this enables us to sweep with the heap unlocked since new allocations
  // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark bit
  // instead, resulting in no new allocated objects being incorrectly freed by sweep.
  WriterMutexLock mu(*Locks::heap_bitmap_lock_);
  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    // We never allocate into zygote spaces.
    if (space->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
      live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
      mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap());
      space->AsAllocSpace()->SwapBitmaps();
    }
  }
}

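// After the swap, the old allocation stack becomes the live stack (everything allocated since the
// last GC) and mutators start pushing onto the now-empty allocation stack.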
void Heap::SwapStacks() {
  MarkStack* temp = allocation_stack_.release();
  allocation_stack_.reset(live_stack_.release());
  live_stack_.reset(temp);

  // Sort the live stack so that we can quickly binary search it later.
  if (VERIFY_OBJECT_ENABLED) {
    std::sort(live_stack_->Begin(), live_stack_->End());
  }
}

void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft_references) {
  TimingLogger timings("ConcurrentCollectGarbageInternal", true);
  uint64_t root_begin = NanoTime(), root_end = 0, dirty_begin = 0, dirty_end = 0;
  std::stringstream gc_type_str;
  gc_type_str << gc_type << " ";

  // Suspend all threads and get exclusive access to the heap.
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();
  timings.AddSplit("SuspendAll");
  Locks::mutator_lock_->AssertExclusiveHeld();

  size_t bytes_freed = 0;
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep(mark_stack_.get());
    timings.AddSplit("ctor");

    mark_sweep.Init();
    timings.AddSplit("Init");

    if (verify_pre_gc_heap_) {
      WriterMutexLock mu(*Locks::heap_bitmap_lock_);
      if (!VerifyHeapReferences()) {
        LOG(FATAL) << "Pre " << gc_type_str.str() << "Gc verification failed";
      }
      timings.AddSplit("VerifyHeapReferencesPreGC");
    }

    // Swap the stacks; this is safe since all the mutators are suspended at this point.
    SwapStacks();

    // Check that all objects which reference things in the live stack are on dirty cards.
    if (verify_missing_card_marks_) {
      ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
      // Sort the live stack so that we can quickly binary search it later.
      std::sort(live_stack_->Begin(), live_stack_->End());
      if (!VerifyMissingCardMarks()) {
        LOG(FATAL) << "Pre GC verification of missing card marks failed";
      }
    }

    // We will need to know which cards were dirty for doing concurrent processing of dirty cards.
    // TODO: Investigate using a mark stack instead of a vector.
    std::vector<byte*> dirty_cards;
    if (gc_type == kGcTypeSticky) {
      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
        card_table_->GetDirtyCards(*it, dirty_cards);
      }
    }

    // Make sure that the tables have the correct pointer for the mark sweep.
    mod_union_table_->Init(&mark_sweep);
    zygote_mod_union_table_->Init(&mark_sweep);

    // Clear image space cards and keep track of cards we cleared in the mod-union table.
    for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
      Space* space = *it;
      if (space->IsImageSpace()) {
        mod_union_table_->ClearCards(*it);
        timings.AddSplit("ModUnionClearCards");
      } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
        zygote_mod_union_table_->ClearCards(space);
        timings.AddSplit("ZygoteModUnionClearCards");
      } else {
        card_table_->ClearSpaceCards(space);
        timings.AddSplit("ClearCards");
      }
    }

    {
      WriterMutexLock mu(*Locks::heap_bitmap_lock_);

      if (gc_type == kGcTypePartial) {
        // Copy the mark bits over from the live bits; do this as early as possible or else we
        // can accidentally un-mark roots.
        // Needed for scanning dirty objects.
        for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
          if ((*it)->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
            mark_sweep.CopyMarkBits(*it);
          }
        }
        timings.AddSplit("CopyMarkBits");
        mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
      } else if (gc_type == kGcTypeSticky) {
        for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
          if ((*it)->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
            mark_sweep.CopyMarkBits(*it);
          }
        }
        timings.AddSplit("CopyMarkBits");
        mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
      }

      // Marking the roots up front is unnecessary for sticky mark bit GCs, since the remark
      // pass below re-marks them anyway.
      if (gc_type != kGcTypeSticky) {
        mark_sweep.MarkRoots();
        timings.AddSplit("MarkRoots");
      }

      if (verify_mod_union_table_) {
        zygote_mod_union_table_->Update();
        zygote_mod_union_table_->Verify();
        mod_union_table_->Update();
        mod_union_table_->Verify();
      }
    }

    // Roots are marked on the bitmap and the mark_stack is empty.
    DCHECK(mark_sweep.IsMarkStackEmpty());

    // Allow mutators to go again; acquire a share of mutator_lock_ to continue.
    thread_list->ResumeAll();
    {
      ReaderMutexLock reader_lock(*Locks::mutator_lock_);
      root_end = NanoTime();
      timings.AddSplit("RootEnd");

      WriterMutexLock mu(*Locks::heap_bitmap_lock_);
      UpdateAndMarkModUnion(timings, gc_type);

      // Mark everything in the live stack as live so that sweeping system weaks works correctly
      // for sticky mark bit GCs.
      MarkAllocStack(alloc_space_->GetLiveBitmap(), live_stack_.get());
      timings.AddSplit("MarkStackAsLive");

      if (gc_type != kGcTypeSticky) {
        // Recursively mark all the non-image bits set in the mark bitmap.
        mark_sweep.RecursiveMark(gc_type == kGcTypePartial, timings);
      } else {
        mark_sweep.RecursiveMarkCards(card_table_.get(), dirty_cards, timings);
      }
      mark_sweep.DisableFinger();
    }
    // Release share on mutator_lock_ and then get exclusive access.
    dirty_begin = NanoTime();
    thread_list->SuspendAll();
    timings.AddSplit("ReSuspend");
    Locks::mutator_lock_->AssertExclusiveHeld();

    {
      WriterMutexLock mu(*Locks::heap_bitmap_lock_);

      // Re-mark root set.
      mark_sweep.ReMarkRoots();
      timings.AddSplit("ReMarkRoots");

      if (verify_missing_card_marks_) {
        // Since the missing card mark verification uses SweepArray to empty the allocation
        // stack, we need to make sure that we don't free weaks which won't get swept by
        // SweepSystemWeaks.
        MarkAllocStack(alloc_space_->GetLiveBitmap(), allocation_stack_.get());
      }

      // Scan dirty objects; this catches objects dirtied by mutators while marking ran
      // concurrently.
      mark_sweep.RecursiveMarkDirtyObjects(false);
      timings.AddSplit("RecursiveMarkDirtyObjects");
    }

    {
      ReaderMutexLock mu(*Locks::heap_bitmap_lock_);

      mark_sweep.ProcessReferences(clear_soft_references);
      timings.AddSplit("ProcessReferences");

      // This doesn't work with mutators unpaused for some reason; TODO: Fix.
      mark_sweep.SweepSystemWeaks(false);
      timings.AddSplit("SweepSystemWeaks");
    }

    // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
    // these bitmaps. Doing this enables us to sweep with the heap unlocked since new allocations
    // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark
    // bit instead, resulting in no new allocated objects being incorrectly freed by sweep.
    const bool swap = true;
    if (swap) {
      SwapBitmaps();
    }

    // Only need to do this if we have the card mark verification on, and only during concurrent GC.
    if (verify_missing_card_marks_) {
      WriterMutexLock mu(*Locks::heap_bitmap_lock_);
      mark_sweep.SweepArray(timings, allocation_stack_.get(), swap);
    } else {
      WriterMutexLock mu(*Locks::heap_bitmap_lock_);
      // We only sweep over the live stack, and the live stack should not intersect with the
      // allocation stack, so it should be safe to UnMark anything in the allocation stack as live.
      UnMarkAllocStack(alloc_space_->GetLiveBitmap(), allocation_stack_.get());
      timings.AddSplit("UnMarkAllocStack");
    }

    if (kIsDebugBuild) {
      // Verify that we only reach marked objects from the image space.
      ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
      mark_sweep.VerifyImageRoots();
      timings.AddSplit("VerifyImageRoots");
    }

    if (verify_post_gc_heap_) {
      WriterMutexLock mu(*Locks::heap_bitmap_lock_);
      if (!VerifyHeapReferences()) {
        LOG(FATAL) << "Post " << gc_type_str.str() << "Gc verification failed";
      }
      timings.AddSplit("VerifyHeapReferencesPostGC");
    }

    thread_list->ResumeAll();
    dirty_end = NanoTime();
    Locks::mutator_lock_->AssertNotHeld();

    {
      // TODO: this lock shouldn't be necessary (it's why we did the bitmap flip above).
      WriterMutexLock mu(*Locks::heap_bitmap_lock_);
      if (gc_type != kGcTypeSticky) {
        mark_sweep.Sweep(gc_type == kGcTypePartial, swap);
      } else {
        mark_sweep.SweepArray(timings, live_stack_.get(), swap);
      }
      live_stack_->Reset();
      timings.AddSplit("Sweep");
    }

    if (verify_system_weaks_) {
      ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
      mark_sweep.VerifySystemWeaks();
      timings.AddSplit("VerifySystemWeaks");
    }

    cleared_references = mark_sweep.GetClearedReferences();
    bytes_freed = mark_sweep.GetFreedBytes();
  }

  GrowForUtilization();
  timings.AddSplit("GrowForUtilization");

  EnqueueClearedReferences(&cleared_references);
  RequestHeapTrim();
  timings.AddSplit("Finish");

  // If the GC was slow, then print timings in the log.
  uint64_t pause_roots = (root_end - root_begin) / 1000 * 1000;
  uint64_t pause_dirty = (dirty_end - dirty_begin) / 1000 * 1000;
  uint64_t duration = (NanoTime() - root_begin) / 1000 * 1000;
  if (pause_roots > MsToNs(5) || pause_dirty > MsToNs(5)) {
    const size_t percent_free = GetPercentFree();
    const size_t current_heap_size = GetUsedMemorySize();
    const size_t total_memory = GetTotalMemory();
    LOG(INFO) << gc_type_str.str()
              << "Concurrent GC freed " << PrettySize(bytes_freed) << ", " << percent_free
              << "% free, " << PrettySize(current_heap_size) << "/"
              << PrettySize(total_memory) << ", " << "paused " << PrettyDuration(pause_roots)
              << "+" << PrettyDuration(pause_dirty) << " total " << PrettyDuration(duration);

    if (VLOG_IS_ON(heap)) {
      timings.Dump();
    }
  }

  CumulativeLogger* logger = cumulative_timings_.Get(gc_type);
  logger->Start();
  logger->AddLogger(timings);
  logger->End(); // Next iteration.
}

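// Blocks until any in-progress concurrent GC completes, returning the type of GC it waited on,
// or kGcTypeNone if no GC was running.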
GcType Heap::WaitForConcurrentGcToComplete() {
  GcType last_gc_type = kGcTypeNone;
  if (concurrent_gc_) {
    bool do_wait;
    uint64_t wait_start = NanoTime();
    {
      // Check whether a GC is running while holding gc_complete_lock_.
      MutexLock mu(*gc_complete_lock_);
      do_wait = is_gc_running_;
    }
    if (do_wait) {
      // We must wait; change the thread state and then sleep on gc_complete_cond_.
      ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete);
      {
        MutexLock mu(*gc_complete_lock_);
        while (is_gc_running_) {
          gc_complete_cond_->Wait(*gc_complete_lock_);
        }
        last_gc_type = last_gc_type_;
      }
      uint64_t wait_time = NanoTime() - wait_start;
      if (wait_time > MsToNs(5)) {
        LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time);
      }
    }
  }
  return last_gc_type;
}

void Heap::DumpForSigQuit(std::ostream& os) {
  os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(num_bytes_allocated_) << "/"
     << PrettySize(GetTotalMemory()) << "; " << num_objects_allocated_ << " objects\n";
  // Dump cumulative timings.
  LOG(INFO) << "Dumping cumulative GC timings";
  for (CumulativeTimings::iterator it = cumulative_timings_.begin();
      it != cumulative_timings_.end(); ++it) {
    it->second->Dump();
  }
}

size_t Heap::GetPercentFree() {
  size_t total = GetTotalMemory();
  return 100 - static_cast<size_t>(100.0f * static_cast<float>(num_bytes_allocated_) / total);
}

void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  AllocSpace* alloc_space = alloc_space_;
  // TODO: Behavior for multiple alloc spaces?
  size_t alloc_space_capacity = alloc_space->Capacity();
  if (max_allowed_footprint > alloc_space_capacity) {
    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
             << PrettySize(alloc_space_capacity);
    max_allowed_footprint = alloc_space_capacity;
  }
  alloc_space->SetFootprintLimit(max_allowed_footprint);
}

// kHeapIdealFree is the ideal maximum free size when we grow the heap for utilization.
static const size_t kHeapIdealFree = 2 * MB;
// kHeapMinFree guarantees that you always have at least 512 KB free when you grow for
// utilization, regardless of the target utilization ratio.
static const size_t kHeapMinFree = kHeapIdealFree / 4;

void Heap::GrowForUtilization() {
  size_t target_size;
  bool use_footprint_limit = false;
  {
    // We know what our utilization is at this moment.
    // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
    target_size = num_bytes_allocated_ / Heap::GetTargetHeapUtilization();
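    // For example, with a target utilization of 0.5 and 3 MB allocated, the raw target is 6 MB;
    // the clamp below then limits it to num_bytes_allocated_ + kHeapIdealFree = 5 MB.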

    if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
      target_size = num_bytes_allocated_ + kHeapIdealFree;
    } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
      target_size = num_bytes_allocated_ + kHeapMinFree;
    }

    // Calculate when to perform the next ConcurrentGC.
    if (GetTotalMemory() - GetUsedMemorySize() < concurrent_min_free_) {
      // Not enough free memory to perform concurrent GC.
      concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
    } else {
      // Computed below to avoid holding both the statistics and the alloc space lock.
      use_footprint_limit = true;
    }
  }

  if (use_footprint_limit) {
    size_t foot_print_limit = alloc_space_->GetFootprintLimit();
    concurrent_start_bytes_ = foot_print_limit - concurrent_start_size_;
  }
  SetIdealFootprint(target_size);
}

void Heap::ClearGrowthLimit() {
  WaitForConcurrentGcToComplete();
  alloc_space_->ClearGrowthLimit();
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

Object* Heap::GetReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
}

void Heap::ClearReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, NULL, true);
}

// Returns true if the reference object has not yet been enqueued.
bool Heap::IsEnqueuable(const Object* ref) {
  DCHECK(ref != NULL);
  const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
  const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
  return (queue != NULL) && (queue_next == NULL);
}

void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
  DCHECK(ref != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
  EnqueuePendingReference(ref, cleared_reference_list);
}

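// Pending references form a circular singly-linked list threaded through pendingNext: *list
// points at a node whose pendingNext is the head, a singleton list is a self-loop, and newly
// enqueued references are linked in as the new head.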
void Heap::EnqueuePendingReference(Object* ref, Object** list) {
  DCHECK(ref != NULL);
  DCHECK(list != NULL);

  if (*list == NULL) {
    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
    *list = ref;
  } else {
    Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
  }
}

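// Removes and returns the head of the circular pending list, clearing its pendingNext link; a
// singleton list becomes empty (*list == NULL).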
Object* Heap::DequeuePendingReference(Object** list) {
  DCHECK(list != NULL);
  DCHECK(*list != NULL);
  Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
  Object* ref;
  if (*list == head) {
    ref = *list;
    *list = NULL;
  } else {
    Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
    ref = head;
  }
  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
  return ref;
}

void Heap::AddFinalizerReference(Thread* self, Object* object) {
  ScopedObjectAccess soa(self);
  JValue args[1];
  args[0].SetL(object);
  soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, NULL, args,
                                                                                   NULL);
}

size_t Heap::GetBytesAllocated() const {
  return num_bytes_allocated_;
}

size_t Heap::GetObjectsAllocated() const {
  return num_objects_allocated_;
}

size_t Heap::GetConcurrentStartSize() const {
  return concurrent_start_size_;
}

size_t Heap::GetConcurrentMinFree() const {
  return concurrent_min_free_;
}

void Heap::EnqueueClearedReferences(Object** cleared) {
  DCHECK(cleared != NULL);
  if (*cleared != NULL) {
    ScopedObjectAccess soa(Thread::Current());
    JValue args[1];
    args[0].SetL(*cleared);
    soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(), NULL,
                                                                                 args, NULL);
    *cleared = NULL;
  }
}

void Heap::RequestConcurrentGC() {
  // Make sure that we can do a concurrent GC.
  if (requesting_gc_ || !Runtime::Current()->IsFinishedStarting() ||
      Runtime::Current()->IsShuttingDown() || !Runtime::Current()->IsConcurrentGcEnabled()) {
    return;
  }

  requesting_gc_ = true;
  JNIEnv* env = Thread::Current()->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestGC);
  CHECK(!env->ExceptionCheck());
  requesting_gc_ = false;
}

void Heap::ConcurrentGC() {
  if (Runtime::Current()->IsShuttingDown() || !concurrent_gc_) {
    return;
  }

  // TODO: We shouldn't need a WaitForConcurrentGcToComplete here since only the concurrent GC
  // resumes threads before the GC is completed, and this function is only called within the GC
  // daemon thread.
  if (WaitForConcurrentGcToComplete() == kGcTypeNone) {
    // Start a concurrent GC as one wasn't in progress.
    ScopedThreadStateChange tsc(Thread::Current(), kWaitingPerformingGc);
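    // Prefer a sticky GC (which only scans cards dirtied since the last GC) once the allocation
    // space is large enough for it to be worthwhile; otherwise fall back to a partial GC.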
    if (alloc_space_->Size() > min_alloc_space_size_for_sticky_gc_) {
      CollectGarbageInternal(kGcTypeSticky, false);
    } else {
      CollectGarbageInternal(kGcTypePartial, false);
    }
  }
}

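// Waits for any in-progress concurrent GC, then returns unused memory in the allocation space
// to the system.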
void Heap::Trim() {
  WaitForConcurrentGcToComplete();
  alloc_space_->Trim();
}

void Heap::RequestHeapTrim() {
  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
  // because that only marks object heads, so a large array looks like lots of empty space. We
  // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
  // to utilization (which is probably inversely proportional to how much benefit we can expect).
  // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
  // not how much use we're making of those pages.
  uint64_t ms_time = NsToMs(NanoTime());
  {
    float utilization = static_cast<float>(num_bytes_allocated_) / alloc_space_->Size();
    if ((utilization > 0.75f) || ((ms_time - last_trim_time_) < 2 * 1000)) {
      // Don't bother trimming the heap if it's more than 75% utilized, or if a
      // heap trim occurred in the last two seconds.
      return;
    }
  }
  if (!Runtime::Current()->IsFinishedStarting() || Runtime::Current()->IsShuttingDown()) {
    // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time)
    // Also: we do not wish to start a heap trim if the runtime is shutting down.
    return;
  }
  last_trim_time_ = ms_time;
  JNIEnv* env = Thread::Current()->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestHeapTrim);
  CHECK(!env->ExceptionCheck());
}

}  // namespace art