/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <sys/types.h>
#include <sys/wait.h>

#include <limits>
#include <vector>

#include "atomic.h"
#include "card_table.h"
#include "debugger.h"
#include "heap_bitmap.h"
#include "image.h"
#include "mark_sweep.h"
#include "mod_union_table.h"
#include "object.h"
#include "object_utils.h"
#include "os.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
#include "timing_logger.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

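// Grow the [*first_space, *last_space] range (ordered by begin address) so that it also covers the
// given space; used below to compute the overall bounds of the heap.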
static void UpdateFirstAndLastSpace(Space** first_space, Space** last_space, Space* space) {
  if (*first_space == NULL) {
    *first_space = space;
    *last_space = space;
  } else {
    if ((*first_space)->Begin() > space->Begin()) {
      *first_space = space;
    } else if (space->Begin() > (*last_space)->Begin()) {
      *last_space = space;
    }
  }
}

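// Runs dex2oat in a child process to generate the boot image (and its companion oat file) from the
// current boot class path. Returns true if dex2oat exited successfully.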
static bool GenerateImage(const std::string& image_file_name) {
  const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
  std::vector<std::string> boot_class_path;
  Split(boot_class_path_string, ':', boot_class_path);
  if (boot_class_path.empty()) {
    LOG(FATAL) << "Failed to generate image because no boot class path specified";
  }

  std::vector<char*> arg_vector;

  std::string dex2oat_string(GetAndroidRoot());
  dex2oat_string += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
  const char* dex2oat = dex2oat_string.c_str();
  arg_vector.push_back(strdup(dex2oat));

  std::string image_option_string("--image=");
  image_option_string += image_file_name;
  const char* image_option = image_option_string.c_str();
  arg_vector.push_back(strdup(image_option));

  arg_vector.push_back(strdup("--runtime-arg"));
  arg_vector.push_back(strdup("-Xms64m"));

  arg_vector.push_back(strdup("--runtime-arg"));
  arg_vector.push_back(strdup("-Xmx64m"));

  for (size_t i = 0; i < boot_class_path.size(); i++) {
    std::string dex_file_option_string("--dex-file=");
    dex_file_option_string += boot_class_path[i];
    const char* dex_file_option = dex_file_option_string.c_str();
    arg_vector.push_back(strdup(dex_file_option));
  }

  std::string oat_file_option_string("--oat-file=");
  oat_file_option_string += image_file_name;
  oat_file_option_string.erase(oat_file_option_string.size() - 3);
  oat_file_option_string += "oat";
  const char* oat_file_option = oat_file_option_string.c_str();
  arg_vector.push_back(strdup(oat_file_option));

  arg_vector.push_back(strdup("--base=0x60000000"));

  std::string command_line(Join(arg_vector, ' '));
  LOG(INFO) << command_line;

  arg_vector.push_back(NULL);
  char** argv = &arg_vector[0];

  // fork and exec dex2oat
  pid_t pid = fork();
  if (pid == 0) {
    // no allocation allowed between fork and exec

    // change process groups, so we don't get reaped by ProcessManager
    setpgid(0, 0);

    execv(dex2oat, argv);

    PLOG(FATAL) << "execv(" << dex2oat << ") failed";
    return false;
  } else {
    STLDeleteElements(&arg_vector);

    // wait for dex2oat to finish
    int status;
    pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
    if (got_pid != pid) {
      PLOG(ERROR) << "waitpid failed: wanted " << pid << ", got " << got_pid;
      return false;
    }
    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
      LOG(ERROR) << dex2oat << " failed: " << command_line;
      return false;
    }
  }
  return true;
}

Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
           const std::string& original_image_file_name, bool concurrent_gc)
    : alloc_space_(NULL),
      card_table_(NULL),
      concurrent_gc_(concurrent_gc),
      have_zygote_space_(false),
      card_marking_disabled_(false),
      is_gc_running_(false),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      concurrent_start_size_(128 * KB),
      concurrent_min_free_(256 * KB),
      sticky_gc_count_(0),
      num_bytes_allocated_(0),
      num_objects_allocated_(0),
      pre_gc_verify_heap_(false),
      post_gc_verify_heap_(false),
      verify_mod_union_table_(false),
      last_trim_time_(0),
      try_running_gc_(false),
      requesting_gc_(false),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      target_utilization_(0.5),
      verify_objects_(false) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }

  // Compute the bounds of all spaces for allocating live and mark bitmaps
  // there will be at least one space (the alloc space)
  Space* first_space = NULL;
  Space* last_space = NULL;

  live_bitmap_.reset(new HeapBitmap(this));
  mark_bitmap_.reset(new HeapBitmap(this));

  // Requested begin for the alloc space, to follow the mapped image and oat files
  byte* requested_begin = NULL;
  std::string image_file_name(original_image_file_name);
  if (!image_file_name.empty()) {
    Space* image_space = NULL;

    if (OS::FileExists(image_file_name.c_str())) {
      // If the /system file exists, it should be up-to-date, don't try to generate
      image_space = Space::CreateImageSpace(image_file_name);
    } else {
      // If the /system file didn't exist, we need to use one from the art-cache.
      // If the cache file exists, try to open, but if it fails, regenerate.
      // If it does not exist, generate.
      image_file_name = GetArtCacheFilenameOrDie(image_file_name);
      if (OS::FileExists(image_file_name.c_str())) {
        image_space = Space::CreateImageSpace(image_file_name);
      }
      if (image_space == NULL) {
        if (!GenerateImage(image_file_name)) {
          LOG(FATAL) << "Failed to generate image: " << image_file_name;
        }
        image_space = Space::CreateImageSpace(image_file_name);
      }
    }
    if (image_space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_name;
    }

    AddSpace(image_space);
    UpdateFirstAndLastSpace(&first_space, &last_space, image_space);
    // Oat files referenced by image files immediately follow them in memory, ensure alloc space
    // isn't going to get in the middle
    byte* oat_end_addr = GetImageSpace()->GetImageHeader().GetOatEnd();
    CHECK(oat_end_addr > GetImageSpace()->End());
    if (oat_end_addr > requested_begin) {
      requested_begin = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_end_addr),
                                                        kPageSize));
    }
  }

  UniquePtr<AllocSpace> alloc_space(Space::CreateAllocSpace(
      "alloc space", initial_size, growth_limit, capacity, requested_begin));
  alloc_space_ = alloc_space.release();
  CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
  AddSpace(alloc_space_);

  UpdateFirstAndLastSpace(&first_space, &last_space, alloc_space_);
  byte* heap_begin = first_space->Begin();
  size_t heap_capacity = (last_space->Begin() - first_space->Begin()) + last_space->NonGrowthLimitCapacity();

  // Mark image objects in the live bitmap
  for (size_t i = 0; i < spaces_.size(); ++i) {
    Space* space = spaces_[i];
    if (space->IsImageSpace()) {
      space->AsImageSpace()->RecordImageAllocations(space->GetLiveBitmap());
    }
  }

  // Allocate the card table.
  card_table_.reset(CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  mod_union_table_.reset(new ModUnionTableToZygoteAllocspace<ModUnionTableReferenceCache>(this));
  CHECK(mod_union_table_.get() != NULL) << "Failed to create mod-union table";

  zygote_mod_union_table_.reset(new ModUnionTableCardCache(this));
  CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // Max stack size in bytes.
  static const size_t max_stack_size = capacity / SpaceBitmap::kAlignment * kWordSize;

  // TODO: Rename MarkStack to a more generic name?
  mark_stack_.reset(MarkStack::Create("dalvik-mark-stack", max_stack_size));
  allocation_stack_.reset(MarkStack::Create("dalvik-allocation-stack", max_stack_size));
  live_stack_.reset(MarkStack::Create("dalvik-live-stack", max_stack_size));

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable"));

  // Set up the cumulative timing loggers.
  for (size_t i = 0; i < static_cast<size_t>(kGcTypeMax); ++i) {
    std::ostringstream name;
    name << static_cast<GcType>(i);
    cumulative_timings_.Put(static_cast<GcType>(i),
                            new CumulativeLogger(name.str().c_str(), true));
  }

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

// Sort spaces based on begin address
class SpaceSorter {
 public:
  bool operator ()(const Space* a, const Space* b) const {
    return a->Begin() < b->Begin();
  }
};

void Heap::AddSpace(Space* space) {
  WriterMutexLock mu(*Locks::heap_bitmap_lock_);
  DCHECK(space != NULL);
  DCHECK(space->GetLiveBitmap() != NULL);
  live_bitmap_->AddSpaceBitmap(space->GetLiveBitmap());
  DCHECK(space->GetMarkBitmap() != NULL);
  mark_bitmap_->AddSpaceBitmap(space->GetMarkBitmap());
  spaces_.push_back(space);
  if (space->IsAllocSpace()) {
    alloc_space_ = space->AsAllocSpace();
  }

  // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger)
  std::sort(spaces_.begin(), spaces_.end(), SpaceSorter());

  // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
  // avoid redundant marking.
  bool seen_zygote = false, seen_alloc = false;
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    if (space->IsImageSpace()) {
      DCHECK(!seen_zygote);
      DCHECK(!seen_alloc);
    } else if (space->IsZygoteSpace()) {
      DCHECK(!seen_alloc);
      seen_zygote = true;
    } else if (space->IsAllocSpace()) {
      seen_alloc = true;
    }
  }
}

Heap::~Heap() {
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();

  VLOG(heap) << "~Heap()";
  // We can't take the heap lock here because there might be a daemon thread suspended with the
  // heap lock held. We know though that no non-daemon threads are executing, and we know that
  // all daemon threads are suspended, and we also know that the thread list has been deleted, so
  // those threads can't resume. We're the only running thread, and we can do whatever we like...
  STLDeleteElements(&spaces_);
  delete gc_complete_lock_;

  STLDeleteValues(&cumulative_timings_);
}

Space* Heap::FindSpaceFromObject(const Object* obj) const {
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->Contains(obj)) {
      return *it;
    }
  }
  LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  return NULL;
}

ImageSpace* Heap::GetImageSpace() {
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsImageSpace()) {
      return (*it)->AsImageSpace();
    }
  }
  return NULL;
}

AllocSpace* Heap::GetAllocSpace() {
  return alloc_space_;
}

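// Callback for AllocSpace::Walk that records the largest contiguous run of free bytes seen so far
// in *arg; used below to report the largest possible allocation when throwing an OOME.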
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);

  size_t chunk_size = static_cast<size_t>(reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start));
  size_t chunk_free_bytes = 0;
  if (used_bytes < chunk_size) {
    chunk_free_bytes = chunk_size - used_bytes;
  }

  if (chunk_free_bytes > max_contiguous_allocation) {
    max_contiguous_allocation = chunk_free_bytes;
  }
}

Object* Heap::AllocObject(Class* c, size_t byte_count) {
  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(Object));
  Object* obj = Allocate(alloc_space_, byte_count);
  if (LIKELY(obj != NULL)) {
    obj->SetClass(c);

    // Record allocation after since we want to use the atomic add for the atomic fence to guard
    // the SetClass since we do not want the class to appear NULL in another thread.
    RecordAllocation(alloc_space_, obj);

    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(c, byte_count);
    }
    const bool request_concurrent_gc = num_bytes_allocated_ >= concurrent_start_bytes_;
    if (request_concurrent_gc) {
      // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
      SirtRef<Object> ref(obj);
      RequestConcurrentGC();
    }
    VerifyObject(obj);

    // Additional verification to ensure that we did not allocate into a zygote space.
    DCHECK(!have_zygote_space_ || !FindSpaceFromObject(obj)->IsZygoteSpace());

    return obj;
  }
  int64_t total_bytes_free = GetFreeMemory();
  size_t max_contiguous_allocation = 0;
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsAllocSpace()) {
      (*it)->AsAllocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
    }
  }

  std::string msg(StringPrintf("Failed to allocate a %zd-byte %s (%lld total bytes free; largest possible contiguous allocation %zd bytes)",
                               byte_count, PrettyDescriptor(c).c_str(), total_bytes_free, max_contiguous_allocation));
  Thread::Current()->ThrowOutOfMemoryError(msg.c_str());
  return NULL;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL) {
    return true;
  }
  if (!IsAligned<kObjectAlignment>(obj)) {
    return false;
  }
  for (size_t i = 0; i < spaces_.size(); ++i) {
    if (spaces_[i]->Contains(obj)) {
      return true;
    }
  }
  return false;
}

bool Heap::IsLiveObjectLocked(const Object* obj) {
  Locks::heap_bitmap_lock_->AssertReaderHeld();
  return IsHeapAddress(obj) && GetLiveBitmap()->Test(obj);
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (obj == NULL || this == NULL || !verify_objects_ || Runtime::Current()->IsShuttingDown() ||
      Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}
#endif

void Heap::DumpSpaces() {
  // TODO: C++0x auto
  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    LOG(INFO) << *space;
    LOG(INFO) << *space->GetLiveBitmap();
    LOG(INFO) << *space->GetMarkBitmap();
  }
}

// We want to avoid bit rotting.
void Heap::VerifyObjectBody(const Object* obj) {
  if (!IsAligned<kObjectAlignment>(obj)) {
    LOG(FATAL) << "Object isn't aligned: " << obj;
  }

  // TODO: Smarter live check here which takes into account allocation stacks.
  //GlobalSynchronization::heap_bitmap_lock_->GetExclusiveOwnerTid()
  if (!GetLiveBitmap()->Test(obj)) {
    DumpSpaces();
    LOG(FATAL) << "Object is dead: " << obj;
  }

  // Ignore early dawn of the universe verifications
  if (!VERIFY_OBJECT_FAST && num_objects_allocated_ > 10) {
    const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
        Object::ClassOffset().Int32Value();
    const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
    if (c == NULL) {
      LOG(FATAL) << "Null class in object: " << obj;
    } else if (!IsAligned<kObjectAlignment>(c)) {
      LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
    } else if (!GetLiveBitmap()->Test(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
    // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
    // Note: we don't use the accessors here as they have internal sanity checks
    // that we don't want to run
    raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
    const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
    raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
    const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
    CHECK_EQ(c_c, c_c_c);
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

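// Atomically bumps the allocation counters, updates the runtime/thread statistics when enabled,
// and pushes the newly allocated object onto the allocation stack.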
void Heap::RecordAllocation(AllocSpace* space, const Object* obj) {
  DCHECK(obj != NULL);

  size_t size = space->AllocationSize(obj);
  DCHECK_GT(size, 0u);
  COMPILE_ASSERT(sizeof(size_t) == sizeof(int32_t),
                 int32_t_must_be_same_size_as_size_t_for_used_atomic_operations);
  android_atomic_add(
      size, reinterpret_cast<volatile int32_t*>(reinterpret_cast<size_t>(&num_bytes_allocated_)));
  android_atomic_add(
      1, reinterpret_cast<volatile int32_t*>(reinterpret_cast<size_t>(&num_objects_allocated_)));

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  allocation_stack_->AtomicPush(obj);
}

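// Atomically subtracts the freed object and byte counts from the running totals and updates the
// runtime/thread statistics when enabled.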
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700527void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
Mathieu Chartier637e3482012-08-17 10:41:32 -0700528 COMPILE_ASSERT(sizeof(size_t) == sizeof(int32_t),
529 int32_t_must_be_same_size_as_size_t_for_used_atomic_operations);
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700530 DCHECK_LE(freed_objects, num_objects_allocated_);
Mathieu Chartier637e3482012-08-17 10:41:32 -0700531 android_atomic_add(-static_cast<int32_t>(freed_objects),
Mathieu Chartier556fad32012-08-20 16:13:20 -0700532 reinterpret_cast<volatile int32_t*>(
533 reinterpret_cast<size_t>(&num_objects_allocated_)));
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700534
535 DCHECK_LE(freed_bytes, num_bytes_allocated_);
Mathieu Chartier637e3482012-08-17 10:41:32 -0700536 android_atomic_add(-static_cast<int32_t>(freed_bytes),
Mathieu Chartier556fad32012-08-20 16:13:20 -0700537 reinterpret_cast<volatile int32_t*>(
538 reinterpret_cast<size_t>(&num_bytes_allocated_)));
Elliott Hughes9d5ccec2011-09-19 13:19:50 -0700539
540 if (Runtime::Current()->HasStatsEnabled()) {
541 RuntimeStats* global_stats = Runtime::Current()->GetStats();
542 RuntimeStats* thread_stats = Thread::Current()->GetStats();
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700543 global_stats->freed_objects += freed_objects;
544 thread_stats->freed_objects += freed_objects;
Elliott Hughes307f75d2011-10-12 18:04:40 -0700545 global_stats->freed_bytes += freed_bytes;
546 thread_stats->freed_bytes += freed_bytes;
Elliott Hughes9d5ccec2011-09-19 13:19:50 -0700547 }
Carl Shapiro58551df2011-07-24 03:09:51 -0700548}
549
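// Tries progressively more expensive ways to satisfy the allocation: a plain allocation, then an
// allocation after a sticky/partial/full GC, then an allocation that grows the heap, and finally
// an allocation after a full GC that also clears SoftReferences.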
Object* Heap::Allocate(AllocSpace* space, size_t alloc_size) {
  Thread* self = Thread::Current();
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
#ifndef NDEBUG
  {
    MutexLock mu(*Locks::thread_suspend_count_lock_);
    CHECK_EQ(self->GetState(), kRunnable);
  }
  self->AssertThreadSuspensionIsAllowable();
#endif

  Object* ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it completes else request a
  // foreground partial collection.
  if (!WaitForConcurrentGcToComplete()) {
    // No concurrent GC so perform a foreground collection.
    if (Runtime::Current()->HasStatsEnabled()) {
      ++Runtime::Current()->GetStats()->gc_for_alloc_count;
      ++Thread::Current()->GetStats()->gc_for_alloc_count;
    }
    self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
    const size_t alloc_space_size = alloc_space_->Size();
    if (alloc_space_size > kMinAllocSpaceSizeForStickyGC
        && alloc_space_->Capacity() - alloc_space_size >= kMinRemainingSpaceForStickyGC) {
      CollectGarbageInternal(kGcTypeSticky, false);
    } else {
      CollectGarbageInternal(have_zygote_space_ ? kGcTypePartial : kGcTypeFull, false);
    }
    self->TransitionFromSuspendedToRunnable();
  } else if (have_zygote_space_) {
    // TODO: Keep track of what kind of Gc we waited to complete and use this to figure out what Gc
    // to do.
    // Try a partial Gc.
    if (Runtime::Current()->HasStatsEnabled()) {
      ++Runtime::Current()->GetStats()->gc_for_alloc_count;
      ++Thread::Current()->GetStats()->gc_for_alloc_count;
    }
    self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
    CollectGarbageInternal(kGcTypePartial, false);
    self->TransitionFromSuspendedToRunnable();
  }

  ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  // Partial GC didn't free enough memory, try a full GC.
  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
  CollectGarbageInternal(kGcTypeFull, false);
  self->TransitionFromSuspendedToRunnable();
  ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  // Allocations have failed after GCs; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(alloc_size);
  if (ptr != NULL) {
    size_t new_footprint = space->GetFootprintLimit();
    // OLD-TODO: may want to grow a little bit more so that the amount of
    //           free space is equal to the old free space + the
    //           utilization slop for the new allocation.
    VLOG(gc) << "Grow heap (frag case) to " << PrettySize(new_footprint)
             << " for a " << PrettySize(alloc_size) << " allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
  // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
           << " allocation";

  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  // We don't need a WaitForConcurrentGcToComplete here either.
  self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
  CollectGarbageInternal(kGcTypeFull, true);
  self->TransitionFromSuspendedToRunnable();
  return space->AllocWithGrowth(alloc_size);
}

int64_t Heap::GetMaxMemory() {
  size_t total = 0;
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    if (space->IsAllocSpace()) {
      total += space->AsAllocSpace()->Capacity();
    }
  }
  return total;
}

int64_t Heap::GetTotalMemory() {
  return GetMaxMemory();
}

int64_t Heap::GetFreeMemory() {
  return GetMaxMemory() - num_bytes_allocated_;
}

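// Visitor used by CountInstances to count heap objects whose class matches (or is assignable to)
// a given class, depending on the count_assignable flag.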
class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class == class_) {
        ++count_;
      }
    } else {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
  InstanceCounter counter(c, count_assignable);
  GetLiveBitmap()->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

void Heap::CollectGarbage(bool clear_soft_references) {
  // If we just waited for a GC to complete then we do not need to do another
  // GC unless we clear soft references.
  if (!WaitForConcurrentGcToComplete() || clear_soft_references) {
    ScopedThreadStateChange tsc(Thread::Current(), kWaitingPerformingGc);
    CollectGarbageInternal(have_zygote_space_ ? kGcTypePartial : kGcTypeFull, clear_soft_references);
  }
}

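// Called before the zygote process forks: converts the current alloc space into a zygote space
// that is only collected by full GCs, and installs a fresh alloc space for post-fork allocations.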
void Heap::PreZygoteFork() {
  static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
  MutexLock mu(zygote_creation_lock_);

  // Try to see if we have any Zygote spaces.
  if (have_zygote_space_) {
    return;
  }

  VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(alloc_space_->Size());

  {
    // Flush the alloc stack.
    WriterMutexLock mu(*Locks::heap_bitmap_lock_);
    FlushAllocStack();
  }

  // Replace the first alloc space we find with a zygote space.
  // TODO: C++0x auto
  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsAllocSpace()) {
      AllocSpace* zygote_space = (*it)->AsAllocSpace();

      // Turns the current alloc space into a Zygote space and obtain the new alloc space composed
      // of the remaining available heap memory.
      alloc_space_ = zygote_space->CreateZygoteSpace();

      // Change the GC retention policy of the zygote space to only collect when full.
      zygote_space->SetGcRetentionPolicy(GCRP_FULL_COLLECT);
      AddSpace(alloc_space_);
      have_zygote_space_ = true;
      break;
    }
  }

  // Reset the cumulative loggers since we now have a few additional timing phases.
  // TODO: C++0x
  for (CumulativeTimings::iterator it = cumulative_timings_.begin();
       it != cumulative_timings_.end(); ++it) {
    it->second->Reset();
  }

  // Reset this since we now count the ZygoteSpace in the total heap size.
  num_bytes_allocated_ = 0;
}

void Heap::FlushAllocStack() {
  MarkStackAsLive(allocation_stack_.get());
  allocation_stack_->Reset();
}

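// Returns the number of allocated bytes, including the bytes that now live in the zygote space.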
size_t Heap::GetUsedMemorySize() const {
  size_t total = num_bytes_allocated_;
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsZygoteSpace()) {
      total += (*it)->AsAllocSpace()->Size();
    }
  }
  return total;
}

void Heap::MarkStackAsLive(MarkStack* alloc_stack) {
  // We can just assume everything is inside the alloc_space_'s bitmap since we should only have
  // fresh allocations.
  SpaceBitmap* live_bitmap = alloc_space_->GetLiveBitmap();

  // Empty the allocation stack.
  const size_t count = alloc_stack->Size();
  for (size_t i = 0; i < count; ++i) {
    const Object* obj = alloc_stack->Get(i);
    DCHECK(obj != NULL);
    live_bitmap->Set(obj);
  }
}

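// Clears the mark bit of every object in the given stack.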
void Heap::UnMarkStack(MarkStack* alloc_stack) {
  SpaceBitmap* mark_bitmap = alloc_space_->GetMarkBitmap();

  // Clear all of the things in the AllocStack.
  size_t count = alloc_stack->Size();
  for (size_t i = 0; i < count; ++i) {
    const Object* obj = alloc_stack->Get(i);
    DCHECK(obj != NULL);
    if (mark_bitmap->Test(obj)) {
      mark_bitmap->Clear(obj);
    }
  }
}

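// Clears the live bit of every object in the given stack.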
void Heap::UnMarkStackAsLive(MarkStack* alloc_stack) {
  SpaceBitmap* live_bitmap = alloc_space_->GetLiveBitmap();

  // Clear all of the things in the AllocStack.
  size_t count = alloc_stack->Size();
  for (size_t i = 0; i < count; ++i) {
    const Object* obj = alloc_stack->Get(i);
    DCHECK(obj != NULL);
    if (live_bitmap->Test(obj)) {
      live_bitmap->Clear(obj);
    }
  }
}

void Heap::CollectGarbageInternal(GcType gc_type, bool clear_soft_references) {
  Locks::mutator_lock_->AssertNotHeld();
#ifndef NDEBUG
  {
    MutexLock mu(*Locks::thread_suspend_count_lock_);
    CHECK_EQ(Thread::Current()->GetState(), kWaitingPerformingGc);
  }
#endif

  // Ensure there is only one GC at a time.
  bool start_collect = false;
  while (!start_collect) {
    {
      MutexLock mu(*gc_complete_lock_);
      if (!is_gc_running_) {
        is_gc_running_ = true;
        start_collect = true;
      }
    }
    if (!start_collect) {
      WaitForConcurrentGcToComplete();
      // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
      //       Not doing at the moment to ensure soft references are cleared.
    }
  }
  gc_complete_lock_->AssertNotHeld();

  // We need to do partial GCs every now and then to avoid the heap growing too much and
  // fragmenting.
  if (gc_type == kGcTypeSticky && ++sticky_gc_count_ > kPartialGCFrequency) {
    gc_type = kGcTypePartial;
  }
  if (gc_type != kGcTypeSticky) {
    sticky_gc_count_ = 0;
  }

  if (concurrent_gc_) {
    CollectGarbageConcurrentMarkSweepPlan(gc_type, clear_soft_references);
  } else {
    CollectGarbageMarkSweepPlan(gc_type, clear_soft_references);
  }

  {
    MutexLock mu(*gc_complete_lock_);
    is_gc_running_ = false;
    // Wake anyone who may have been waiting for the GC to complete.
    gc_complete_cond_->Broadcast();
  }
  // Inform DDMS that a GC completed.
  Dbg::GcDidFinish();
}

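// Stop-the-world collection plan: suspends all threads, runs a mark-sweep over the spaces selected
// by gc_type, then resumes the world, enqueues cleared references, and reports timings.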
void Heap::CollectGarbageMarkSweepPlan(GcType gc_type, bool clear_soft_references) {
  TimingLogger timings("CollectGarbageInternal", true);

  std::stringstream gc_type_str;
  gc_type_str << gc_type << " ";

  // Suspend all threads and get exclusive access to the heap.
  uint64_t start_time = NanoTime();
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();
  timings.AddSplit("SuspendAll");
  Locks::mutator_lock_->AssertExclusiveHeld();

  size_t bytes_freed = 0;
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep(mark_stack_.get());

    mark_sweep.Init();
    timings.AddSplit("Init");

    // Pre verify the heap
    if (pre_gc_verify_heap_) {
      WriterMutexLock mu(*Locks::heap_bitmap_lock_);
      VerifyHeapReferences(std::string("Pre ") + gc_type_str.str() + "Gc");
      timings.AddSplit("VerifyHeapReferencesPreGC");
    }

    // Make sure that the tables have the correct pointer for the mark sweep.
    mod_union_table_->Init(&mark_sweep);
    zygote_mod_union_table_->Init(&mark_sweep);

    // Swap allocation stack and live stack, enabling us to have new allocations during this GC.
    MarkStack* temp = allocation_stack_.release();
    allocation_stack_.reset(live_stack_.release());
    live_stack_.reset(temp);

    // We will need to know which cards were dirty for doing concurrent processing of dirty cards.
    // TODO: Investigate using a mark stack instead of a vector.
    std::vector<byte*> dirty_cards;
    if (gc_type == kGcTypeSticky) {
      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
        card_table_->GetDirtyCards(*it, dirty_cards);
      }
    }

    // Clear image space cards and keep track of cards we cleared in the mod-union table.
    for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
      Space* space = *it;
      if (space->IsImageSpace()) {
        mod_union_table_->ClearCards(*it);
        timings.AddSplit("ClearModUnionCards");
      } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
        zygote_mod_union_table_->ClearCards(space);
        timings.AddSplit("ClearZygoteCards");
      } else {
        card_table_->ClearSpaceCards(space);
        timings.AddSplit("ClearCards");
      }
    }

    WriterMutexLock mu(*Locks::heap_bitmap_lock_);
    if (gc_type == kGcTypePartial) {
      // Copy the mark bits over from the live bits, do this as early as possible or else we can
      // accidentally un-mark roots.
      // Needed for scanning dirty objects.
      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
        if ((*it)->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
          mark_sweep.CopyMarkBits(*it);
        }
      }
      timings.AddSplit("CopyMarkBits");

      // We can assume that everything < alloc_space_ start is marked at this point.
      mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
    } else if (gc_type == kGcTypeSticky) {
      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
        if ((*it)->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
          mark_sweep.CopyMarkBits(*it);
        }
      }
      timings.AddSplit("CopyMarkBits");

      mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
    }

    MarkStackAsLive(live_stack_.get());

    if (gc_type != kGcTypeSticky) {
      live_stack_->Reset();
    }

    mark_sweep.MarkRoots();
    timings.AddSplit("MarkRoots");

    // Roots are marked on the bitmap and the mark_stack is empty.
    DCHECK(mark_sweep.IsMarkStackEmpty());

    UpdateAndMarkModUnion(timings, gc_type);

    if (verify_mod_union_table_) {
      zygote_mod_union_table_->Update();
      zygote_mod_union_table_->Verify();
      mod_union_table_->Update();
      mod_union_table_->Verify();
    }

    // Recursively mark all the non-image bits set in the mark bitmap.
    if (gc_type != kGcTypeSticky) {
      live_stack_->Reset();
      mark_sweep.RecursiveMark(gc_type == kGcTypePartial, timings);
    } else {
      mark_sweep.RecursiveMarkCards(card_table_.get(), dirty_cards, timings);
    }
    mark_sweep.DisableFinger();

    // Need to process references before the swap since it uses IsMarked.
    mark_sweep.ProcessReferences(clear_soft_references);
    timings.AddSplit("ProcessReferences");

    // This doesn't work with mutators unpaused for some reason, TODO: Fix.
    mark_sweep.SweepSystemWeaks(false);
    timings.AddSplit("SweepSystemWeaks");

    // Need to swap for VERIFY_OBJECT_ENABLED since we put things in the live bitmap after they
    // have been allocated.
    const bool swap = true;
    if (swap) {
      SwapBitmaps();
    }

#ifndef NDEBUG
    // Verify that we only reach marked objects from the image space
    mark_sweep.VerifyImageRoots();
    timings.AddSplit("VerifyImageRoots");
#endif

    if (gc_type != kGcTypeSticky) {
      mark_sweep.Sweep(gc_type == kGcTypePartial, swap);
    } else {
      mark_sweep.SweepArray(timings, live_stack_.get(), swap);
    }
    timings.AddSplit("Sweep");

    cleared_references = mark_sweep.GetClearedReferences();
    bytes_freed = mark_sweep.GetFreedBytes();
  }

  // Post gc verify the heap
  if (post_gc_verify_heap_) {
    WriterMutexLock mu(*Locks::heap_bitmap_lock_);
    VerifyHeapReferences(std::string("Post ") + gc_type_str.str() + "Gc");
    timings.AddSplit("VerifyHeapReferencesPostGC");
  }

  GrowForUtilization();
  timings.AddSplit("GrowForUtilization");

  thread_list->ResumeAll();
  timings.AddSplit("ResumeAll");

  EnqueueClearedReferences(&cleared_references);
  RequestHeapTrim();
  timings.AddSplit("Finish");

  // If the GC was slow, then print timings in the log.
  uint64_t duration = (NanoTime() - start_time) / 1000 * 1000;
  if (duration > MsToNs(50)) {
    const size_t percent_free = GetPercentFree();
    const size_t current_heap_size = GetUsedMemorySize();
    const size_t total_memory = GetTotalMemory();
    LOG(INFO) << gc_type_str.str() << " "
              << "GC freed " << PrettySize(bytes_freed) << ", " << percent_free << "% free, "
              << PrettySize(current_heap_size) << "/" << PrettySize(total_memory) << ", "
              << "paused " << PrettyDuration(duration);
    if (VLOG_IS_ON(heap)) {
      timings.Dump();
    }
  }

  CumulativeLogger* logger = cumulative_timings_.Get(gc_type);
  logger->Start();
  logger->AddLogger(timings);
  logger->End();  // Next iteration.
}

void Heap::UpdateAndMarkModUnion(TimingLogger& timings, GcType gc_type) {
  if (gc_type == kGcTypeSticky) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001062    // We don't need to do anything for the mod union table in this case since we are only
1063    // scanning dirty cards.
1064 return;
1065 }
1066
1067 // Update zygote mod union table.
Mathieu Chartier0325e622012-09-05 14:22:51 -07001068 if (gc_type == kGcTypePartial) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001069 zygote_mod_union_table_->Update();
1070 timings.AddSplit("UpdateZygoteModUnionTable");
1071
1072 zygote_mod_union_table_->MarkReferences();
1073 timings.AddSplit("ZygoteMarkReferences");
1074 }
1075
1076 // Processes the cards we cleared earlier and adds their objects into the mod-union table.
1077 mod_union_table_->Update();
1078 timings.AddSplit("UpdateModUnionTable");
1079
1080 // Scans all objects in the mod-union table.
1081 mod_union_table_->MarkReferences();
1082 timings.AddSplit("MarkImageToAllocSpaceReferences");
1083}
1084
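// Verification helper: logs any GC root that points directly at the object passed in via 'arg'.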
1085void Heap::RootMatchesObjectVisitor(const Object* root, void* arg) {
1086 Object* obj = reinterpret_cast<Object*>(arg);
1087 if (root == obj) {
1088 LOG(INFO) << "Object " << obj << " is a root";
1089 }
1090}
1091
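// Logs each object that a rescan of a card would have visited; used below to check whether the
// card table could have missed the reference.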
1092class ScanVisitor {
1093 public:
1094 void operator ()(const Object* obj) const {
1095 LOG(INFO) << "Would have rescanned object " << obj;
1096 }
1097};
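// Verifies that every reference reachable from an object is still live. On failure it logs the
// covering card, live bitmap and stack membership, reruns a mark from the object, checks whether
// any root points at it, and flags the verification as failed.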
1098
1099class VerifyReferenceVisitor {
1100 public:
1101 VerifyReferenceVisitor(Heap* heap, bool* failed)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001102 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
1103 Locks::heap_bitmap_lock_)
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001104 : heap_(heap),
1105 failed_(failed) {
1106 }
1107
1108 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
1109 // analysis.
1110 void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
1111 bool /* is_static */) const NO_THREAD_SAFETY_ANALYSIS {
1112 // Verify that the reference is live.
1113 if (ref != NULL && !IsLive(ref)) {
1114 CardTable* card_table = heap_->GetCardTable();
1115 MarkStack* alloc_stack = heap_->allocation_stack_.get();
1116 MarkStack* live_stack = heap_->live_stack_.get();
1117
1118 // Print the cards around our object
1119 byte* card_addr = card_table->CardFromAddr(obj);
1120 LOG(INFO) << "Object " << obj << " references dead object " << ref << " on IsDirty = "
1121 << (*card_addr == GC_CARD_DIRTY);
1122 void* cover_begin = card_table->AddrFromCard(card_addr);
1123 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
1124 GC_CARD_SIZE);
1125 LOG(INFO) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
1126 << "-" << cover_end;
1127 SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetSpaceBitmap(obj);
1128
1129 // Print out how the object is live.
1130 if (bitmap->Test(obj)) {
1131 LOG(INFO) << "Object " << obj << " found in live bitmap";
1132 } else if (std::binary_search(alloc_stack->Begin(), alloc_stack->End(), obj)) {
1133 LOG(INFO) << "Object " << obj << " found in allocation stack";
1134 }
1135
1136 if (std::binary_search(live_stack->Begin(), live_stack->End(), ref)) {
1137 LOG(INFO) << "Reference " << ref << " found in live stack!";
1138 }
1139
1140 // Attempt to see if the card table missed the reference.
1141 ScanVisitor scan_visitor;
1142 byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
1143 card_table->Scan(bitmap, byte_cover_begin, byte_cover_begin + GC_CARD_SIZE, scan_visitor,
1144 IdentityFunctor());
1145
1146 // Try and see if a mark sweep collector scans the reference.
1147 MarkStack* mark_stack = heap_->mark_stack_.get();
1148 MarkSweep ms(mark_stack);
1149 ms.Init();
1150 mark_stack->Reset();
1151 ms.SetFinger(reinterpret_cast<Object*>(~size_t(0)));
1152 // All the references should end up in the mark stack.
1153 ms.ScanRoot(obj);
1154      if (std::find(mark_stack->Begin(), mark_stack->End(), ref) != mark_stack->End()) {
1155 LOG(INFO) << "Ref found in the mark_stack when rescanning the object!";
1156 } else {
1157 LOG(INFO) << "Dumping mark stack contents";
1158 for (Object** it = mark_stack->Begin(); it != mark_stack->End(); ++it) {
1159 LOG(INFO) << *it;
1160 }
1161 }
1162 mark_stack->Reset();
1163
1164 // Search to see if any of the roots reference our object.
1165 void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
1166 Runtime::Current()->VisitRoots(&Heap::RootMatchesObjectVisitor, arg);
1167 *failed_ = true;
1168 }
1169 }
1170
1171 bool IsLive(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
1172 SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetSpaceBitmap(obj);
1173 if (bitmap != NULL) {
1174 if (bitmap->Test(obj)) {
1175 return true;
1176 }
1177 } else {
1178 heap_->DumpSpaces();
1179 LOG(FATAL) << "Object " << obj << " not found in any spaces";
1180 }
1181 MarkStack* alloc_stack = heap_->allocation_stack_.get();
1182    // At this point we need to search the allocation stack since things in the live stack may get swept.
1183 if (std::binary_search(alloc_stack->Begin(), alloc_stack->End(), const_cast<Object*>(obj))) {
1184 return true;
1185 }
1186 // Not either in the live bitmap or allocation stack, so the object must be dead.
1187 return false;
1188 }
1189
1190 private:
1191 Heap* heap_;
1192 bool* failed_;
1193};
1194
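// Applies VerifyReferenceVisitor to every reference field of each visited object and records
// whether any reference failed verification.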
1195class VerifyObjectVisitor {
1196 public:
1197 VerifyObjectVisitor(Heap* heap)
1198 : heap_(heap),
1199 failed_(false) {
1200
1201 }
1202
1203 void operator ()(const Object* obj) const
Ian Rogersb726dcb2012-09-05 08:57:23 -07001204 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001205 VerifyReferenceVisitor visitor(heap_, const_cast<bool*>(&failed_));
1206 MarkSweep::VisitObjectReferences(obj, visitor);
1207 }
1208
1209 bool Failed() const {
1210 return failed_;
1211 }
1212
1213 private:
1214 Heap* heap_;
1215 bool failed_;
1216};
1217
1218// Must do this with mutators suspended since we are directly accessing the allocation stacks.
1219void Heap::VerifyHeapReferences(const std::string& phase) {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001220 Locks::mutator_lock_->AssertExclusiveHeld();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001221  // Sort the allocation and live stacks so that we can binary search them efficiently.
1222 std::sort(allocation_stack_->Begin(), allocation_stack_->End());
1223 std::sort(live_stack_->Begin(), live_stack_->End());
1224 // Perform the verification.
1225 VerifyObjectVisitor visitor(this);
1226 GetLiveBitmap()->Visit(visitor);
1227 // We don't want to verify the objects in the allocation stack since they themselves may be
1228 // pointing to dead objects if they are not reachable.
1229 if (visitor.Failed()) {
1230 DumpSpaces();
1231 LOG(FATAL) << phase << " heap verification failed";
1232 }
1233}
1234
1235void Heap::SwapBitmaps() {
1236 // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
1237 // these bitmaps. Doing this enables us to sweep with the heap unlocked since new allocations
1238 // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark bit
1239 // instead, resulting in no new allocated objects being incorrectly freed by sweep.
Ian Rogersb726dcb2012-09-05 08:57:23 -07001240 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001241 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
1242 Space* space = *it;
1243 // We never allocate into zygote spaces.
1244 if (space->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
1245 live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
1246 mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap());
1247 space->AsAllocSpace()->SwapBitmaps();
1248 }
1249 }
1250}
1251
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001252void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft_references) {
1253 TimingLogger timings("ConcurrentCollectGarbageInternal", true);
1254 uint64_t root_begin = NanoTime(), root_end = 0, dirty_begin = 0, dirty_end = 0;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001255 std::stringstream gc_type_str;
1256 gc_type_str << gc_type << " ";
Mathieu Chartiera6399032012-06-11 18:49:50 -07001257
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001258  // Suspend all threads and get exclusive access to the heap.
1259 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1260 thread_list->SuspendAll();
1261 timings.AddSplit("SuspendAll");
Ian Rogersb726dcb2012-09-05 08:57:23 -07001262 Locks::mutator_lock_->AssertExclusiveHeld();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001263
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001264 size_t bytes_freed = 0;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001265 Object* cleared_references = NULL;
1266 {
1267 MarkSweep mark_sweep(mark_stack_.get());
1268 timings.AddSplit("ctor");
1269
1270 mark_sweep.Init();
1271 timings.AddSplit("Init");
1272
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001273    // Verify the heap before the GC.
1274 if (pre_gc_verify_heap_) {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001275 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001276 VerifyHeapReferences(std::string("Pre ") + gc_type_str.str() + "Gc");
1277 timings.AddSplit("VerifyHeapReferencesPreGC");
1278 }
1279
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001280    // Swap the stacks; this is safe since all the mutators are suspended at this point.
1281 MarkStack* temp = allocation_stack_.release();
1282 allocation_stack_.reset(live_stack_.release());
1283 live_stack_.reset(temp);
1284
1285 // We will need to know which cards were dirty for doing concurrent processing of dirty cards.
1286 // TODO: Investigate using a mark stack instead of a vector.
1287 std::vector<byte*> dirty_cards;
Mathieu Chartier0325e622012-09-05 14:22:51 -07001288 if (gc_type == kGcTypeSticky) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001289 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
1290 card_table_->GetDirtyCards(*it, dirty_cards);
1291 }
1292 }
1293
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001294 // Make sure that the tables have the correct pointer for the mark sweep.
1295 mod_union_table_->Init(&mark_sweep);
1296 zygote_mod_union_table_->Init(&mark_sweep);
1297
1298 // Clear image space cards and keep track of cards we cleared in the mod-union table.
1299 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
1300 Space* space = *it;
1301 if (space->IsImageSpace()) {
1302 mod_union_table_->ClearCards(*it);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001303 timings.AddSplit("ModUnionClearCards");
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001304 } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
1305 zygote_mod_union_table_->ClearCards(space);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001306 timings.AddSplit("ZygoteModUnionClearCards");
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001307 } else {
1308 card_table_->ClearSpaceCards(space);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001309 timings.AddSplit("ClearCards");
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001310 }
1311 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001312
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001313 {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001314 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001315
Mathieu Chartier0325e622012-09-05 14:22:51 -07001316 if (gc_type == kGcTypePartial) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001317 // Copy the mark bits over from the live bits, do this as early as possible or else we can
1318 // accidentally un-mark roots.
1319 // Needed for scanning dirty objects.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001320 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001321 if ((*it)->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
1322 mark_sweep.CopyMarkBits(*it);
1323 }
1324 }
1325 timings.AddSplit("CopyMarkBits");
1326 mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
Mathieu Chartier0325e622012-09-05 14:22:51 -07001327 } else if (gc_type == kGcTypeSticky) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001328 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001329 if ((*it)->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
1330 mark_sweep.CopyMarkBits(*it);
1331 }
1332 }
1333 timings.AddSplit("CopyMarkBits");
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001334 mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
1335 }
1336
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001337      // Mark everything as live so that sweeping system weaks works correctly for sticky mark
1338      // bit GCs.
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001339 MarkStackAsLive(live_stack_.get());
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001340 timings.AddSplit("MarkStackAsLive");
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001341
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001342 // TODO: Investigate whether or not this is really necessary for sticky mark bits.
Mathieu Chartier0325e622012-09-05 14:22:51 -07001343 if (gc_type != kGcTypeSticky) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001344 live_stack_->Reset();
1345 mark_sweep.MarkRoots();
1346 timings.AddSplit("MarkRoots");
1347 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001348
1349 if (verify_mod_union_table_) {
1350 zygote_mod_union_table_->Update();
1351 zygote_mod_union_table_->Verify();
1352 mod_union_table_->Update();
1353 mod_union_table_->Verify();
1354 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001355 }
1356
1357 // Roots are marked on the bitmap and the mark_stack is empty.
1358 DCHECK(mark_sweep.IsMarkStackEmpty());
1359
1360 // Allow mutators to go again, acquire share on mutator_lock_ to continue.
1361 thread_list->ResumeAll();
1362 {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001363 ReaderMutexLock reader_lock(*Locks::mutator_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001364 root_end = NanoTime();
1365 timings.AddSplit("RootEnd");
1366
Ian Rogersb726dcb2012-09-05 08:57:23 -07001367 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001368 UpdateAndMarkModUnion(timings, gc_type);
Mathieu Chartier0325e622012-09-05 14:22:51 -07001369 if (gc_type != kGcTypeSticky) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001370 // Recursively mark all the non-image bits set in the mark bitmap.
Mathieu Chartier0325e622012-09-05 14:22:51 -07001371 mark_sweep.RecursiveMark(gc_type == kGcTypePartial, timings);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001372 } else {
1373 mark_sweep.RecursiveMarkCards(card_table_.get(), dirty_cards, timings);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001374 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001375 mark_sweep.DisableFinger();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001376 }
1377 // Release share on mutator_lock_ and then get exclusive access.
1378 dirty_begin = NanoTime();
1379 thread_list->SuspendAll();
1380 timings.AddSplit("ReSuspend");
Ian Rogersb726dcb2012-09-05 08:57:23 -07001381 Locks::mutator_lock_->AssertExclusiveHeld();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001382
1383 {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001384 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001385
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001386 // Re-mark root set.
1387 mark_sweep.ReMarkRoots();
1388 timings.AddSplit("ReMarkRoots");
1389
1390 // Scan dirty objects, this is only required if we are not doing concurrent GC.
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001391 mark_sweep.RecursiveMarkDirtyObjects(false);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001392 timings.AddSplit("RecursiveMarkDirtyObjects");
1393 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001394
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001395 {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001396 ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001397 mark_sweep.ProcessReferences(clear_soft_references);
1398 timings.AddSplit("ProcessReferences");
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001399
1400      // TODO: this doesn't work with mutators unpaused for some reason; fix it.
1401 mark_sweep.SweepSystemWeaks(false);
1402 timings.AddSplit("SweepSystemWeaks");
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001403 }
1404 // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
1405 // these bitmaps. Doing this enables us to sweep with the heap unlocked since new allocations
1406 // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark
1407 // bit instead, resulting in no new allocated objects being incorrectly freed by sweep.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001408 const bool swap = true;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001409 if (swap) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001410 SwapBitmaps();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001411 }
1412
1413 if (kIsDebugBuild) {
1414 // Verify that we only reach marked objects from the image space.
Ian Rogersb726dcb2012-09-05 08:57:23 -07001415 ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001416 mark_sweep.VerifyImageRoots();
1417 timings.AddSplit("VerifyImageRoots");
1418 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001419
Mathieu Chartier0325e622012-09-05 14:22:51 -07001420 if (gc_type == kGcTypeSticky) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001421 // We only sweep over the live stack, and the live stack should not intersect with the
1422 // allocation stack, so it should be safe to UnMark anything in the allocation stack as live.
1423 // This only works for sticky Gcs though!
1424 UnMarkStackAsLive(allocation_stack_.get());
1425 }
1426 timings.AddSplit("UnMarkStacks");
1427
1428    // If we are going to do post GC verification, let's keep the mutators paused since we don't
1429    // want them to touch dead objects before we find them during verification.
1430 if (post_gc_verify_heap_) {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001431 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001432 VerifyHeapReferences(std::string("Post ") + gc_type_str.str() + "Gc");
1433 timings.AddSplit("VerifyHeapReferencesPostGC");
1434 }
1435
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001436 thread_list->ResumeAll();
1437 dirty_end = NanoTime();
Ian Rogersb726dcb2012-09-05 08:57:23 -07001438 Locks::mutator_lock_->AssertNotHeld();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001439
1440 {
1441 // TODO: this lock shouldn't be necessary (it's why we did the bitmap flip above).
Ian Rogersb726dcb2012-09-05 08:57:23 -07001442 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartier0325e622012-09-05 14:22:51 -07001443 if (gc_type != kGcTypeSticky) {
1444 mark_sweep.Sweep(gc_type == kGcTypePartial, swap);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001445 } else {
1446 mark_sweep.SweepArray(timings, live_stack_.get(), swap);
1447 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001448 timings.AddSplit("Sweep");
1449 }
1450
1451 cleared_references = mark_sweep.GetClearedReferences();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001452 bytes_freed = mark_sweep.GetFreedBytes();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001453 }
1454
1455 GrowForUtilization();
1456 timings.AddSplit("GrowForUtilization");
1457
1458 EnqueueClearedReferences(&cleared_references);
1459 RequestHeapTrim();
1460 timings.AddSplit("Finish");
1461
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001462 // If the GC was slow, then print timings in the log.
1463 uint64_t pause_roots = (root_end - root_begin) / 1000 * 1000;
1464 uint64_t pause_dirty = (dirty_end - dirty_begin) / 1000 * 1000;
Mathieu Chartier637e3482012-08-17 10:41:32 -07001465 uint64_t duration = (NanoTime() - root_begin) / 1000 * 1000;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001466 if (pause_roots > MsToNs(5) || pause_dirty > MsToNs(5)) {
Mathieu Chartier637e3482012-08-17 10:41:32 -07001467 const size_t percent_free = GetPercentFree();
Mathieu Chartier1cd9c5c2012-08-23 10:52:44 -07001468 const size_t current_heap_size = GetUsedMemorySize();
Mathieu Chartier637e3482012-08-17 10:41:32 -07001469 const size_t total_memory = GetTotalMemory();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001470 LOG(INFO) << gc_type_str.str()
Mathieu Chartier637e3482012-08-17 10:41:32 -07001471 << "Concurrent GC freed " << PrettySize(bytes_freed) << ", " << percent_free
Mathieu Chartier1cd9c5c2012-08-23 10:52:44 -07001472 << "% free, " << PrettySize(current_heap_size) << "/"
Mathieu Chartier637e3482012-08-17 10:41:32 -07001473 << PrettySize(total_memory) << ", " << "paused " << PrettyDuration(pause_roots)
1474 << "+" << PrettyDuration(pause_dirty) << " total " << PrettyDuration(duration);
Mathieu Chartier0325e622012-09-05 14:22:51 -07001475
1476 if (VLOG_IS_ON(heap)) {
1477 timings.Dump();
1478 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001479 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001480
Mathieu Chartier0325e622012-09-05 14:22:51 -07001481 CumulativeLogger* logger = cumulative_timings_.Get(gc_type);
1482 logger->Start();
1483 logger->AddLogger(timings);
1484 logger->End(); // Next iteration.
Carl Shapiro69759ea2011-07-21 18:13:35 -07001485}
1486
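// Blocks until any in-progress concurrent GC finishes. Returns true if we actually had to wait
// for a GC to complete, false if no concurrent GC was running or concurrent GC is disabled.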
Mathieu Chartierfc8cfac2012-06-19 11:56:36 -07001487bool Heap::WaitForConcurrentGcToComplete() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001488 if (concurrent_gc_) {
1489 bool do_wait = false;
1490 uint64_t wait_start;
1491 {
1492 // Check if GC is running holding gc_complete_lock_.
1493 MutexLock mu(*gc_complete_lock_);
1494 if (is_gc_running_) {
1495 wait_start = NanoTime();
1496 do_wait = true;
1497 }
Mathieu Chartiera6399032012-06-11 18:49:50 -07001498 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001499 if (do_wait) {
1500      // We must wait, so change the thread state and then sleep on gc_complete_cond_.
1501 ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete);
1502 {
1503 MutexLock mu(*gc_complete_lock_);
1504 while (is_gc_running_) {
1505 gc_complete_cond_->Wait(*gc_complete_lock_);
1506 }
1507 }
1508 uint64_t wait_time = NanoTime() - wait_start;
1509 if (wait_time > MsToNs(5)) {
1510 LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time);
1511 }
1512 return true;
1513 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001514 }
Mathieu Chartierfc8cfac2012-06-19 11:56:36 -07001515 return false;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001516}
1517
Elliott Hughesc967f782012-04-16 10:23:15 -07001518void Heap::DumpForSigQuit(std::ostream& os) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001519 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(num_bytes_allocated_) << "/"
1520 << PrettySize(GetTotalMemory()) << "; " << num_objects_allocated_ << " objects\n";
Mathieu Chartier0325e622012-09-05 14:22:51 -07001521 // Dump cumulative timings.
1522 LOG(INFO) << "Dumping cumulative Gc timings";
1523 for (CumulativeTimings::iterator it = cumulative_timings_.begin();
1524 it != cumulative_timings_.end(); ++it) {
1525 it->second->Dump();
1526 }
Elliott Hughesc967f782012-04-16 10:23:15 -07001527}
1528
1529size_t Heap::GetPercentFree() {
1530 size_t total = GetTotalMemory();
1531 return 100 - static_cast<size_t>(100.0f * static_cast<float>(num_bytes_allocated_) / total);
1532}
1533
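// Clamps the requested footprint to the alloc space capacity and installs it as the space's
// footprint limit; this only changes how far the heap may grow, it does not resize memory.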
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08001534void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001535 AllocSpace* alloc_space = alloc_space_;
1536 // TODO: Behavior for multiple alloc spaces?
1537 size_t alloc_space_capacity = alloc_space->Capacity();
1538 if (max_allowed_footprint > alloc_space_capacity) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001539 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
1540 << PrettySize(alloc_space_capacity);
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001541 max_allowed_footprint = alloc_space_capacity;
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001542 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001543 alloc_space->SetFootprintLimit(max_allowed_footprint);
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001544}
1545
Ian Rogers3bb17a62012-01-27 23:56:44 -08001546// kHeapIdealFree is the ideal maximum free size, when we grow the heap for utilization.
Shih-wei Liao7f1caab2011-10-06 12:11:04 -07001547static const size_t kHeapIdealFree = 2 * MB;
Ian Rogers3bb17a62012-01-27 23:56:44 -08001548// kHeapMinFree guarantees that you always have at least 512 KB free, when you grow for utilization,
1549// regardless of target utilization ratio.
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001550static const size_t kHeapMinFree = kHeapIdealFree / 4;
1551
Carl Shapiro69759ea2011-07-21 18:13:35 -07001552void Heap::GrowForUtilization() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001553 size_t target_size;
1554 bool use_footprint_limit = false;
1555 {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001556 // We know what our utilization is at this moment.
1557 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
1558 target_size = num_bytes_allocated_ / Heap::GetTargetHeapUtilization();
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001559
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001560 if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
1561 target_size = num_bytes_allocated_ + kHeapIdealFree;
1562 } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
1563 target_size = num_bytes_allocated_ + kHeapMinFree;
1564 }
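    // Worked example (assuming a target utilization of 0.5, which may differ from the actual
    // runtime default): with 10 MB allocated the raw target is 10 MB / 0.5 = 20 MB, which is
    // more than 10 MB + kHeapIdealFree (2 MB), so target_size is clamped to 12 MB.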
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001565
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001566 // Calculate when to perform the next ConcurrentGC.
Mathieu Chartier1cd9c5c2012-08-23 10:52:44 -07001567 if (GetTotalMemory() - GetUsedMemorySize() < concurrent_min_free_) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001568 // Not enough free memory to perform concurrent GC.
1569 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
1570 } else {
1571      // Compute this below so we avoid holding both the statistics and the alloc space locks.
1572 use_footprint_limit = true;
1573 }
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001574 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001575
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001576 if (use_footprint_limit) {
1577 size_t foot_print_limit = alloc_space_->GetFootprintLimit();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001578 concurrent_start_bytes_ = foot_print_limit - concurrent_start_size_;
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001579 }
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001580 SetIdealFootprint(target_size);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001581}
1582
jeffhaoc1160702011-10-27 15:48:45 -07001583void Heap::ClearGrowthLimit() {
jeffhaoc1160702011-10-27 15:48:45 -07001584 WaitForConcurrentGcToComplete();
jeffhaoc1160702011-10-27 15:48:45 -07001585 alloc_space_->ClearGrowthLimit();
1586}
1587
Elliott Hughesadb460d2011-10-05 17:02:34 -07001588void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001589 MemberOffset reference_queue_offset,
1590 MemberOffset reference_queueNext_offset,
1591 MemberOffset reference_pendingNext_offset,
1592 MemberOffset finalizer_reference_zombie_offset) {
Elliott Hughesadb460d2011-10-05 17:02:34 -07001593 reference_referent_offset_ = reference_referent_offset;
1594 reference_queue_offset_ = reference_queue_offset;
1595 reference_queueNext_offset_ = reference_queueNext_offset;
1596 reference_pendingNext_offset_ = reference_pendingNext_offset;
1597 finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
1598 CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1599 CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
1600 CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
1601 CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
1602 CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
1603}
1604
1605Object* Heap::GetReferenceReferent(Object* reference) {
1606 DCHECK(reference != NULL);
1607 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1608 return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
1609}
1610
1611void Heap::ClearReferenceReferent(Object* reference) {
1612 DCHECK(reference != NULL);
1613 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1614 reference->SetFieldObject(reference_referent_offset_, NULL, true);
1615}
1616
1617// Returns true if the reference object has not yet been enqueued.
1618bool Heap::IsEnqueuable(const Object* ref) {
1619 DCHECK(ref != NULL);
1620 const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
1621 const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
1622 return (queue != NULL) && (queue_next == NULL);
1623}
1624
1625void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
1626 DCHECK(ref != NULL);
1627 CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
1628 CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
1629 EnqueuePendingReference(ref, cleared_reference_list);
1630}
1631
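// Pending references form a circular singly-linked list threaded through the pendingNext field:
// *list anchors the list, new references are spliced in immediately after the anchor, and
// DequeuePendingReference removes from that same position, returning the anchor itself last.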
1632void Heap::EnqueuePendingReference(Object* ref, Object** list) {
1633 DCHECK(ref != NULL);
1634 DCHECK(list != NULL);
1635
1636 if (*list == NULL) {
1637 ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
1638 *list = ref;
1639 } else {
1640 Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
1641 ref->SetFieldObject(reference_pendingNext_offset_, head, false);
1642 (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
1643 }
1644}
1645
1646Object* Heap::DequeuePendingReference(Object** list) {
1647 DCHECK(list != NULL);
1648 DCHECK(*list != NULL);
1649 Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
1650 Object* ref;
1651 if (*list == head) {
1652 ref = *list;
1653 *list = NULL;
1654 } else {
1655 Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
1656 (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
1657 ref = head;
1658 }
1659 ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
1660 return ref;
1661}
1662
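// Registers the object for finalization by invoking the Java-side FinalizerReference.add(object).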
Ian Rogers5d4bdc22011-11-02 22:15:43 -07001663void Heap::AddFinalizerReference(Thread* self, Object* object) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001664 ScopedObjectAccess soa(self);
Elliott Hughes77405792012-03-15 15:22:12 -07001665 JValue args[1];
Elliott Hughesf24d3ce2012-04-11 17:43:37 -07001666 args[0].SetL(object);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001667 soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, NULL, args,
1668 NULL);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001669}
1670
1671size_t Heap::GetBytesAllocated() const {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001672 return num_bytes_allocated_;
1673}
1674
1675size_t Heap::GetObjectsAllocated() const {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001676 return num_objects_allocated_;
1677}
1678
1679size_t Heap::GetConcurrentStartSize() const {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001680 return concurrent_start_size_;
1681}
1682
1683size_t Heap::GetConcurrentMinFree() const {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001684 return concurrent_min_free_;
Elliott Hughesadb460d2011-10-05 17:02:34 -07001685}
1686
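// Hands the chain of cleared references to the Java side via ReferenceQueue.add so that the
// corresponding reference queues are notified.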
1687void Heap::EnqueueClearedReferences(Object** cleared) {
1688 DCHECK(cleared != NULL);
1689 if (*cleared != NULL) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001690 ScopedObjectAccess soa(Thread::Current());
Elliott Hughes77405792012-03-15 15:22:12 -07001691 JValue args[1];
Elliott Hughesf24d3ce2012-04-11 17:43:37 -07001692 args[0].SetL(*cleared);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001693 soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(), NULL,
1694 args, NULL);
Elliott Hughesadb460d2011-10-05 17:02:34 -07001695 *cleared = NULL;
1696 }
1697}
1698
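// Asks the Java Daemons thread to kick off a concurrent GC. The request is skipped if one is
// already outstanding, the runtime is still starting or shutting down, or concurrent GC is
// disabled.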
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001699void Heap::RequestConcurrentGC() {
Mathieu Chartier069387a2012-06-18 12:01:01 -07001700 // Make sure that we can do a concurrent GC.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001701 if (requesting_gc_ || !Runtime::Current()->IsFinishedStarting() ||
1702 Runtime::Current()->IsShuttingDown() || !Runtime::Current()->IsConcurrentGcEnabled()) {
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001703 return;
1704 }
1705
1706 requesting_gc_ = true;
1707 JNIEnv* env = Thread::Current()->GetJniEnv();
Mathieu Chartiera6399032012-06-11 18:49:50 -07001708 DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
1709 DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001710 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
1711 WellKnownClasses::java_lang_Daemons_requestGC);
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001712 CHECK(!env->ExceptionCheck());
1713 requesting_gc_ = false;
1714}
1715
1716void Heap::ConcurrentGC() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001717 if (Runtime::Current()->IsShuttingDown() || !concurrent_gc_) {
Mathieu Chartier2542d662012-06-21 17:14:11 -07001718 return;
1719 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001720
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001721  // TODO: We shouldn't need a WaitForConcurrentGcToComplete here since only the
1722  // concurrent GC resumes threads before the GC is completed, and this function
1723  // is only called from within the GC daemon thread.
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001724 if (!WaitForConcurrentGcToComplete()) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001725 // Start a concurrent GC as one wasn't in progress
1726 ScopedThreadStateChange tsc(Thread::Current(), kWaitingPerformingGc);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001727 if (alloc_space_->Size() > kMinAllocSpaceSizeForStickyGC) {
Mathieu Chartier0325e622012-09-05 14:22:51 -07001728 CollectGarbageInternal(kGcTypeSticky, false);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001729 } else {
Mathieu Chartier0325e622012-09-05 14:22:51 -07001730 CollectGarbageInternal(kGcTypePartial, false);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001731 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001732 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001733}
1734
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001735void Heap::Trim() {
Mathieu Chartiera6399032012-06-11 18:49:50 -07001736 WaitForConcurrentGcToComplete();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001737 alloc_space_->Trim();
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001738}
1739
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001740void Heap::RequestHeapTrim() {
1741 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
1742 // because that only marks object heads, so a large array looks like lots of empty space. We
1743 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
1744 // to utilization (which is probably inversely proportional to how much benefit we can expect).
1745 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
1746 // not how much use we're making of those pages.
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001747 uint64_t ms_time = NsToMs(NanoTime());
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001748 {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001749 float utilization = static_cast<float>(num_bytes_allocated_) / alloc_space_->Size();
1750 if ((utilization > 0.75f) || ((ms_time - last_trim_time_) < 2 * 1000)) {
1751 // Don't bother trimming the heap if it's more than 75% utilized, or if a
1752 // heap trim occurred in the last two seconds.
1753 return;
1754 }
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001755 }
Mathieu Chartiera6399032012-06-11 18:49:50 -07001756 if (!Runtime::Current()->IsFinishedStarting() || Runtime::Current()->IsShuttingDown()) {
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001757 // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time)
Mathieu Chartiera6399032012-06-11 18:49:50 -07001758 // Also: we do not wish to start a heap trim if the runtime is shutting down.
Ian Rogerse1d490c2012-02-03 09:09:07 -08001759 return;
1760 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001761 last_trim_time_ = ms_time;
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001762 JNIEnv* env = Thread::Current()->GetJniEnv();
Mathieu Chartiera6399032012-06-11 18:49:50 -07001763 DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
1764 DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001765 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
1766 WellKnownClasses::java_lang_Daemons_requestHeapTrim);
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001767 CHECK(!env->ExceptionCheck());
1768}
1769
Carl Shapiro69759ea2011-07-21 18:13:35 -07001770} // namespace art