/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <vector>

#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "heap-inl.h"
#include "image.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "object_utils.h"
#include "os.h"
#include "reflection.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

namespace gc {

static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
static constexpr bool kGCALotMode = false;
static constexpr size_t kGcAlotInterval = KB;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// relative to partial/full GC. This is desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static constexpr double kStickyGcThroughputAdjustment = 1.25;
// Whether or not we use the free list large object space.
static constexpr bool kUseFreeListSpaceForLOS = false;
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
static constexpr size_t kNonMovingSpaceCapacity = 64 * MB;

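// Constructs the heap: creates the image space (if an image file is given), the non-moving and
// main malloc spaces, the bump pointer spaces needed by moving collectors, the large object
// space, the card table, mod-union tables, remembered sets, the allocation/live/mark stacks, and
// the garbage collector instances for the configured collector types.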
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, double foreground_heap_growth_multiplier, size_t capacity,
           const std::string& image_file_name,
           CollectorType foreground_collector_type, CollectorType background_collector_type,
           size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
           size_t long_pause_log_threshold, size_t long_gc_log_threshold,
           bool ignore_max_footprint, bool use_tlab, bool verify_pre_gc_heap,
           bool verify_post_gc_heap, bool verify_pre_gc_rosalloc,
           bool verify_post_gc_rosalloc)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      heap_trim_request_lock_(nullptr),
      last_trim_time_(0),
      heap_transition_target_time_(0),
      heap_trim_request_pending_(false),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      have_zygote_space_(false),
      large_object_threshold_(std::numeric_limits<size_t>::max()),  // Starts out disabled.
      collector_type_running_(kCollectorTypeNone),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      native_need_to_run_finalization_(false),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      allocation_rate_(0),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? KB : MB),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
      use_tlab_(use_tlab) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  const bool is_zygote = Runtime::Current()->IsZygote();
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  if (!is_zygote) {
    large_object_threshold_ = kDefaultLargeObjectThreshold;
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      LOG(WARNING) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);

  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files
  byte* requested_alloc_space_begin = nullptr;
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str());
    CHECK(image_space != nullptr) << "Failed to create space for " << image_file_name;
    AddSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory, ensure alloc space
    // isn't going to get in the middle
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
  }
  if (is_zygote) {
    // Reserve the address range before we create the non moving space to make sure bitmaps don't
    // take it.
    std::string error_str;
    MemMap* mem_map = MemMap::MapAnonymous(
        "main space", requested_alloc_space_begin + kNonMovingSpaceCapacity, capacity,
        PROT_READ | PROT_WRITE, true, &error_str);
    CHECK(mem_map != nullptr) << error_str;
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // rosalloc spaces.
    non_moving_space_ = space::DlMallocSpace::Create(
        "zygote / non moving space", initial_size, kNonMovingSpaceCapacity, kNonMovingSpaceCapacity,
        requested_alloc_space_begin, false);
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    CreateMainMallocSpace(mem_map, initial_size, growth_limit, capacity);
  } else {
    std::string error_str;
    MemMap* mem_map = MemMap::MapAnonymous("main/non-moving space", requested_alloc_space_begin,
                                           capacity, PROT_READ | PROT_WRITE, true, &error_str);
    CHECK(mem_map != nullptr) << error_str;
    // Create the main free list space, which doubles as the non moving space. We can do this since
    // non zygote means that we won't have any background compaction.
    CreateMainMallocSpace(mem_map, initial_size, growth_limit, capacity);
    non_moving_space_ = main_space_;
  }
  CHECK(non_moving_space_ != nullptr);

  // We need to create the bump pointer if the foreground collector is a compacting GC. We only
  // create the bump pointer space if we are not a moving foreground collector but have a moving
  // background collector since the heap transition code will create the temp space by recycling
  // the bitmap from the main space.
  if (kMovingCollector) {
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    // TODO: Not create all the bump pointer spaces if not necessary (currently only GSS needs all
    // 2 of bump pointer spaces + main space) b/14059466. Divide by 2 for a temporary fix.
    const size_t bump_pointer_space_capacity = capacity / 2;
    bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
                                                          bump_pointer_space_capacity, nullptr);
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
                                                  bump_pointer_space_capacity, nullptr);
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
  }
  if (non_moving_space_ != main_space_) {
    AddSpace(non_moving_space_);
  }
  if (main_space_ != nullptr) {
    AddSpace(main_space_);
  }

  // Allocate the large object space.
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  AddSpace(large_object_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());

  // Relies on the spaces being sorted.
  byte* heap_begin = continuous_spaces_.front()->Begin();
  byte* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  // Card cache for now since it makes it easier for us to update the references to the copying
  // spaces.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
                                                      GetImageSpace());
  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
  AddModUnionTable(mod_union_table);

  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
    if (main_space_ != nullptr && main_space_ != non_moving_space_) {
      accounting::RememberedSet* main_space_rem_set =
          new accounting::RememberedSet("Main space remembered set", this, main_space_);
      CHECK(main_space_rem_set != nullptr) << "Failed to create main space remembered set";
      AddRememberedSet(main_space_rem_set);
    }
  }

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;

  // Default mark stack size in bytes.
  static const size_t default_mark_stack_size = 64 * KB;
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
                                                          max_allocation_stack_size_));
  live_stack_.reset(accounting::ObjectStack::Create("live stack",
                                                    max_allocation_stack_size_));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create it earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  heap_trim_request_lock_ = new Mutex("Heap trim request lock");
  last_gc_size_ = GetBytesAllocated();

  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);

  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }
  if (kMovingCollector) {
    // TODO: Clean this up.
    bool generational = foreground_collector_type_ == kCollectorTypeGSS;
    semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                     generational ? "generational" : "");
    garbage_collectors_.push_back(semi_space_collector_);

    concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
    garbage_collectors_.push_back(concurrent_copying_collector_);
  }

  if (running_on_valgrind_) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

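// Creates the main malloc space (rosalloc or dlmalloc) from the reserved mem map and decides
// whether its objects may be moved, based on the foreground/background collector types and
// whether a zygote space still needs to be carved out.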
void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_);
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    can_move_objects = !have_zygote_space_;
  }
  if (kUseRosAlloc) {
    main_space_ = space::RosAllocSpace::CreateFromMemMap(mem_map, "main rosalloc space",
                                                         kDefaultStartingSize, initial_size,
                                                         growth_limit, capacity, low_memory_mode_,
                                                         can_move_objects);
    CHECK(main_space_ != nullptr) << "Failed to create rosalloc space";
  } else {
    main_space_ = space::DlMallocSpace::CreateFromMemMap(mem_map, "main dlmalloc space",
                                                         kDefaultStartingSize, initial_size,
                                                         growth_limit, capacity,
                                                         can_move_objects);
    CHECK(main_space_ != nullptr) << "Failed to create dlmalloc space";
  }
  main_space_->SetFootprintLimit(main_space_->Capacity());
  VLOG(heap) << "Created main space " << main_space_;
}

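// Switches the allocator used for new allocations and resets the quick allocation entrypoints so
// that compiled code picks up the new allocator.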
void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

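// Falls back to non-moving collector types (CMS in the foreground) when compaction cannot be
// used, then transitions to the foreground collector right away.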
void Heap::DisableCompaction() {
  if (IsMovingGc(foreground_collector_type_)) {
    foreground_collector_type_ = kCollectorTypeCMS;
  }
  if (IsMovingGc(background_collector_type_)) {
    background_collector_type_ = foreground_collector_type_;
  }
  TransitionCollector(foreground_collector_type_);
}

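// Best-effort descriptor lookup that tolerates a corrupt or partially initialized heap; used for
// diagnostics (e.g. DumpObject) rather than normal execution.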
std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
  if (!IsValidContinuousSpaceObjectAddress(klass)) {
    return StringPrintf("<non heap address klass %p>", klass);
  }
  mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
  if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
    std::string result("[");
    result += SafeGetClassDescriptor(component_type);
    return result;
  } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
    return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
  } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
    return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
  } else {
    mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
    if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
      return StringPrintf("<non heap address dex_cache %p>", dex_cache);
    }
    const DexFile* dex_file = dex_cache->GetDexFile();
    uint16_t class_def_idx = klass->GetDexClassDefIndex();
    if (class_def_idx == DexFile::kDexNoIndex16) {
      return "<class def not found>";
    }
    const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
    const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
    return dex_file->GetTypeDescriptor(type_id);
  }
}

std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
  if (obj == nullptr) {
    return "null";
  }
  mirror::Class* klass = obj->GetClass<kVerifyNone>();
  if (klass == nullptr) {
    return "(class=null)";
  }
  std::string result(SafeGetClassDescriptor(klass));
  if (obj->IsClass()) {
    result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
  }
  return result;
}

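// Dumps a (possibly invalid) object for crash diagnostics. Unprotects all continuous spaces so
// the dump itself does not fault, then re-protects the page that faulted.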
void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
  if (obj == nullptr) {
    stream << "(obj=null)";
    return;
  }
  if (IsAligned<kObjectAlignment>(obj)) {
    space::Space* space = nullptr;
    // Don't use find space since it only finds spaces which actually contain objects instead of
    // spaces which may contain objects (e.g. cleared bump pointer spaces).
    for (const auto& cur_space : continuous_spaces_) {
      if (cur_space->HasAddress(obj)) {
        space = cur_space;
        break;
      }
    }
    // Unprotect all the spaces.
    for (const auto& space : continuous_spaces_) {
      mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
    }
    stream << "Object " << obj;
    if (space != nullptr) {
      stream << " in space " << *space;
    }
    mirror::Class* klass = obj->GetClass<kVerifyNone>();
    stream << "\nclass=" << klass;
    if (klass != nullptr) {
      stream << " type= " << SafePrettyTypeOf(obj);
    }
    // Re-protect the address we faulted on.
    mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
  }
}

bool Heap::IsCompilingBoot() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace() || space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

bool Heap::HasImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return true;
    }
  }
  return false;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsMovingGc(collector_type_running_)) {
    WaitForGcToCompleteLocked(self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GE(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}

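// Called when the process moves between foreground and background. Optionally stress-tests
// collector transitions, then requests a transition to the foreground collector immediately (to
// avoid jank) or to the background collector after a delay.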
void Heap::UpdateProcessState(ProcessState process_state) {
  if (process_state_ != process_state) {
    process_state_ = process_state;
    for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
      // Start at index 1 to avoid "is always false" warning.
      // Have iteration 1 always transition the collector.
      TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
          ? foreground_collector_type_ : background_collector_type_);
      usleep(kCollectorTransitionStressWait);
    }
    if (process_state_ == kProcessStateJankPerceptible) {
      // Transition back to foreground right away to prevent jank.
      RequestCollectorTransition(foreground_collector_type_, 0);
    } else {
      // Don't delay for debug builds since we may want to stress test the GC.
      RequestCollectorTransition(background_collector_type_, kIsDebugBuild ? 0 :
          kCollectorTransitionWait);
    }
  }
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

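// Visits every object in the heap: objects in the bump pointer space, objects still on the
// allocation stack, and objects tracked by the live bitmap. Thread suspension is disallowed while
// visiting because a moving GC could otherwise relocate objects mid-walk.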
void Heap::VisitObjects(ObjectCallback callback, void* arg) {
  Thread* self = Thread::Current();
  // GCs can move objects, so don't allow this.
  const char* old_cause = self->StartAssertNoThreadSuspension("Visiting objects");
  if (bump_pointer_space_ != nullptr) {
    // Visit objects in bump pointer space.
    bump_pointer_space_->Walk(callback, arg);
  }
  // TODO: Switch to standard begin and end to use a range-based loop.
  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
      it < end; ++it) {
    mirror::Object* obj = *it;
    if (obj != nullptr && obj->GetClass() != nullptr) {
      // Avoid the race condition caused by the object not yet being written into the allocation
      // stack or the class not yet being written in the object. Or, if kUseThreadLocalAllocationStack,
      // there can be nulls on the allocation stack.
      callback(obj, arg);
    }
  }
  GetLiveBitmap()->Walk(callback, arg);
  self->EndAssertNoThreadSuspension(old_cause);
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = rosalloc_space_ != nullptr ? rosalloc_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = dlmalloc_space_ != nullptr ? dlmalloc_space_ : non_moving_space_;
  // This is just logic to handle a case of either not having a rosalloc or dlmalloc space.
  // TODO: Generalize this to n bitmaps?
  if (space1 == nullptr) {
    DCHECK(space2 != nullptr);
    space1 = space2;
  }
  if (space2 == nullptr) {
    DCHECK(space1 != nullptr);
    space2 = space1;
  }
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 large_object_space_->GetLiveBitmap(), stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

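// Registers a space with the heap: hooks its live/mark bitmaps into the heap bitmaps, tracks it
// in the continuous or discontinuous space list (kept sorted by start address), and optionally
// makes it the default malloc space.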
void Heap::AddSpace(space::Space* space, bool set_as_default) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    if (set_as_default) {
      if (continuous_space->IsDlMallocSpace()) {
        dlmalloc_space_ = continuous_space->AsDlMallocSpace();
      } else if (continuous_space->IsRosAllocSpace()) {
        rosalloc_space_ = continuous_space->AsRosAllocSpace();
      }
    }
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
      return a->Begin() < b->Begin();
    });
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
    if (continuous_space == dlmalloc_space_) {
      dlmalloc_space_ = nullptr;
    } else if (continuous_space == rosalloc_space_) {
      rosalloc_space_ = nullptr;
    }
    if (continuous_space == main_space_) {
      main_space_ = nullptr;
    } else if (continuous_space == bump_pointer_space_) {
      bump_pointer_space_ = nullptr;
    } else if (continuous_space == temp_space_) {
      temp_space_ = nullptr;
    }
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
}

void Heap::RegisterGCAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.FetchAndAdd(bytes);
  }
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.FetchAndSub(bytes);
  }
}

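// Prints cumulative GC statistics: per-collector timings, pause histograms, freed bytes/objects,
// throughput, allocation totals, and total mutator pause and GC wait times.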
void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;
  // Dump cumulative loggers for each GC type.
  uint64_t total_paused_time = 0;
  for (auto& collector : garbage_collectors_) {
    const CumulativeLogger& logger = collector->GetCumulativeTimings();
    const size_t iterations = logger.GetIterations();
    const Histogram<uint64_t>& pause_histogram = collector->GetPauseHistogram();
    if (iterations != 0 && pause_histogram.SampleSize() != 0) {
      os << ConstDumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      Histogram<uint64_t>::CumulativeData cumulative_data;
      pause_histogram.CreateHistogram(&cumulative_data);
      pause_histogram.PrintConfidenceIntervals(os, 0.99, cumulative_data);
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns)
         << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
    collector->ResetMeasurements();
  }
  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (kMeasureAllocationTime) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
}

Heap::~Heap() {
  VLOG(heap) << "Starting ~Heap()";
  STLDeleteElements(&garbage_collectors_);
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();
  STLDeleteValues(&mod_union_tables_);
  STLDeleteValues(&remembered_sets_);
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  delete heap_trim_request_lock_;
  VLOG(heap) << "Finished ~Heap()";
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

struct SoftReferenceArgs {
  IsMarkedCallback* is_marked_callback_;
  MarkObjectCallback* mark_callback_;
  void* arg_;
};

mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
  SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
  // TODO: Not preserve all soft references.
  return args->mark_callback_(obj, args->arg_);
}

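// Unless soft references must be cleared, preserves soft referents by marking them through the
// supplied callbacks so they survive this collection.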
Mathieu Chartier1ad27842014-03-19 17:08:17 -0700791void Heap::ProcessSoftReferences(TimingLogger& timings, bool clear_soft,
792 IsMarkedCallback* is_marked_callback,
793 MarkObjectCallback* mark_object_callback,
794 ProcessMarkStackCallback* process_mark_stack_callback, void* arg) {
795 // Unless required to clear soft references with white references, preserve some white referents.
796 if (!clear_soft) {
797 // Don't clear for sticky GC.
798 SoftReferenceArgs soft_reference_args;
799 soft_reference_args.is_marked_callback_ = is_marked_callback;
800 soft_reference_args.mark_callback_ = mark_object_callback;
801 soft_reference_args.arg_ = arg;
802 // References with a marked referent are removed from the list.
803 soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
804 &soft_reference_args);
805 process_mark_stack_callback(arg);
806 }
807}
808
Mathieu Chartier39e32612013-11-12 16:28:05 -0800809// Process reference class instances and schedule finalizations.
810void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
Mathieu Chartier83c8ee02014-01-28 14:50:23 -0800811 IsMarkedCallback* is_marked_callback,
Mathieu Chartier3bb57c72014-02-18 11:38:45 -0800812 MarkObjectCallback* mark_object_callback,
813 ProcessMarkStackCallback* process_mark_stack_callback, void* arg) {
Mathieu Chartier0e54cd02014-03-20 12:41:23 -0700814 timings.StartSplit("(Paused)ProcessReferences");
Mathieu Chartier1ad27842014-03-19 17:08:17 -0700815 ProcessSoftReferences(timings, clear_soft, is_marked_callback, mark_object_callback,
816 process_mark_stack_callback, arg);
Mathieu Chartier39e32612013-11-12 16:28:05 -0800817 // Clear all remaining soft and weak references with white referents.
818 soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
819 weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
820 timings.EndSplit();
821 // Preserve all white objects with finalize methods and schedule them for finalization.
Mathieu Chartier3bb57c72014-02-18 11:38:45 -0800822 timings.StartSplit("(Paused)EnqueueFinalizerReferences");
Mathieu Chartier39e32612013-11-12 16:28:05 -0800823 finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
Mathieu Chartier3bb57c72014-02-18 11:38:45 -0800824 mark_object_callback, arg);
825 process_mark_stack_callback(arg);
Mathieu Chartier39e32612013-11-12 16:28:05 -0800826 timings.EndSplit();
Mathieu Chartier3bb57c72014-02-18 11:38:45 -0800827 timings.StartSplit("(Paused)ProcessReferences");
Mathieu Chartier39e32612013-11-12 16:28:05 -0800828 // Clear all f-reachable soft and weak references with white referents.
829 soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
830 weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
831 // Clear all phantom references with white referents.
832 phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
833 // At this point all reference queues other than the cleared references should be empty.
834 DCHECK(soft_reference_queue_.IsEmpty());
835 DCHECK(weak_reference_queue_.IsEmpty());
836 DCHECK(finalizer_reference_queue_.IsEmpty());
837 DCHECK(phantom_reference_queue_.IsEmpty());
838 timings.EndSplit();
839}
840
Mathieu Chartier39e32612013-11-12 16:28:05 -0800841// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
842// marked, put it on the appropriate list in the heap for later processing.
Mathieu Chartier8fa2dad2014-03-13 12:22:56 -0700843void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
Mathieu Chartier83c8ee02014-01-28 14:50:23 -0800844 IsMarkedCallback is_marked_callback, void* arg) {
Mathieu Chartier3b05e9b2014-03-25 09:29:43 -0700845 // klass can be the class of the old object if the visitor already updated the class of ref.
846 DCHECK(klass->IsReferenceClass());
Mathieu Chartier8fa2dad2014-03-13 12:22:56 -0700847 mirror::Object* referent = ref->GetReferent();
Mathieu Chartier39e32612013-11-12 16:28:05 -0800848 if (referent != nullptr) {
Mathieu Chartier83c8ee02014-01-28 14:50:23 -0800849 mirror::Object* forward_address = is_marked_callback(referent, arg);
Mathieu Chartier39e32612013-11-12 16:28:05 -0800850 // Null means that the object is not currently marked.
851 if (forward_address == nullptr) {
852 Thread* self = Thread::Current();
853 // TODO: Remove these locks, and use atomic stacks for storing references?
854 // We need to check that the references haven't already been enqueued since we can end up
855 // scanning the same reference multiple times due to dirty cards.
856 if (klass->IsSoftReferenceClass()) {
Mathieu Chartier8fa2dad2014-03-13 12:22:56 -0700857 soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
Mathieu Chartier39e32612013-11-12 16:28:05 -0800858 } else if (klass->IsWeakReferenceClass()) {
Mathieu Chartier8fa2dad2014-03-13 12:22:56 -0700859 weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
Mathieu Chartier39e32612013-11-12 16:28:05 -0800860 } else if (klass->IsFinalizerReferenceClass()) {
Mathieu Chartier8fa2dad2014-03-13 12:22:56 -0700861 finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
Mathieu Chartier39e32612013-11-12 16:28:05 -0800862 } else if (klass->IsPhantomReferenceClass()) {
Mathieu Chartier8fa2dad2014-03-13 12:22:56 -0700863 phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
Mathieu Chartier39e32612013-11-12 16:28:05 -0800864 } else {
865 LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
866 << klass->GetAccessFlags();
867 }
868 } else if (referent != forward_address) {
869 // Referent is already marked and we need to update it.
Mathieu Chartier8fa2dad2014-03-13 12:22:56 -0700870 ref->SetReferent<false>(forward_address);
Mathieu Chartier39e32612013-11-12 16:28:05 -0800871 }
872 }
873}
874
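// ----------------------------------------------------------------------------
// Illustrative sketch, not part of heap.cc: DelayReferenceReferent above
// dispatches a Reference to one of four queues based on its class and only
// enqueues it once, since dirty cards can make the GC scan the same reference
// repeatedly. A stand-alone model of that dispatch using an enum instead of
// mirror::Class; every name here is an assumption for illustration.
#include <deque>

namespace reference_dispatch_sketch {

enum class ReferenceKind { kSoft, kWeak, kFinalizer, kPhantom };

struct PendingReference {
  ReferenceKind kind;
  bool enqueued = false;
};

struct ReferenceQueues {
  std::deque<PendingReference*> soft, weak, finalizer, phantom;

  // Enqueue the reference on the queue matching its kind, at most once.
  void EnqueueIfNotEnqueued(PendingReference* ref) {
    if (ref->enqueued) {
      return;  // Already queued by an earlier scan of the same reference.
    }
    ref->enqueued = true;
    switch (ref->kind) {
      case ReferenceKind::kSoft:      soft.push_back(ref); break;
      case ReferenceKind::kWeak:      weak.push_back(ref); break;
      case ReferenceKind::kFinalizer: finalizer.push_back(ref); break;
      case ReferenceKind::kPhantom:   phantom.push_back(ref); break;
    }
  }
};

}  // namespace reference_dispatch_sketch
// ----------------------------------------------------------------------------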
Ian Rogers1d54e732013-05-02 21:10:01 -0700875space::ImageSpace* Heap::GetImageSpace() const {
Mathieu Chartier02e25112013-08-14 16:14:24 -0700876 for (const auto& space : continuous_spaces_) {
877 if (space->IsImageSpace()) {
878 return space->AsImageSpace();
Mathieu Chartierb062fdd2012-07-03 09:51:48 -0700879 }
880 }
881  return nullptr;
882}
883
Elliott Hughes8a8b9cb2012-04-13 18:29:22 -0700884static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700885 size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
Elliott Hughes8a8b9cb2012-04-13 18:29:22 -0700886 if (used_bytes < chunk_size) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700887 size_t chunk_free_bytes = chunk_size - used_bytes;
888 size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
889 max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
Elliott Hughes8a8b9cb2012-04-13 18:29:22 -0700890 }
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -0700891}
892
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700893void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) {
894 std::ostringstream oss;
Ian Rogersef7d42f2014-01-06 12:55:46 -0800895 size_t total_bytes_free = GetFreeMemory();
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700896 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
897 << " free bytes";
898  // If the allocation failed due to fragmentation, print out the largest contiguous allocation.
899 if (!large_object_allocation && total_bytes_free >= byte_count) {
900 size_t max_contiguous_allocation = 0;
901 for (const auto& space : continuous_spaces_) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700902 if (space->IsMallocSpace()) {
903 // To allow the Walk/InspectAll() to exclusively-lock the mutator
904 // lock, temporarily release the shared access to the mutator
905 // lock here by transitioning to the suspended state.
906 Locks::mutator_lock_->AssertSharedHeld(self);
907 self->TransitionFromRunnableToSuspended(kSuspended);
908 space->AsMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
909 self->TransitionFromSuspendedToRunnable();
910 Locks::mutator_lock_->AssertSharedHeld(self);
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700911 }
912 }
913 oss << "; failed due to fragmentation (largest possible contiguous allocation "
914 << max_contiguous_allocation << " bytes)";
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700915 }
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700916 self->ThrowOutOfMemoryError(oss.str().c_str());
917}
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -0700918
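// ----------------------------------------------------------------------------
// Illustrative sketch, not part of heap.cc: MSpaceChunkCallback above records
// the largest free run seen while walking malloc-space chunks, which the OOME
// message then reports when total free memory would have sufficed but the
// heap is too fragmented. The same bookkeeping over plain (start, end, used)
// triples; names are assumptions for illustration.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

namespace fragmentation_sketch {

struct Chunk {
  uintptr_t start;
  uintptr_t end;
  size_t used_bytes;
};

// Returns the size of the largest free region across all chunks.
inline size_t LargestContiguousFree(const std::vector<Chunk>& chunks) {
  size_t max_free = 0;
  for (const Chunk& c : chunks) {
    const size_t chunk_size = static_cast<size_t>(c.end - c.start);
    if (c.used_bytes < chunk_size) {
      max_free = std::max(max_free, chunk_size - c.used_bytes);
    }
  }
  return max_free;
}

}  // namespace fragmentation_sketch
// ----------------------------------------------------------------------------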
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800919void Heap::DoPendingTransitionOrTrim() {
920 Thread* self = Thread::Current();
921 CollectorType desired_collector_type;
922 // Wait until we reach the desired transition time.
923 while (true) {
924 uint64_t wait_time;
925 {
926 MutexLock mu(self, *heap_trim_request_lock_);
927 desired_collector_type = desired_collector_type_;
928 uint64_t current_time = NanoTime();
929 if (current_time >= heap_transition_target_time_) {
930 break;
931 }
932 wait_time = heap_transition_target_time_ - current_time;
933 }
934 ScopedThreadStateChange tsc(self, kSleeping);
935 usleep(wait_time / 1000); // Usleep takes microseconds.
936 }
Mathieu Chartier7bf52d22014-03-13 14:46:09 -0700937 // Transition the collector if the desired collector type is not the same as the current
938 // collector type.
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800939 TransitionCollector(desired_collector_type);
Mathieu Chartier440e4ce2014-03-31 16:36:35 -0700940 if (!CareAboutPauseTimes()) {
941 // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
942 // about pauses.
943 Runtime* runtime = Runtime::Current();
944 runtime->GetThreadList()->SuspendAll();
945 runtime->GetMonitorList()->DeflateMonitors();
946 runtime->GetThreadList()->ResumeAll();
947 // Do a heap trim if it is needed.
948 Trim();
949 }
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800950}
951
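// ----------------------------------------------------------------------------
// Illustrative sketch, not part of heap.cc: DoPendingTransitionOrTrim above
// re-reads the transition deadline under the lock and sleeps outside of it,
// so a concurrently updated deadline is still honored. The same pattern with
// std::mutex and std::chrono standing in for the runtime's locks and
// NanoTime(); all names are assumptions for illustration.
#include <chrono>
#include <mutex>
#include <thread>

namespace deadline_wait_sketch {

inline void WaitUntilDeadline(std::mutex* lock,
                              const std::chrono::steady_clock::time_point* deadline) {
  while (true) {
    std::chrono::steady_clock::duration remaining{};
    {
      std::lock_guard<std::mutex> guard(*lock);
      const auto now = std::chrono::steady_clock::now();
      if (now >= *deadline) {
        return;  // Deadline reached; the caller may now transition or trim.
      }
      remaining = *deadline - now;
    }
    std::this_thread::sleep_for(remaining);  // Sleep without holding the lock.
  }
}

}  // namespace deadline_wait_sketch
// ----------------------------------------------------------------------------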
Mathieu Chartier590fee92013-09-13 13:46:47 -0700952void Heap::Trim() {
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -0800953 Thread* self = Thread::Current();
954 {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800955 MutexLock mu(self, *heap_trim_request_lock_);
Mathieu Chartier7bf52d22014-03-13 14:46:09 -0700956 if (!heap_trim_request_pending_ || last_trim_time_ + kHeapTrimWait >= NanoTime()) {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800957 return;
958 }
Mathieu Chartier7bf52d22014-03-13 14:46:09 -0700959 last_trim_time_ = NanoTime();
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800960 heap_trim_request_pending_ = false;
961 }
962 {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -0800963 // Need to do this before acquiring the locks since we don't want to get suspended while
964 // holding any locks.
965 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -0800966 // Pretend we are doing a GC to prevent background compaction from deleting the space we are
967 // trimming.
968 MutexLock mu(self, *gc_complete_lock_);
969 // Ensure there is only one GC at a time.
970 WaitForGcToCompleteLocked(self);
971 collector_type_running_ = kCollectorTypeHeapTrim;
972 }
Mathieu Chartier590fee92013-09-13 13:46:47 -0700973 uint64_t start_ns = NanoTime();
974 // Trim the managed spaces.
975 uint64_t total_alloc_space_allocated = 0;
976 uint64_t total_alloc_space_size = 0;
977 uint64_t managed_reclaimed = 0;
978 for (const auto& space : continuous_spaces_) {
Mathieu Chartiera1602f22014-01-13 17:19:19 -0800979 if (space->IsMallocSpace()) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700980 gc::space::MallocSpace* alloc_space = space->AsMallocSpace();
Mathieu Chartier590fee92013-09-13 13:46:47 -0700981 total_alloc_space_size += alloc_space->Size();
982 managed_reclaimed += alloc_space->Trim();
983 }
984 }
Mathieu Chartier31f44142014-04-08 14:40:03 -0700985 total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated();
986 if (bump_pointer_space_ != nullptr) {
987 total_alloc_space_allocated -= bump_pointer_space_->Size();
988 }
Mathieu Chartier590fee92013-09-13 13:46:47 -0700989 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
990 static_cast<float>(total_alloc_space_size);
991 uint64_t gc_heap_end_ns = NanoTime();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -0800992 // We never move things in the native heap, so we can finish the GC at this point.
993 FinishGC(self, collector::kGcTypeNone);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700994 // Trim the native heap.
995 dlmalloc_trim(0);
996 size_t native_reclaimed = 0;
997 dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
998 uint64_t end_ns = NanoTime();
999 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1000 << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
1001 << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
1002 << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
1003 << "%.";
1004}
1005
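// ----------------------------------------------------------------------------
// Illustrative sketch, not part of heap.cc: the utilization figure logged at
// the end of Trim() is simply bytes still allocated in the trimmed alloc
// spaces divided by their total size, truncated toward zero for the log
// message. Stand-alone form; names are assumptions for illustration.
#include <cstdint>

namespace trim_utilization_sketch {

// Returns utilization as an integer percentage; 0 if there is no alloc space.
inline int ManagedUtilizationPercent(uint64_t allocated_bytes, uint64_t space_bytes) {
  if (space_bytes == 0) {
    return 0;
  }
  return static_cast<int>((100 * allocated_bytes) / space_bytes);
}

}  // namespace trim_utilization_sketch
// ----------------------------------------------------------------------------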
1006bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
1007 // Note: we deliberately don't take the lock here, and mustn't test anything that would require
1008 // taking the lock.
1009 if (obj == nullptr) {
Elliott Hughes88c5c352012-03-15 18:49:48 -07001010 return true;
1011 }
Mathieu Chartier15d34022014-02-26 17:16:38 -08001012 return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001013}
1014
Mathieu Chartierd68ac702014-02-11 14:50:51 -08001015bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
1016 return FindContinuousSpaceFromObject(obj, true) != nullptr;
1017}
1018
Mathieu Chartier15d34022014-02-26 17:16:38 -08001019bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
1020 if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
1021 return false;
1022 }
1023 for (const auto& space : continuous_spaces_) {
1024 if (space->HasAddress(obj)) {
1025 return true;
1026 }
1027 }
1028 return false;
Elliott Hughesa2501992011-08-26 19:39:54 -07001029}
1030
Ian Rogersef7d42f2014-01-06 12:55:46 -08001031bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001032 bool search_live_stack, bool sorted) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001033 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
1034 return false;
1035 }
1036 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001037 mirror::Class* klass = obj->GetClass<kVerifyNone>();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001038 if (obj == klass) {
Mathieu Chartier9be9a7a2014-01-24 14:07:33 -08001039 // This case happens for java.lang.Class.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001040 return true;
1041 }
1042 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1043 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001044 // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1045    // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1046 return temp_space_->Contains(obj);
Ian Rogers1d54e732013-05-02 21:10:01 -07001047 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001048 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001049 space::DiscontinuousSpace* d_space = nullptr;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001050 if (c_space != nullptr) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001051 if (c_space->GetLiveBitmap()->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001052 return true;
1053 }
1054 } else {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001055 d_space = FindDiscontinuousSpaceFromObject(obj, true);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001056 if (d_space != nullptr) {
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001057 if (d_space->GetLiveBitmap()->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001058 return true;
1059 }
1060 }
1061 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001062  // This covers the allocation/live stack swap, which is done without suspending mutators.
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001063 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1064 if (i > 0) {
1065 NanoSleep(MsToNs(10));
Ian Rogers1d54e732013-05-02 21:10:01 -07001066 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001067 if (search_allocation_stack) {
1068 if (sorted) {
Mathieu Chartier407f7022014-02-18 14:37:05 -08001069 if (allocation_stack_->ContainsSorted(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001070 return true;
1071 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08001072 } else if (allocation_stack_->Contains(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001073 return true;
1074 }
1075 }
1076
1077 if (search_live_stack) {
1078 if (sorted) {
Mathieu Chartier407f7022014-02-18 14:37:05 -08001079 if (live_stack_->ContainsSorted(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001080 return true;
1081 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08001082 } else if (live_stack_->Contains(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001083 return true;
1084 }
1085 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001086 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001087 // We need to check the bitmaps again since there is a race where we mark something as live and
1088 // then clear the stack containing it.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001089 if (c_space != nullptr) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001090 if (c_space->GetLiveBitmap()->Test(obj)) {
1091 return true;
1092 }
1093 } else {
1094 d_space = FindDiscontinuousSpaceFromObject(obj, true);
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001095 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001096 return true;
1097 }
1098 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001099 return false;
Elliott Hughes6a5bd492011-10-28 14:33:57 -07001100}
1101
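// ----------------------------------------------------------------------------
// Illustrative sketch, not part of heap.cc: IsLiveObjectLocked above retries
// its stack searches a few times with a short sleep because the allocation
// and live stacks can be swapped concurrently with the check. The retry shape
// factored out over an arbitrary predicate; names are assumptions.
#include <chrono>
#include <functional>
#include <thread>

namespace racy_retry_sketch {

// Runs the check up to 'attempts' times, sleeping briefly between attempts.
inline bool RetryRacyCheck(const std::function<bool()>& check, int attempts) {
  for (int i = 0; i < attempts; ++i) {
    if (i > 0) {
      std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
    if (check()) {
      return true;
    }
  }
  return false;  // The caller still re-checks its bitmaps once after this.
}

}  // namespace racy_retry_sketch
// ----------------------------------------------------------------------------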
Mathieu Chartier590fee92013-09-13 13:46:47 -07001102void Heap::DumpSpaces(std::ostream& stream) {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001103 for (const auto& space : continuous_spaces_) {
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001104 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1105 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001106 stream << space << " " << *space << "\n";
1107 if (live_bitmap != nullptr) {
1108 stream << live_bitmap << " " << *live_bitmap << "\n";
1109 }
1110 if (mark_bitmap != nullptr) {
1111 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1112 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001113 }
Mathieu Chartier02e25112013-08-14 16:14:24 -07001114 for (const auto& space : discontinuous_spaces_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001115 stream << space << " " << *space << "\n";
Mathieu Chartier128c52c2012-10-16 14:12:41 -07001116 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001117}
1118
Ian Rogersef7d42f2014-01-06 12:55:46 -08001119void Heap::VerifyObjectBody(mirror::Object* obj) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001120  if (this == nullptr || verify_object_mode_ == kVerifyObjectModeDisabled) {
1121 return;
1122 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001123 // Ignore early dawn of the universe verifications.
Ian Rogersb122a4b2013-11-19 18:00:50 -08001124 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.Load()) < 10 * KB)) {
Ian Rogers62d6c772013-02-27 08:32:07 -08001125 return;
1126 }
Mathieu Chartier4e305412014-02-19 10:54:44 -08001127 CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
1128 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(
1129 mirror::Object::ClassOffset(), false);
1130 CHECK(c != nullptr) << "Null class in object " << obj;
1131 CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001132 CHECK(VerifyClassClass(c));
Mathieu Chartier0325e622012-09-05 14:22:51 -07001133
Mathieu Chartier4e305412014-02-19 10:54:44 -08001134 if (verify_object_mode_ > kVerifyObjectModeFast) {
1135 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
Ian Rogers1d54e732013-05-02 21:10:01 -07001136 if (!IsLiveObjectLocked(obj)) {
1137 DumpSpaces();
1138 LOG(FATAL) << "Object is dead: " << obj;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001139 }
Mathieu Chartierdcf8d722012-08-02 14:55:54 -07001140 }
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001141}
1142
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001143void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001144 reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001145}
1146
1147void Heap::VerifyHeap() {
Ian Rogers50b35e22012-10-04 10:09:15 -07001148 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001149 GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001150}
1151
Mathieu Chartier601276a2014-03-20 15:12:30 -07001152void Heap::RecordFree(ssize_t freed_objects, ssize_t freed_bytes) {
1153  // Use signed comparison since freed bytes can be negative when background compaction to
1154  // foreground transitions occur. This is caused by moving objects from a bump pointer space to a
1155  // free-list-backed space, which typically increases the memory footprint due to padding and binning.
1156 DCHECK_LE(freed_bytes, static_cast<ssize_t>(num_bytes_allocated_.Load()));
1157 DCHECK_GE(freed_objects, 0);
Ian Rogersb122a4b2013-11-19 18:00:50 -08001158 num_bytes_allocated_.FetchAndSub(freed_bytes);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001159 if (Runtime::Current()->HasStatsEnabled()) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001160 RuntimeStats* thread_stats = Thread::Current()->GetStats();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001161 thread_stats->freed_objects += freed_objects;
Elliott Hughes307f75d2011-10-12 18:04:40 -07001162 thread_stats->freed_bytes += freed_bytes;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001163 // TODO: Do this concurrently.
1164 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1165 global_stats->freed_objects += freed_objects;
1166 global_stats->freed_bytes += freed_bytes;
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001167 }
Carl Shapiro58551df2011-07-24 03:09:51 -07001168}
1169
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001170mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001171 size_t alloc_size, size_t* bytes_allocated,
Ian Rogers6fac4472014-02-25 17:01:10 -08001172 size_t* usable_size,
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001173 mirror::Class** klass) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001174 mirror::Object* ptr = nullptr;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001175 bool was_default_allocator = allocator == GetCurrentAllocator();
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001176 DCHECK(klass != nullptr);
1177 SirtRef<mirror::Class> sirt_klass(self, *klass);
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001178 // The allocation failed. If the GC is running, block until it completes, and then retry the
1179 // allocation.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001180 collector::GcType last_gc = WaitForGcToComplete(self);
Ian Rogers1d54e732013-05-02 21:10:01 -07001181 if (last_gc != collector::kGcTypeNone) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001182 // If we were the default allocator but the allocator changed while we were suspended,
1183 // abort the allocation.
1184 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1185 *klass = sirt_klass.get();
1186 return nullptr;
1187 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001188 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
Ian Rogers6fac4472014-02-25 17:01:10 -08001189 ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001190 }
1191
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001192 collector::GcType tried_type = next_gc_type_;
1193 if (ptr == nullptr) {
1194 const bool gc_ran =
1195 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1196 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1197 *klass = sirt_klass.get();
1198 return nullptr;
1199 }
1200 if (gc_ran) {
1201 ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
1202 }
1203 }
1204
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001205 // Loop through our different Gc types and try to Gc until we get enough free memory.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001206 for (collector::GcType gc_type : gc_plan_) {
1207 if (ptr != nullptr) {
1208 break;
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001209 }
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001210 if (gc_type == tried_type) {
1211 continue;
1212 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001213    // Attempt to run the collector; if it succeeds, retry the allocation.
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001214 const bool gc_ran =
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001215 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1216 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1217 *klass = sirt_klass.get();
1218 return nullptr;
1219 }
1220 if (gc_ran) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001221 // Did we free sufficient memory for the allocation to succeed?
Ian Rogers6fac4472014-02-25 17:01:10 -08001222 ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001223 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001224 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001225 // Allocations have failed after GCs; this is an exceptional state.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001226 if (ptr == nullptr) {
1227 // Try harder, growing the heap if necessary.
Ian Rogers6fac4472014-02-25 17:01:10 -08001228 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001229 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001230 if (ptr == nullptr) {
1231 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1232 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1233 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1234 // OOME.
1235 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1236 << " allocation";
1237 // TODO: Run finalization, but this may cause more allocations to occur.
1238 // We don't need a WaitForGcToComplete here either.
1239 DCHECK(!gc_plan_.empty());
1240 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001241 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1242 *klass = sirt_klass.get();
1243 return nullptr;
1244 }
Ian Rogers6fac4472014-02-25 17:01:10 -08001245 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001246 if (ptr == nullptr) {
1247 ThrowOutOfMemoryError(self, alloc_size, false);
1248 }
1249 }
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001250 *klass = sirt_klass.get();
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001251 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001252}
1253
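// ----------------------------------------------------------------------------
// Illustrative sketch, not part of heap.cc: AllocateInternalWithGc escalates
// through progressively more expensive steps: run the cheaper GC types from
// the plan, let the heap grow, and finally run a full GC that also clears
// SoftReferences before reporting OOME. A compressed model of that escalation
// over abstract callbacks; every name below is an assumption for illustration,
// and the real code additionally aborts if the allocator changed while the
// thread was suspended.
#include <functional>
#include <vector>

namespace alloc_escalation_sketch {

using TryAlloc = std::function<void*(bool grow)>;
using RunGc = std::function<void(int gc_type, bool clear_soft_references)>;

// Assumes a non-empty gc_plan ordered from cheapest to most thorough.
inline void* AllocateWithEscalation(const TryAlloc& try_alloc, const RunGc& run_gc,
                                    const std::vector<int>& gc_plan) {
  for (int gc_type : gc_plan) {  // Cheapest collection first.
    run_gc(gc_type, /*clear_soft_references=*/false);
    if (void* ptr = try_alloc(/*grow=*/false)) {
      return ptr;
    }
  }
  if (void* ptr = try_alloc(/*grow=*/true)) {  // Try harder: allow the heap to grow.
    return ptr;
  }
  run_gc(gc_plan.back(), /*clear_soft_references=*/true);  // Last resort before OOME.
  return try_alloc(/*grow=*/true);  // nullptr here means the caller throws OOME.
}

}  // namespace alloc_escalation_sketch
// ----------------------------------------------------------------------------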
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001254void Heap::SetTargetHeapUtilization(float target) {
1255 DCHECK_GT(target, 0.0f); // asserted in Java code
1256 DCHECK_LT(target, 1.0f);
1257 target_utilization_ = target;
1258}
1259
Ian Rogers1d54e732013-05-02 21:10:01 -07001260size_t Heap::GetObjectsAllocated() const {
1261 size_t total = 0;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001262 for (space::AllocSpace* space : alloc_spaces_) {
1263 total += space->GetObjectsAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001264 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001265 return total;
1266}
1267
Ian Rogers1d54e732013-05-02 21:10:01 -07001268size_t Heap::GetObjectsAllocatedEver() const {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001269 return GetObjectsFreedEver() + GetObjectsAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001270}
1271
1272size_t Heap::GetBytesAllocatedEver() const {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001273 return GetBytesFreedEver() + GetBytesAllocated();
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001274}
1275
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001276class InstanceCounter {
1277 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001278 InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001279 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001280 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001281 }
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001282 static void Callback(mirror::Object* obj, void* arg)
1283 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1284 InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1285 mirror::Class* instance_class = obj->GetClass();
1286 CHECK(instance_class != nullptr);
1287 for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
1288 if (instance_counter->use_is_assignable_from_) {
1289 if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) {
1290 ++instance_counter->counts_[i];
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001291 }
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001292 } else if (instance_class == instance_counter->classes_[i]) {
1293 ++instance_counter->counts_[i];
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001294 }
1295 }
1296 }
1297
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001298 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001299 const std::vector<mirror::Class*>& classes_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001300 bool use_is_assignable_from_;
1301 uint64_t* const counts_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001302 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001303};
1304
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001305void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001306 uint64_t* counts) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001307 // Can't do any GC in this function since this may move classes.
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001308 Thread* self = Thread::Current();
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001309 auto* old_cause = self->StartAssertNoThreadSuspension("CountInstances");
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001310 InstanceCounter counter(classes, use_is_assignable_from, counts);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001311 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1312 VisitObjects(InstanceCounter::Callback, &counter);
1313 self->EndAssertNoThreadSuspension(old_cause);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001314}
1315
Elliott Hughes3b78c942013-01-15 17:35:41 -08001316class InstanceCollector {
1317 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001318 InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
Elliott Hughes3b78c942013-01-15 17:35:41 -08001319 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1320 : class_(c), max_count_(max_count), instances_(instances) {
1321 }
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001322 static void Callback(mirror::Object* obj, void* arg)
1323 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1324 DCHECK(arg != nullptr);
1325 InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
1326 mirror::Class* instance_class = obj->GetClass();
1327 if (instance_class == instance_collector->class_) {
1328 if (instance_collector->max_count_ == 0 ||
1329 instance_collector->instances_.size() < instance_collector->max_count_) {
1330 instance_collector->instances_.push_back(obj);
Elliott Hughes3b78c942013-01-15 17:35:41 -08001331 }
1332 }
1333 }
1334
1335 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001336 mirror::Class* class_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001337 uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001338 std::vector<mirror::Object*>& instances_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001339 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1340};
1341
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001342void Heap::GetInstances(mirror::Class* c, int32_t max_count,
1343 std::vector<mirror::Object*>& instances) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001344 // Can't do any GC in this function since this may move classes.
Elliott Hughes3b78c942013-01-15 17:35:41 -08001345 Thread* self = Thread::Current();
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001346 auto* old_cause = self->StartAssertNoThreadSuspension("GetInstances");
Elliott Hughes3b78c942013-01-15 17:35:41 -08001347 InstanceCollector collector(c, max_count, instances);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001348 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1349 VisitObjects(&InstanceCollector::Callback, &collector);
1350 self->EndAssertNoThreadSuspension(old_cause);
Elliott Hughes3b78c942013-01-15 17:35:41 -08001351}
1352
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001353class ReferringObjectsFinder {
1354 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001355 ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
1356 std::vector<mirror::Object*>& referring_objects)
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001357 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1358 : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1359 }
1360
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001361 static void Callback(mirror::Object* obj, void* arg)
1362 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1363 reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1364 }
1365
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001366 // For bitmap Visit.
1367 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1368 // annotalysis on visitors.
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001369 void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier407f7022014-02-18 14:37:05 -08001370 o->VisitReferences<true>(*this);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001371 }
1372
Mathieu Chartier3b05e9b2014-03-25 09:29:43 -07001373 // For Object::VisitReferences.
Mathieu Chartier407f7022014-02-18 14:37:05 -08001374 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
1375 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1376 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
1377 if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1378 referring_objects_.push_back(obj);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001379 }
1380 }
1381
1382 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001383 mirror::Object* object_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001384 uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001385 std::vector<mirror::Object*>& referring_objects_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001386 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1387};
1388
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001389void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1390 std::vector<mirror::Object*>& referring_objects) {
Mathieu Chartier83c8ee02014-01-28 14:50:23 -08001391 // Can't do any GC in this function since this may move the object o.
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001392 Thread* self = Thread::Current();
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001393 auto* old_cause = self->StartAssertNoThreadSuspension("GetReferringObjects");
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001394 ReferringObjectsFinder finder(o, max_count, referring_objects);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001395 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1396 VisitObjects(&ReferringObjectsFinder::Callback, &finder);
1397 self->EndAssertNoThreadSuspension(old_cause);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001398}
1399
Ian Rogers30fab402012-01-23 15:43:46 -08001400void Heap::CollectGarbage(bool clear_soft_references) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001401 // Even if we waited for a GC we still need to do another GC since weaks allocated during the
1402 // last GC will not have necessarily been cleared.
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001403 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001404}
1405
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001406void Heap::TransitionCollector(CollectorType collector_type) {
1407 if (collector_type == collector_type_) {
1408 return;
1409 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001410 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
1411 << " -> " << static_cast<int>(collector_type);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001412 uint64_t start_time = NanoTime();
Ian Rogersef7d42f2014-01-06 12:55:46 -08001413 uint32_t before_allocated = num_bytes_allocated_.Load();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001414 ThreadList* tl = Runtime::Current()->GetThreadList();
1415 Thread* self = Thread::Current();
1416 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1417 Locks::mutator_lock_->AssertNotHeld(self);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001418 const bool copying_transition =
Mathieu Chartier31f44142014-04-08 14:40:03 -07001419 IsMovingGc(background_collector_type_) || IsMovingGc(foreground_collector_type_);
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001420 // Busy wait until we can GC (StartGC can fail if we have a non-zero
1421  // disable_moving_gc_count_; this should rarely occur).
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001422 for (;;) {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001423 {
1424 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1425 MutexLock mu(self, *gc_complete_lock_);
1426 // Ensure there is only one GC at a time.
1427 WaitForGcToCompleteLocked(self);
Mathieu Chartierb38d4832014-04-10 10:56:55 -07001428 // If someone else beat us to it and changed the collector before we could, exit.
1429 // This is safe to do before the suspend all since we set the collector_type_running_ before
1430 // we exit the loop. If another thread attempts to do the heap transition before we exit,
1431 // then it would get blocked on WaitForGcToCompleteLocked.
1432 if (collector_type == collector_type_) {
1433 return;
1434 }
Mathieu Chartier3c4a4342014-04-23 14:41:11 -07001435 if (Runtime::Current()->IsShuttingDown(self)) {
1436 // Don't allow heap transitions to happen if the runtime is shutting down since these can
1437 // cause objects to get finalized.
1438 return;
1439 }
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001440      // GC can be disabled if someone has used GetPrimitiveArrayCritical but not yet released it.
1441 if (!copying_transition || disable_moving_gc_count_ == 0) {
1442 // TODO: Not hard code in semi-space collector?
1443 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
1444 break;
1445 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001446 }
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001447 usleep(1000);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001448 }
1449 tl->SuspendAll();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001450 PreGcRosAllocVerification(&semi_space_collector_->GetTimings());
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001451 switch (collector_type) {
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001452 case kCollectorTypeSS:
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001453 // Fall-through.
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001454 case kCollectorTypeGSS: {
Mathieu Chartier31f44142014-04-08 14:40:03 -07001455 if (!IsMovingGc(collector_type_)) {
1456        // We are transitioning from a non-moving GC to a moving GC. Since we copied from the bump
1457        // pointer space during the last transition, it will be protected.
1458 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1459 Compact(bump_pointer_space_, main_space_);
1460 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001461 break;
1462 }
1463 case kCollectorTypeMS:
1464 // Fall through.
1465 case kCollectorTypeCMS: {
Mathieu Chartier31f44142014-04-08 14:40:03 -07001466 if (IsMovingGc(collector_type_)) {
1467 // Compact to the main space from the bump pointer space, don't need to swap semispaces.
1468 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartierfc5b5282014-01-09 16:15:36 -08001469 Compact(main_space_, bump_pointer_space_);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001470 }
1471 break;
1472 }
1473 default: {
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07001474 LOG(FATAL) << "Attempted to transition to invalid collector type "
1475 << static_cast<size_t>(collector_type);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001476 break;
1477 }
1478 }
1479 ChangeCollector(collector_type);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001480 PostGcRosAllocVerification(&semi_space_collector_->GetTimings());
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001481 tl->ResumeAll();
1482 // Can't call into java code with all threads suspended.
1483 EnqueueClearedReferences();
1484 uint64_t duration = NanoTime() - start_time;
Mathieu Chartierafe49982014-03-27 10:55:04 -07001485 GrowForUtilization(semi_space_collector_);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001486 FinishGC(self, collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001487 int32_t after_allocated = num_bytes_allocated_.Load();
1488 int32_t delta_allocated = before_allocated - after_allocated;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001489 LOG(INFO) << "Heap transition to " << process_state_ << " took "
Mathieu Chartierdcee9ee2014-04-15 12:40:17 -07001490 << PrettyDuration(duration) << " saved at least " << PrettySize(delta_allocated);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001491}
1492
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001493void Heap::ChangeCollector(CollectorType collector_type) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001494 // TODO: Only do this with all mutators suspended to avoid races.
1495 if (collector_type != collector_type_) {
1496 collector_type_ = collector_type;
1497 gc_plan_.clear();
1498 switch (collector_type_) {
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07001499 case kCollectorTypeCC: // Fall-through.
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07001500 case kCollectorTypeSS: // Fall-through.
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001501 case kCollectorTypeGSS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001502 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001503 if (use_tlab_) {
1504 ChangeAllocator(kAllocatorTypeTLAB);
1505 } else {
1506 ChangeAllocator(kAllocatorTypeBumpPointer);
1507 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001508 break;
1509 }
1510 case kCollectorTypeMS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001511 gc_plan_.push_back(collector::kGcTypeSticky);
1512 gc_plan_.push_back(collector::kGcTypePartial);
1513 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001514 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001515 break;
1516 }
1517 case kCollectorTypeCMS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001518 gc_plan_.push_back(collector::kGcTypeSticky);
1519 gc_plan_.push_back(collector::kGcTypePartial);
1520 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001521 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001522 break;
1523 }
1524 default: {
1525 LOG(FATAL) << "Unimplemented";
1526 }
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001527 }
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07001528 if (IsGcConcurrent()) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001529 concurrent_start_bytes_ =
1530 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
1531 } else {
1532 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001533 }
1534 }
1535}
1536
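// ----------------------------------------------------------------------------
// Illustrative sketch, not part of heap.cc: ChangeCollector above rebuilds
// gc_plan_ so that moving collectors only ever run full collections while the
// mark-sweep variants try sticky, then partial, then full. The mapping in
// isolation; the enumerator names are assumptions mirroring the cases above.
#include <vector>

namespace gc_plan_sketch {

enum class Collector { kSemiSpace, kGenerationalSemiSpace, kMarkSweep, kConcurrentMarkSweep };
enum class GcType { kSticky, kPartial, kFull };

inline std::vector<GcType> PlanFor(Collector collector) {
  switch (collector) {
    case Collector::kSemiSpace:
    case Collector::kGenerationalSemiSpace:
      return {GcType::kFull};  // Compacting collectors always do a full copy.
    case Collector::kMarkSweep:
    case Collector::kConcurrentMarkSweep:
      return {GcType::kSticky, GcType::kPartial, GcType::kFull};
  }
  return {GcType::kFull};  // Unreachable; keeps compilers satisfied.
}

}  // namespace gc_plan_sketch
// ----------------------------------------------------------------------------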
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001537// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
Ian Rogers6fac4472014-02-25 17:01:10 -08001538class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001539 public:
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001540 explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, false, "zygote collector"),
Ian Rogers6fac4472014-02-25 17:01:10 -08001541 bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001542 }
1543
1544 void BuildBins(space::ContinuousSpace* space) {
1545 bin_live_bitmap_ = space->GetLiveBitmap();
1546 bin_mark_bitmap_ = space->GetMarkBitmap();
1547 BinContext context;
1548 context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
1549 context.collector_ = this;
1550 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1551 // Note: This requires traversing the space in increasing order of object addresses.
1552 bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
1553    // Add the last bin, which spans from the end of the last object to the end of the space.
1554 AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
1555 }
1556
1557 private:
1558 struct BinContext {
1559 uintptr_t prev_; // The end of the previous object.
1560 ZygoteCompactingCollector* collector_;
1561 };
1562 // Maps from bin sizes to locations.
1563 std::multimap<size_t, uintptr_t> bins_;
1564 // Live bitmap of the space which contains the bins.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001565 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001566 // Mark bitmap of the space which contains the bins.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001567 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001568
1569 static void Callback(mirror::Object* obj, void* arg)
1570 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1571 DCHECK(arg != nullptr);
1572 BinContext* context = reinterpret_cast<BinContext*>(arg);
1573 ZygoteCompactingCollector* collector = context->collector_;
1574 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
1575 size_t bin_size = object_addr - context->prev_;
1576    // Add the bin spanning from the end of the previous object to the start of the current object.
1577 collector->AddBin(bin_size, context->prev_);
1578 context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
1579 }
1580
1581 void AddBin(size_t size, uintptr_t position) {
1582 if (size != 0) {
1583 bins_.insert(std::make_pair(size, position));
1584 }
1585 }
1586
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001587 virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001588 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
1589 // allocator.
1590 return false;
1591 }
1592
1593 virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
1594 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
1595 size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
Mathieu Chartier5dc08a62014-01-10 10:10:23 -08001596 mirror::Object* forward_address;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001597 // Find the smallest bin which we can move obj in.
1598 auto it = bins_.lower_bound(object_size);
1599 if (it == bins_.end()) {
1600      // No available space in the bins; place it in the target space instead (grows the zygote
1601 // space).
Mathieu Chartier5dc08a62014-01-10 10:10:23 -08001602 size_t bytes_allocated;
Ian Rogers6fac4472014-02-25 17:01:10 -08001603 forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001604 if (to_space_live_bitmap_ != nullptr) {
1605 to_space_live_bitmap_->Set(forward_address);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001606 } else {
1607 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
1608 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001609 }
1610 } else {
1611 size_t size = it->first;
1612 uintptr_t pos = it->second;
1613 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
1614 forward_address = reinterpret_cast<mirror::Object*>(pos);
1615 // Set the live and mark bits so that sweeping system weaks works properly.
1616 bin_live_bitmap_->Set(forward_address);
1617 bin_mark_bitmap_->Set(forward_address);
1618 DCHECK_GE(size, object_size);
1619 AddBin(size - object_size, pos + object_size); // Add a new bin with the remaining space.
1620 }
1621 // Copy the object over to its new location.
1622 memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
Hiroshi Yamauchi624468c2014-03-31 15:14:47 -07001623 if (kUseBakerOrBrooksReadBarrier) {
1624 obj->AssertReadBarrierPointer();
1625 if (kUseBrooksReadBarrier) {
1626 DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
1627 forward_address->SetReadBarrierPointer(forward_address);
1628 }
1629 forward_address->AssertReadBarrierPointer();
Hiroshi Yamauchi9d04a202014-01-31 13:35:49 -08001630 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001631 return forward_address;
1632 }
1633};
1634
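// ----------------------------------------------------------------------------
// Illustrative sketch, not part of heap.cc: ZygoteCompactingCollector above
// keeps the free gaps between surviving objects in a multimap keyed by size
// and uses lower_bound() for a best-fit placement, splitting the chosen bin
// and re-inserting the remainder. The bin bookkeeping on its own; class and
// method names are assumptions for illustration.
#include <cstddef>
#include <cstdint>
#include <map>
#include <utility>

namespace bin_packing_sketch {

class BestFitBins {
 public:
  // Records a free gap of 'size' bytes starting at 'position'.
  void AddBin(size_t size, uintptr_t position) {
    if (size != 0) {
      bins_.insert(std::make_pair(size, position));
    }
  }

  // Returns the address the object was placed at, or 0 if no bin is large
  // enough (the real collector then bump-allocates in the target space).
  uintptr_t Place(size_t object_size) {
    auto it = bins_.lower_bound(object_size);  // Smallest bin that still fits.
    if (it == bins_.end()) {
      return 0;
    }
    const size_t size = it->first;
    const uintptr_t pos = it->second;
    bins_.erase(it);
    AddBin(size - object_size, pos + object_size);  // Keep the leftover space.
    return pos;
  }

 private:
  std::multimap<size_t, uintptr_t> bins_;  // Free-gap size -> start address.
};

}  // namespace bin_packing_sketch
// ----------------------------------------------------------------------------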
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001635void Heap::UnBindBitmaps() {
1636 for (const auto& space : GetContinuousSpaces()) {
1637 if (space->IsContinuousMemMapAllocSpace()) {
1638 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1639 if (alloc_space->HasBoundBitmaps()) {
1640 alloc_space->UnBindBitmaps();
1641 }
1642 }
1643 }
1644}
1645
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001646void Heap::PreZygoteFork() {
Mathieu Chartier1f3b5352014-02-03 14:00:42 -08001647 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001648 static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
Ian Rogers81d425b2012-09-27 16:03:43 -07001649 Thread* self = Thread::Current();
1650 MutexLock mu(self, zygote_creation_lock_);
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001651 // Try to see if we have any Zygote spaces.
1652 if (have_zygote_space_) {
1653 return;
1654 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001655 VLOG(heap) << "Starting PreZygoteFork";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001656 // Trim the pages at the end of the non moving space.
1657 non_moving_space_->Trim();
Mathieu Chartier31f44142014-04-08 14:40:03 -07001658  // The end of the non-moving space may be protected; unprotect it so that we can copy the zygote
1659 // there.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001660 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001661 // Change the collector to the post zygote one.
Mathieu Chartier31f44142014-04-08 14:40:03 -07001662 if (kCompactZygote) {
1663 DCHECK(semi_space_collector_ != nullptr);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001664 // Temporarily disable rosalloc verification because the zygote
1665 // compaction will mess up the rosalloc internal metadata.
1666 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001667 ZygoteCompactingCollector zygote_collector(this);
1668 zygote_collector.BuildBins(non_moving_space_);
Mathieu Chartier50482232013-11-21 11:48:14 -08001669 // Create a new bump pointer space which we will compact into.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001670 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
1671 non_moving_space_->Limit());
1672 // Compact the bump pointer space to a new zygote bump pointer space.
Mathieu Chartier31f44142014-04-08 14:40:03 -07001673 bool reset_main_space = false;
1674 if (IsMovingGc(collector_type_)) {
1675 zygote_collector.SetFromSpace(bump_pointer_space_);
1676 } else {
1677 CHECK(main_space_ != nullptr);
1678 // Copy from the main space.
1679 zygote_collector.SetFromSpace(main_space_);
1680 reset_main_space = true;
1681 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001682 zygote_collector.SetToSpace(&target_space);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001683
1684 Runtime::Current()->GetThreadList()->SuspendAll();
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001685 zygote_collector.Run(kGcCauseCollectorTransition, false);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001686 if (IsMovingGc(collector_type_)) {
1687 SwapSemiSpaces();
1688 }
1689 Runtime::Current()->GetThreadList()->ResumeAll();
1690
1691 if (reset_main_space) {
1692 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1693 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
1694 MemMap* mem_map = main_space_->ReleaseMemMap();
1695 RemoveSpace(main_space_);
1696 delete main_space_;
1697 main_space_ = nullptr;
1698 CreateMainMallocSpace(mem_map, kDefaultInitialSize, mem_map->Size(), mem_map->Size());
1699 AddSpace(main_space_);
1700 } else {
1701 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1702 }
1703 if (temp_space_ != nullptr) {
1704 CHECK(temp_space_->IsEmpty());
1705 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001706 total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
1707 total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
1708 // Update the end and write out image.
1709 non_moving_space_->SetEnd(target_space.End());
1710 non_moving_space_->SetLimit(target_space.Limit());
Mathieu Chartier31f44142014-04-08 14:40:03 -07001711 VLOG(heap) << "Zygote space size " << non_moving_space_->Size() << " bytes";
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001712 }
Mathieu Chartier31f44142014-04-08 14:40:03 -07001713 ChangeCollector(foreground_collector_type_);
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001714 // Save the old space so that we can remove it after we complete creating the zygote space.
1715 space::MallocSpace* old_alloc_space = non_moving_space_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001716 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001717 // the remaining available space.
1718 // Remove the old space before creating the zygote space since creating the zygote space sets
1719 // the old alloc space's bitmaps to nullptr.
1720 RemoveSpace(old_alloc_space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001721 if (collector::SemiSpace::kUseRememberedSet) {
1722 // Sanity bound check.
1723 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
1724 // Remove the remembered set for the now zygote space (the old
1725 // non-moving space). Note now that we have compacted objects into
1726 // the zygote space, the data in the remembered set is no longer
1727 // needed. The zygote space will instead have a mod-union table
1728 // from this point on.
1729 RemoveRememberedSet(old_alloc_space);
1730 }
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001731 space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
1732 low_memory_mode_,
Mathieu Chartier31f44142014-04-08 14:40:03 -07001733 &non_moving_space_);
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001734 delete old_alloc_space;
1735 CHECK(zygote_space != nullptr) << "Failed creating zygote space";
1736 AddSpace(zygote_space, false);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001737 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
1738 AddSpace(non_moving_space_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001739 have_zygote_space_ = true;
Mathieu Chartierbd0a6532014-02-27 11:14:21 -08001740 // Enable large object space allocations.
1741 large_object_threshold_ = kDefaultLargeObjectThreshold;
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001742 // Create the zygote space mod union table.
1743 accounting::ModUnionTable* mod_union_table =
1744 new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
1745 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
1746 AddModUnionTable(mod_union_table);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001747 if (collector::SemiSpace::kUseRememberedSet) {
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001748 // Add a new remembered set for the post-zygote non-moving space.
1749 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
1750 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
1751 non_moving_space_);
1752 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
1753 << "Failed to create post-zygote non-moving space remembered set";
1754 AddRememberedSet(post_zygote_non_moving_space_rem_set);
1755 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001756}
1757
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001758void Heap::FlushAllocStack() {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001759 MarkAllocStackAsLive(allocation_stack_.get());
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001760 allocation_stack_->Reset();
1761}
1762
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001763void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
1764 accounting::ContinuousSpaceBitmap* bitmap2,
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001765 accounting::LargeObjectBitmap* large_objects,
Ian Rogers1d54e732013-05-02 21:10:01 -07001766 accounting::ObjectStack* stack) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001767 DCHECK(bitmap1 != nullptr);
1768 DCHECK(bitmap2 != nullptr);
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001769 mirror::Object** limit = stack->End();
1770 for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
1771 const mirror::Object* obj = *it;
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001772 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
1773 if (bitmap1->HasAddress(obj)) {
1774 bitmap1->Set(obj);
1775 } else if (bitmap2->HasAddress(obj)) {
1776 bitmap2->Set(obj);
1777 } else {
1778 large_objects->Set(obj);
1779 }
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001780 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001781 }
1782}
1783
Mathieu Chartier590fee92013-09-13 13:46:47 -07001784void Heap::SwapSemiSpaces() {
Mathieu Chartier31f44142014-04-08 14:40:03 -07001785 CHECK(bump_pointer_space_ != nullptr);
1786 CHECK(temp_space_ != nullptr);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001787 std::swap(bump_pointer_space_, temp_space_);
1788}
1789
1790void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
1791 space::ContinuousMemMapAllocSpace* source_space) {
1792 CHECK(kMovingCollector);
Mathieu Chartier50482232013-11-21 11:48:14 -08001793 CHECK_NE(target_space, source_space) << "In-place compaction currently unsupported";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001794 if (target_space != source_space) {
1795 semi_space_collector_->SetFromSpace(source_space);
1796 semi_space_collector_->SetToSpace(target_space);
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001797 semi_space_collector_->Run(kGcCauseCollectorTransition, false);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001798 }
1799}
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001800
Ian Rogers1d54e732013-05-02 21:10:01 -07001801collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
1802 bool clear_soft_references) {
Ian Rogers81d425b2012-09-27 16:03:43 -07001803 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001804 Runtime* runtime = Runtime::Current();
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001805 // If the heap can't run the GC, silently fail and return that no GC was run.
1806 switch (gc_type) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001807 case collector::kGcTypePartial: {
1808 if (!have_zygote_space_) {
1809 return collector::kGcTypeNone;
1810 }
1811 break;
1812 }
1813 default: {
 1814 // Other GC types don't have any special cases that make them unable to run. The main case
1815 // here is full GC.
1816 }
1817 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08001818 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Ian Rogers81d425b2012-09-27 16:03:43 -07001819 Locks::mutator_lock_->AssertNotHeld(self);
Ian Rogers120f1c72012-09-28 17:17:10 -07001820 if (self->IsHandlingStackOverflow()) {
1821 LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
1822 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001823 bool compacting_gc;
1824 {
1825 gc_complete_lock_->AssertNotHeld(self);
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001826 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001827 MutexLock mu(self, *gc_complete_lock_);
1828 // Ensure there is only one GC at a time.
1829 WaitForGcToCompleteLocked(self);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001830 compacting_gc = IsMovingGc(collector_type_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001831 // GC can be disabled if someone is using GetPrimitiveArrayCritical.
1832 if (compacting_gc && disable_moving_gc_count_ != 0) {
1833 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
1834 return collector::kGcTypeNone;
1835 }
1836 collector_type_running_ = collector_type_;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001837 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001838
Mathieu Chartier590fee92013-09-13 13:46:47 -07001839 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
1840 ++runtime->GetStats()->gc_for_alloc_count;
1841 ++self->GetStats()->gc_for_alloc_count;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001842 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001843 uint64_t gc_start_time_ns = NanoTime();
Mathieu Chartier65db8802012-11-20 12:36:46 -08001844 uint64_t gc_start_size = GetBytesAllocated();
1845 // Approximate allocation rate in bytes / second.
Ian Rogers1d54e732013-05-02 21:10:01 -07001846 uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001847 // Back to back GCs can cause 0 ms of wait time in between GC invocations.
1848 if (LIKELY(ms_delta != 0)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001849 allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
Mathieu Chartier65db8802012-11-20 12:36:46 -08001850 VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
1851 }
1852
Ian Rogers1d54e732013-05-02 21:10:01 -07001853 DCHECK_LT(gc_type, collector::kGcTypeMax);
1854 DCHECK_NE(gc_type, collector::kGcTypeNone);
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001855
Mathieu Chartier590fee92013-09-13 13:46:47 -07001856 collector::GarbageCollector* collector = nullptr;
Mathieu Chartier50482232013-11-21 11:48:14 -08001857 // TODO: Clean this up.
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001858 if (compacting_gc) {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001859 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
1860 current_allocator_ == kAllocatorTypeTLAB);
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07001861 if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) {
1862 gc_type = semi_space_collector_->GetGcType();
1863 semi_space_collector_->SetFromSpace(bump_pointer_space_);
1864 semi_space_collector_->SetToSpace(temp_space_);
1865 collector = semi_space_collector_;
1866 } else if (collector_type_ == kCollectorTypeCC) {
1867 gc_type = concurrent_copying_collector_->GetGcType();
1868 collector = concurrent_copying_collector_;
1869 } else {
1870 LOG(FATAL) << "Unreachable - invalid collector type " << static_cast<size_t>(collector_type_);
1871 }
Mathieu Chartier15d34022014-02-26 17:16:38 -08001872 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07001873 CHECK(temp_space_->IsEmpty());
Mathieu Chartier590fee92013-09-13 13:46:47 -07001874 gc_type = collector::kGcTypeFull;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001875 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
1876 current_allocator_ == kAllocatorTypeDlMalloc) {
Mathieu Chartierafe49982014-03-27 10:55:04 -07001877 collector = FindCollectorByGcType(gc_type);
Mathieu Chartier50482232013-11-21 11:48:14 -08001878 } else {
1879 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001880 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001881 CHECK(collector != nullptr)
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07001882 << "Could not find garbage collector with collector_type="
1883 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001884 ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
Mathieu Chartier31f44142014-04-08 14:40:03 -07001885 if (compacting_gc) {
1886 runtime->GetThreadList()->SuspendAll();
1887 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
1888 SwapSemiSpaces();
1889 runtime->GetThreadList()->ResumeAll();
1890 } else {
1891 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
1892 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001893 total_objects_freed_ever_ += collector->GetFreedObjects();
1894 total_bytes_freed_ever_ += collector->GetFreedBytes();
Mathieu Chartier7bf52d22014-03-13 14:46:09 -07001895 RequestHeapTrim();
Mathieu Chartier39e32612013-11-12 16:28:05 -08001896 // Enqueue cleared references.
1897 EnqueueClearedReferences();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001898 // Grow the heap so that we know when to perform the next GC.
Mathieu Chartierafe49982014-03-27 10:55:04 -07001899 GrowForUtilization(collector);
Mathieu Chartierca2a24d2013-11-25 15:12:12 -08001900 if (CareAboutPauseTimes()) {
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001901 const size_t duration = collector->GetDurationNs();
1902 std::vector<uint64_t> pauses = collector->GetPauseTimes();
1903 // GC for alloc pauses the allocating thread, so consider it as a pause.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001904 bool was_slow = duration > long_gc_log_threshold_ ||
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001905 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001906 if (!was_slow) {
1907 for (uint64_t pause : pauses) {
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001908 was_slow = was_slow || pause > long_pause_log_threshold_;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001909 }
1910 }
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001911 if (was_slow) {
1912 const size_t percent_free = GetPercentFree();
1913 const size_t current_heap_size = GetBytesAllocated();
1914 const size_t total_memory = GetTotalMemory();
1915 std::ostringstream pause_string;
1916 for (size_t i = 0; i < pauses.size(); ++i) {
1917 pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
1918 << ((i != pauses.size() - 1) ? ", " : "");
1919 }
1920 LOG(INFO) << gc_cause << " " << collector->GetName()
1921 << " GC freed " << collector->GetFreedObjects() << "("
1922 << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
1923 << collector->GetFreedLargeObjects() << "("
1924 << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
1925 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
1926 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
1927 << " total " << PrettyDuration((duration / 1000) * 1000);
Mathieu Chartierafe49982014-03-27 10:55:04 -07001928 VLOG(heap) << ConstDumpable<TimingLogger>(collector->GetTimings());
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001929 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001930 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001931 FinishGC(self, gc_type);
Mathieu Chartier752a0e62013-06-27 11:03:27 -07001932 ATRACE_END();
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001933
1934 // Inform DDMS that a GC completed.
Ian Rogers15bf2d32012-08-28 17:33:04 -07001935 Dbg::GcDidFinish();
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001936 return gc_type;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001937}
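
// [Editorial sketch, not part of the original file] The allocation-rate estimate near the top of
// CollectGarbageInternal() is (bytes allocated since the last GC) / (elapsed time), scaled to
// bytes per second. A stand-alone version of that arithmetic (assumes <cstdint>):
inline uint64_t SketchAllocationRateBytesPerSec(uint64_t bytes_at_last_gc,
                                                uint64_t bytes_at_gc_start,
                                                uint64_t elapsed_ms) {
  if (elapsed_ms == 0) {
    return 0;  // Back-to-back GCs; the code above simply skips the update in this case.
  }
  return (bytes_at_gc_start - bytes_at_last_gc) * 1000 / elapsed_ms;
}
// For example, 8 MiB allocated over 250 ms yields 8 MiB * 1000 / 250 = 32 MiB/s.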
Mathieu Chartiera6399032012-06-11 18:49:50 -07001938
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001939void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
1940 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001941 collector_type_running_ = kCollectorTypeNone;
1942 if (gc_type != collector::kGcTypeNone) {
1943 last_gc_type_ = gc_type;
1944 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001945 // Wake anyone who may have been waiting for the GC to complete.
1946 gc_complete_cond_->Broadcast(self);
1947}
1948
Mathieu Chartier815873e2014-02-13 18:02:13 -08001949static void RootMatchesObjectVisitor(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
1950 RootType /*root_type*/) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001951 mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
Mathieu Chartier815873e2014-02-13 18:02:13 -08001952 if (*root == obj) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001953 LOG(INFO) << "Object " << obj << " is a root";
1954 }
1955}
1956
1957class ScanVisitor {
1958 public:
Brian Carlstromdf629502013-07-17 22:39:56 -07001959 void operator()(const mirror::Object* obj) const {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001960 LOG(ERROR) << "Would have rescanned object " << obj;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001961 }
1962};
1963
Ian Rogers1d54e732013-05-02 21:10:01 -07001964// Verify a reference from an object.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001965class VerifyReferenceVisitor {
1966 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001967 explicit VerifyReferenceVisitor(Heap* heap)
Ian Rogers1d54e732013-05-02 21:10:01 -07001968 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001969 : heap_(heap), failed_(false) {}
Ian Rogers1d54e732013-05-02 21:10:01 -07001970
1971 bool Failed() const {
1972 return failed_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001973 }
1974
Mathieu Chartier407f7022014-02-18 14:37:05 -08001975 void operator()(mirror::Class* klass, mirror::Reference* ref) const
1976 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1977 this->operator()(ref, mirror::Reference::ReferentOffset(), false);
1978 }
1979
Mathieu Chartier3b05e9b2014-03-25 09:29:43 -07001980 void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
Mathieu Chartier407f7022014-02-18 14:37:05 -08001981 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1982 this->operator()(obj, obj->GetFieldObject<mirror::Object>(offset, false), offset);
1983 }
1984
1985 // TODO: Fix the no thread safety analysis.
1986 void operator()(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001987 NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001988 if (ref == nullptr || IsLive(ref)) {
1989 // Verify that the reference is live.
1990 return;
1991 }
1992 if (!failed_) {
 1993 // Print the message only on the first failure to prevent spam.
1994 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
1995 failed_ = true;
1996 }
1997 if (obj != nullptr) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001998 accounting::CardTable* card_table = heap_->GetCardTable();
1999 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2000 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002001 byte* card_addr = card_table->CardFromAddr(obj);
2002 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2003 << offset << "\n card value = " << static_cast<int>(*card_addr);
2004 if (heap_->IsValidObjectAddress(obj->GetClass())) {
2005 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
2006 } else {
2007 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002008 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002009
 2010 // Attempt to find the class among the recently freed objects.
2011 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2012 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2013 space::MallocSpace* space = ref_space->AsMallocSpace();
2014 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2015 if (ref_class != nullptr) {
2016 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2017 << PrettyClass(ref_class);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002018 } else {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002019 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002020 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002021 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002022
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002023 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2024 ref->GetClass()->IsClass()) {
2025 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
2026 } else {
2027 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2028 << ") is not a valid heap address";
2029 }
2030
2031 card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
2032 void* cover_begin = card_table->AddrFromCard(card_addr);
2033 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2034 accounting::CardTable::kCardSize);
2035 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2036 << "-" << cover_end;
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002037 accounting::ContinuousSpaceBitmap* bitmap =
2038 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002039
2040 if (bitmap == nullptr) {
2041 LOG(ERROR) << "Object " << obj << " has no bitmap";
Mathieu Chartier4e305412014-02-19 10:54:44 -08002042 if (!VerifyClassClass(obj->GetClass())) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002043 LOG(ERROR) << "Object " << obj << " failed class verification!";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002044 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002045 } else {
Ian Rogers1d54e732013-05-02 21:10:01 -07002046 // Print out how the object is live.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002047 if (bitmap->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002048 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2049 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002050 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002051 LOG(ERROR) << "Object " << obj << " found in allocation stack";
2052 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002053 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002054 LOG(ERROR) << "Object " << obj << " found in live stack";
2055 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002056 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2057 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2058 }
2059 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2060 LOG(ERROR) << "Ref " << ref << " found in live stack";
2061 }
Ian Rogers1d54e732013-05-02 21:10:01 -07002062 // Attempt to see if the card table missed the reference.
2063 ScanVisitor scan_visitor;
2064 byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
2065 card_table->Scan(bitmap, byte_cover_begin,
Mathieu Chartier184e3222013-08-03 14:02:57 -07002066 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002067 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002068
2069 // Search to see if any of the roots reference our object.
2070 void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
Mathieu Chartier893263b2014-03-04 11:07:42 -08002071 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002072
2073 // Search to see if any of the roots reference our reference.
2074 arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
Mathieu Chartier893263b2014-03-04 11:07:42 -08002075 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002076 } else {
2077 LOG(ERROR) << "Root " << ref << " is dead with type " << PrettyTypeOf(ref);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002078 }
2079 }
2080
Ian Rogersef7d42f2014-01-06 12:55:46 -08002081 bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002082 return heap_->IsLiveObjectLocked(obj, true, false, true);
Ian Rogers1d54e732013-05-02 21:10:01 -07002083 }
2084
Mathieu Chartier815873e2014-02-13 18:02:13 -08002085 static void VerifyRoots(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
2086 RootType /*root_type*/) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002087 VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
Mathieu Chartier407f7022014-02-18 14:37:05 -08002088 (*visitor)(nullptr, *root, MemberOffset(0));
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002089 }
2090
2091 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002092 Heap* const heap_;
2093 mutable bool failed_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002094};
2095
Ian Rogers1d54e732013-05-02 21:10:01 -07002096// Verify all references within an object, for use with HeapBitmap::Visit.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002097class VerifyObjectVisitor {
2098 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07002099 explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002100
Mathieu Chartier590fee92013-09-13 13:46:47 -07002101 void operator()(mirror::Object* obj) const
Ian Rogersb726dcb2012-09-05 08:57:23 -07002102 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002103 // Note: we verify the references in obj but not obj itself, because obj must be live or we
 2104 // would not have found it in the live bitmap.
2105 VerifyReferenceVisitor visitor(heap_);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002106 // The class doesn't count as a reference but we should verify it anyways.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002107 obj->VisitReferences<true>(visitor, visitor);
Ian Rogers1d54e732013-05-02 21:10:01 -07002108 failed_ = failed_ || visitor.Failed();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002109 }
2110
Mathieu Chartier590fee92013-09-13 13:46:47 -07002111 static void VisitCallback(mirror::Object* obj, void* arg)
2112 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2113 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
2114 visitor->operator()(obj);
2115 }
2116
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002117 bool Failed() const {
2118 return failed_;
2119 }
2120
2121 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002122 Heap* const heap_;
2123 mutable bool failed_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002124};
2125
2126// Must do this with mutators suspended since we are directly accessing the allocation stacks.
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002127bool Heap::VerifyHeapReferences() {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002128 Thread* self = Thread::Current();
2129 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002130 // Lets sort our allocation stacks so that we can efficiently binary search them.
Ian Rogers1d54e732013-05-02 21:10:01 -07002131 allocation_stack_->Sort();
2132 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002133 // Since we sorted the allocation stack content, we need to revoke all
2134 // thread-local allocation stacks.
2135 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002136 VerifyObjectVisitor visitor(this);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002137 // Verify objects in the allocation stack since these will be objects which were:
2138 // 1. Allocated prior to the GC (pre GC verification).
2139 // 2. Allocated during the GC (pre sweep GC verification).
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002140 // We don't want to verify the objects in the live stack since they themselves may be
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002141 // pointing to dead objects if they are not reachable.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002142 VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
2143 // Verify the roots:
Mathieu Chartier893263b2014-03-04 11:07:42 -08002144 Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002145 if (visitor.Failed()) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002146 // Dump mod-union tables.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002147 for (const auto& table_pair : mod_union_tables_) {
2148 accounting::ModUnionTable* mod_union_table = table_pair.second;
2149 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
2150 }
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002151 // Dump remembered sets.
2152 for (const auto& table_pair : remembered_sets_) {
2153 accounting::RememberedSet* remembered_set = table_pair.second;
2154 remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
2155 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002156 DumpSpaces();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002157 return false;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002158 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002159 return true;
2160}
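
// [Editorial sketch, not part of the original file] VerifyHeapReferences() composes two functors:
// an object-level visitor applied to every live object, which applies a reference-level visitor
// to each field, accumulating a single "failed" flag. A schematic of that composition with
// hypothetical types; SketchIsLive() is a placeholder for the real liveness checks
// (assumes <vector>):
struct SketchObj {
  std::vector<SketchObj*> refs;  // Stand-in for an object's reference fields.
};
inline bool SketchIsLive(const SketchObj* /*ref*/) { return true; }  // Placeholder check.
inline bool SketchVerifyAll(const std::vector<SketchObj*>& live_objects) {
  bool failed = false;
  for (const SketchObj* obj : live_objects) {   // "Object visitor": every live object...
    for (SketchObj* ref : obj->refs) {          // "Reference visitor": ...and each field it holds.
      if (ref != nullptr && !SketchIsLive(ref)) {
        failed = true;                          // Record the failure but keep scanning, so the
      }                                         // caller can dump as much state as possible.
    }
  }
  return !failed;
}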
2161
2162class VerifyReferenceCardVisitor {
2163 public:
2164 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
2165 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
2166 Locks::heap_bitmap_lock_)
Ian Rogers1d54e732013-05-02 21:10:01 -07002167 : heap_(heap), failed_(failed) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002168 }
2169
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002170 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
2171 // annotalysis on visitors.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002172 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
2173 NO_THREAD_SAFETY_ANALYSIS {
2174 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002175 // Filter out class references since changing an object's class does not mark the card as dirty.
2176 // Also handles large objects, since the only reference they hold is a class reference.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002177 if (ref != nullptr && !ref->IsClass()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002178 accounting::CardTable* card_table = heap_->GetCardTable();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002179 // If the object is not dirty and it is referencing something in the live stack other than
2180 // class, then it must be on a dirty card.
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07002181 if (!card_table->AddrIsInCardTable(obj)) {
2182 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
2183 *failed_ = true;
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002184 } else if (!card_table->IsDirty(obj)) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002185 // TODO: Check mod-union tables.
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002186 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
 2187 // kCardDirty - 1 if it didn't get touched since we aged it.
Ian Rogers1d54e732013-05-02 21:10:01 -07002188 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier407f7022014-02-18 14:37:05 -08002189 if (live_stack->ContainsSorted(ref)) {
2190 if (live_stack->ContainsSorted(obj)) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002191 LOG(ERROR) << "Object " << obj << " found in live stack";
2192 }
2193 if (heap_->GetLiveBitmap()->Test(obj)) {
2194 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2195 }
2196 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
2197 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
2198
2199 // Print which field of the object is dead.
2200 if (!obj->IsObjectArray()) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002201 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002202 CHECK(klass != NULL);
Ian Rogersef7d42f2014-01-06 12:55:46 -08002203 mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
2204 : klass->GetIFields();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002205 CHECK(fields != NULL);
2206 for (int32_t i = 0; i < fields->GetLength(); ++i) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002207 mirror::ArtField* cur = fields->Get(i);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002208 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
2209 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
2210 << PrettyField(cur);
2211 break;
2212 }
2213 }
2214 } else {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002215 mirror::ObjectArray<mirror::Object>* object_array =
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002216 obj->AsObjectArray<mirror::Object>();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002217 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
2218 if (object_array->Get(i) == ref) {
2219 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
2220 }
2221 }
2222 }
2223
2224 *failed_ = true;
2225 }
2226 }
2227 }
2228 }
2229
2230 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002231 Heap* const heap_;
2232 bool* const failed_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002233};
2234
2235class VerifyLiveStackReferences {
2236 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07002237 explicit VerifyLiveStackReferences(Heap* heap)
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002238 : heap_(heap),
Brian Carlstrom93ba8932013-07-17 21:31:49 -07002239 failed_(false) {}
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002240
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002241 void operator()(mirror::Object* obj) const
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002242 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2243 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
Mathieu Chartier407f7022014-02-18 14:37:05 -08002244 obj->VisitReferences<true>(visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002245 }
2246
2247 bool Failed() const {
2248 return failed_;
2249 }
2250
2251 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002252 Heap* const heap_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002253 bool failed_;
2254};
2255
2256bool Heap::VerifyMissingCardMarks() {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002257 Thread* self = Thread::Current();
2258 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002259
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002260 // We need to sort the live stack since we binary search it.
Ian Rogers1d54e732013-05-02 21:10:01 -07002261 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002262 // Since we sorted the allocation stack content, we need to revoke all
2263 // thread-local allocation stacks.
2264 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002265 VerifyLiveStackReferences visitor(this);
2266 GetLiveBitmap()->Visit(visitor);
2267
2268 // We can verify objects in the live stack since none of these should reference dead objects.
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002269 for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002270 if (!kUseThreadLocalAllocationStack || *it != nullptr) {
2271 visitor(*it);
2272 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002273 }
2274
2275 if (visitor.Failed()) {
2276 DumpSpaces();
2277 return false;
2278 }
2279 return true;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002280}
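
// [Editorial sketch, not part of the original file] The card-mark checks above rely on the card
// table's address mapping: each card byte covers a fixed-size, aligned window of the heap
// (accounting::CardTable::kCardSize; a 128-byte card size is assumed below purely for
// illustration). A stand-alone sketch of the addr -> card and card -> covered-address math
// (assumes <cstdint>):
inline uint8_t* SketchCardFromAddr(uint8_t* biased_card_begin, const void* addr) {
  const unsigned kSketchCardShift = 7;  // 2^7 = 128-byte cards (assumption).
  return biased_card_begin + (reinterpret_cast<uintptr_t>(addr) >> kSketchCardShift);
}
inline uintptr_t SketchAddrFromCard(const uint8_t* biased_card_begin, const uint8_t* card) {
  const unsigned kSketchCardShift = 7;
  return static_cast<uintptr_t>(card - biased_card_begin) << kSketchCardShift;
}
// Stores into an object dirty the card covering the *holder*, which is why an object that
// references something only reachable through the live stack must sit on a dirty (or aged) card.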
2281
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002282void Heap::SwapStacks(Thread* self) {
2283 if (kUseThreadLocalAllocationStack) {
2284 live_stack_->AssertAllZero();
2285 }
Mathieu Chartierd22d5482012-11-06 17:14:12 -08002286 allocation_stack_.swap(live_stack_);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002287}
2288
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002289void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002290 // This must be called only during the pause.
2291 CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
2292 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
2293 MutexLock mu2(self, *Locks::thread_list_lock_);
2294 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
2295 for (Thread* t : thread_list) {
2296 t->RevokeThreadLocalAllocationStack();
2297 }
2298}
2299
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07002300void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
2301 if (kIsDebugBuild) {
2302 if (bump_pointer_space_ != nullptr) {
2303 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
2304 }
2305 }
2306}
2307
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002308accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
2309 auto it = mod_union_tables_.find(space);
2310 if (it == mod_union_tables_.end()) {
2311 return nullptr;
2312 }
2313 return it->second;
2314}
2315
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002316accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
2317 auto it = remembered_sets_.find(space);
2318 if (it == remembered_sets_.end()) {
2319 return nullptr;
2320 }
2321 return it->second;
2322}
2323
2324void Heap::ProcessCards(TimingLogger& timings, bool use_rem_sets) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002325 // Clear cards and keep track of cards cleared in the mod-union table.
Mathieu Chartier02e25112013-08-14 16:14:24 -07002326 for (const auto& space : continuous_spaces_) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002327 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002328 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002329 if (table != nullptr) {
2330 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
2331 "ImageModUnionClearCards";
Ian Rogers5fe9af72013-11-14 00:17:20 -08002332 TimingLogger::ScopedSplit split(name, &timings);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002333 table->ClearCards();
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002334 } else if (use_rem_sets && rem_set != nullptr) {
2335 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
2336 << static_cast<int>(collector_type_);
2337 TimingLogger::ScopedSplit split("AllocSpaceRemSetClearCards", &timings);
2338 rem_set->ClearCards();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002339 } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
Ian Rogers5fe9af72013-11-14 00:17:20 -08002340 TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
Mathieu Chartierd22d5482012-11-06 17:14:12 -08002341 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
2342 // were dirty before the GC started.
Mathieu Chartierbd0a6532014-02-27 11:14:21 -08002343 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
2344 // -> clean(cleaning thread).
Mathieu Chartier590fee92013-09-13 13:46:47 -07002345 // The race leaves us with either an aged card or an unaged card. Since we scan the checkpoint
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002346 // roots first and then scan / update mod-union tables, we will always scan one of the two.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002347 // If we end up with the unaged card, we scan it in the pause.
Mathieu Chartierd22d5482012-11-06 17:14:12 -08002348 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07002349 }
2350 }
2351}
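
// [Editorial sketch, not part of the original file] The AgeCardVisitor used above "ages" cards so
// the GC can tell cards dirtied before the collection started from cards dirtied while it runs.
// A sketch of the usual aging rule; the concrete byte value is an assumption for illustration:
inline uint8_t SketchAgeCard(uint8_t card) {
  const uint8_t kSketchCardDirty = 0x70;  // Assumed "dirty" marker value.
  if (card == kSketchCardDirty) {
    return kSketchCardDirty - 1;          // Dirty before the GC -> aged, still worth scanning.
  }
  return 0;                               // Anything else -> cleared.
}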
2352
Mathieu Chartier407f7022014-02-18 14:37:05 -08002353static void IdentityMarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*, void*) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002354}
2355
Ian Rogers1d54e732013-05-02 21:10:01 -07002356void Heap::PreGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002357 ThreadList* thread_list = Runtime::Current()->GetThreadList();
2358 Thread* self = Thread::Current();
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002359
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002360 if (verify_pre_gc_heap_) {
2361 thread_list->SuspendAll();
2362 {
2363 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2364 if (!VerifyHeapReferences()) {
2365 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed";
2366 }
2367 }
2368 thread_list->ResumeAll();
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002369 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002370
2371 // Check that all objects which reference things in the live stack are on dirty cards.
2372 if (verify_missing_card_marks_) {
2373 thread_list->SuspendAll();
2374 {
2375 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002376 SwapStacks(self);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002377 // Sort the live stack so that we can quickly binary search it later.
2378 if (!VerifyMissingCardMarks()) {
2379 LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
2380 }
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002381 SwapStacks(self);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002382 }
2383 thread_list->ResumeAll();
2384 }
2385
2386 if (verify_mod_union_table_) {
2387 thread_list->SuspendAll();
2388 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002389 for (const auto& table_pair : mod_union_tables_) {
2390 accounting::ModUnionTable* mod_union_table = table_pair.second;
Mathieu Chartier407f7022014-02-18 14:37:05 -08002391 mod_union_table->UpdateAndMarkReferences(IdentityMarkHeapReferenceCallback, nullptr);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002392 mod_union_table->Verify();
2393 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002394 thread_list->ResumeAll();
2395 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002396}
2397
Ian Rogers1d54e732013-05-02 21:10:01 -07002398void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002399 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
2400 // reachable objects.
2401 if (verify_post_gc_heap_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002402 Thread* self = Thread::Current();
2403 CHECK_NE(self->GetState(), kRunnable);
Ian Rogers1d54e732013-05-02 21:10:01 -07002404 {
2405 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2406 // Swapping bound bitmaps does nothing.
2407 gc->SwapBitmaps();
Mathieu Chartierb272cd32014-04-11 16:42:46 -07002408 SwapSemiSpaces();
Ian Rogers1d54e732013-05-02 21:10:01 -07002409 if (!VerifyHeapReferences()) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002410 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed";
Ian Rogers1d54e732013-05-02 21:10:01 -07002411 }
Mathieu Chartierb272cd32014-04-11 16:42:46 -07002412 SwapSemiSpaces();
Ian Rogers1d54e732013-05-02 21:10:01 -07002413 gc->SwapBitmaps();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002414 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002415 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002416}
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002417
Ian Rogers1d54e732013-05-02 21:10:01 -07002418void Heap::PostGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002419 if (verify_system_weaks_) {
Anwar Ghuloum67f99412013-08-12 14:19:48 -07002420 Thread* self = Thread::Current();
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002421 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Ian Rogers1d54e732013-05-02 21:10:01 -07002422 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002423 mark_sweep->VerifySystemWeaks();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002424 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07002425}
2426
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002427void Heap::PreGcRosAllocVerification(TimingLogger* timings) {
2428 if (verify_pre_gc_rosalloc_) {
2429 TimingLogger::ScopedSplit split("PreGcRosAllocVerification", timings);
2430 for (const auto& space : continuous_spaces_) {
2431 if (space->IsRosAllocSpace()) {
2432 VLOG(heap) << "PreGcRosAllocVerification : " << space->GetName();
2433 space::RosAllocSpace* rosalloc_space = space->AsRosAllocSpace();
2434 rosalloc_space->Verify();
2435 }
2436 }
2437 }
2438}
2439
2440void Heap::PostGcRosAllocVerification(TimingLogger* timings) {
2441 if (verify_post_gc_rosalloc_) {
2442 TimingLogger::ScopedSplit split("PostGcRosAllocVerification", timings);
2443 for (const auto& space : continuous_spaces_) {
2444 if (space->IsRosAllocSpace()) {
2445 VLOG(heap) << "PostGcRosAllocVerification : " << space->GetName();
2446 space::RosAllocSpace* rosalloc_space = space->AsRosAllocSpace();
2447 rosalloc_space->Verify();
2448 }
2449 }
2450 }
2451}
2452
Mathieu Chartier590fee92013-09-13 13:46:47 -07002453collector::GcType Heap::WaitForGcToComplete(Thread* self) {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002454 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002455 MutexLock mu(self, *gc_complete_lock_);
2456 return WaitForGcToCompleteLocked(self);
2457}
2458
2459collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002460 collector::GcType last_gc_type = collector::kGcTypeNone;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002461 uint64_t wait_start = NanoTime();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002462 while (collector_type_running_ != kCollectorTypeNone) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002463 ATRACE_BEGIN("GC: Wait For Completion");
 2464 // We must wait, change the thread state, and then sleep on gc_complete_cond_.
2465 gc_complete_cond_->Wait(self);
2466 last_gc_type = last_gc_type_;
Mathieu Chartier752a0e62013-06-27 11:03:27 -07002467 ATRACE_END();
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07002468 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07002469 uint64_t wait_time = NanoTime() - wait_start;
2470 total_wait_time_ += wait_time;
2471 if (wait_time > long_pause_log_threshold_) {
2472 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time);
2473 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07002474 return last_gc_type;
Carl Shapiro69759ea2011-07-21 18:13:35 -07002475}
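
// [Editorial sketch, not part of the original file] FinishGC() and WaitForGcToCompleteLocked()
// form a standard monitor: waiters loop on a condition variable until collector_type_running_
// returns to kCollectorTypeNone, and the finishing thread broadcasts. The same shape expressed
// with the C++ standard library (assumes <mutex> and <condition_variable>):
struct SketchGcMonitor {
  std::mutex lock;
  std::condition_variable done;
  bool gc_running = false;

  void WaitForGc() {
    std::unique_lock<std::mutex> mu(lock);
    while (gc_running) {   // Loop: spurious wakeups and back-to-back GCs are both possible.
      done.wait(mu);
    }
  }
  void FinishGc() {
    { std::lock_guard<std::mutex> mu(lock); gc_running = false; }
    done.notify_all();     // Equivalent of gc_complete_cond_->Broadcast(self) above.
  }
};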
2476
Elliott Hughesc967f782012-04-16 10:23:15 -07002477void Heap::DumpForSigQuit(std::ostream& os) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002478 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002479 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -07002480 DumpGcPerformanceInfo(os);
Elliott Hughesc967f782012-04-16 10:23:15 -07002481}
2482
2483size_t Heap::GetPercentFree() {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002484 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory());
Elliott Hughesc967f782012-04-16 10:23:15 -07002485}
2486
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08002487void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002488 if (max_allowed_footprint > GetMaxMemory()) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002489 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002490 << PrettySize(GetMaxMemory());
2491 max_allowed_footprint = GetMaxMemory();
2492 }
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -07002493 max_allowed_footprint_ = max_allowed_footprint;
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07002494}
2495
Mathieu Chartier590fee92013-09-13 13:46:47 -07002496bool Heap::IsMovableObject(const mirror::Object* obj) const {
2497 if (kMovingCollector) {
Mathieu Chartier31f44142014-04-08 14:40:03 -07002498 space::Space* space = FindContinuousSpaceFromObject(obj, true);
2499 if (space != nullptr) {
2500 // TODO: Check large object?
2501 return space->CanMoveObjects();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002502 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07002503 }
2504 return false;
2505}
2506
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002507void Heap::UpdateMaxNativeFootprint() {
2508 size_t native_size = native_bytes_allocated_;
2509 // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
2510 size_t target_size = native_size / GetTargetHeapUtilization();
2511 if (target_size > native_size + max_free_) {
2512 target_size = native_size + max_free_;
2513 } else if (target_size < native_size + min_free_) {
2514 target_size = native_size + min_free_;
2515 }
2516 native_footprint_gc_watermark_ = target_size;
2517 native_footprint_limit_ = 2 * target_size - native_size;
2518}
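
// [Editorial sketch, not part of the original file] UpdateMaxNativeFootprint() above sizes the
// native GC watermark as allocated-bytes / target-utilization, clamps it to stay between
// min_free_ and max_free_ above the current allocation, and places the hard limit symmetrically
// beyond the watermark. The same arithmetic stand-alone (assumes <cstddef>):
inline void SketchNativeFootprint(size_t native_size, double target_utilization,
                                  size_t min_free, size_t max_free,
                                  size_t* gc_watermark, size_t* limit) {
  size_t target = static_cast<size_t>(native_size / target_utilization);
  if (target > native_size + max_free) {
    target = native_size + max_free;
  } else if (target < native_size + min_free) {
    target = native_size + min_free;
  }
  *gc_watermark = target;             // First watermark: consider requesting a GC past this point.
  *limit = 2 * target - native_size;  // Second watermark: native allocation is outpacing the GC
                                      // (see RegisterNativeAllocation below).
}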
2519
Mathieu Chartierafe49982014-03-27 10:55:04 -07002520collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
2521 for (const auto& collector : garbage_collectors_) {
2522 if (collector->GetCollectorType() == collector_type_ &&
2523 collector->GetGcType() == gc_type) {
2524 return collector;
2525 }
2526 }
2527 return nullptr;
2528}
2529
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002530double Heap::HeapGrowthMultiplier() const {
 2531 // Return 1.0 if we don't care about pause times (we are in the background) or are in low memory mode.
2532 if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
2533 return 1.0;
2534 }
2535 return foreground_heap_growth_multiplier_;
2536}
2537
Mathieu Chartierafe49982014-03-27 10:55:04 -07002538void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002539 // We know what our utilization is at this moment.
2540 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002541 const uint64_t bytes_allocated = GetBytesAllocated();
Mathieu Chartier65db8802012-11-20 12:36:46 -08002542 last_gc_size_ = bytes_allocated;
Ian Rogers1d54e732013-05-02 21:10:01 -07002543 last_gc_time_ns_ = NanoTime();
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002544 uint64_t target_size;
Mathieu Chartierafe49982014-03-27 10:55:04 -07002545 collector::GcType gc_type = collector_ran->GetGcType();
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002546 if (gc_type != collector::kGcTypeSticky) {
2547 // Grow the heap for non sticky GC.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002548 const float multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
2549 // foreground.
2550 intptr_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
2551 CHECK_GE(delta, 0);
2552 target_size = bytes_allocated + delta * multiplier;
2553 target_size = std::min(target_size,
2554 bytes_allocated + static_cast<uint64_t>(max_free_ * multiplier));
2555 target_size = std::max(target_size,
2556 bytes_allocated + static_cast<uint64_t>(min_free_ * multiplier));
Mathieu Chartier590fee92013-09-13 13:46:47 -07002557 native_need_to_run_finalization_ = true;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002558 next_gc_type_ = collector::kGcTypeSticky;
2559 } else {
Mathieu Chartierafe49982014-03-27 10:55:04 -07002560 collector::GcType non_sticky_gc_type =
2561 have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
2562 // Find what the next non sticky collector will be.
2563 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
2564 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
2565 // do another sticky collection next.
2566 // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
 2567 // pathological case where dead objects that aren't reclaimed by the sticky GC accumulate
2568 // if the sticky GC throughput always remained >= the full/partial throughput.
Mathieu Chartierdf86d1f2014-04-08 13:44:04 -07002569 if (collector_ran->GetEstimatedLastIterationThroughput() * kStickyGcThroughputAdjustment >=
Mathieu Chartierafe49982014-03-27 10:55:04 -07002570 non_sticky_collector->GetEstimatedMeanThroughput() &&
2571 non_sticky_collector->GetIterations() > 0 &&
2572 bytes_allocated <= max_allowed_footprint_) {
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002573 next_gc_type_ = collector::kGcTypeSticky;
2574 } else {
Mathieu Chartierafe49982014-03-27 10:55:04 -07002575 next_gc_type_ = non_sticky_gc_type;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002576 }
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002577 // If we have freed enough memory, shrink the heap back down.
2578 if (bytes_allocated + max_free_ < max_allowed_footprint_) {
2579 target_size = bytes_allocated + max_free_;
2580 } else {
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002581 target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002582 }
2583 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002584 if (!ignore_max_footprint_) {
2585 SetIdealFootprint(target_size);
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002586 if (IsGcConcurrent()) {
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002587 // Calculate when to perform the next ConcurrentGC.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002588 // Calculate the estimated GC duration.
Mathieu Chartierafe49982014-03-27 10:55:04 -07002589 const double gc_duration_seconds = NsToMs(collector_ran->GetDurationNs()) / 1000.0;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002590 // Estimate how many remaining bytes we will have when we need to start the next GC.
2591 size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
Mathieu Chartier74762802014-01-24 10:21:35 -08002592 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002593 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
2594 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
 2595 // A situation that should never happen: the estimated allocation rate implies we will exceed
 2596 // the application's entire footprint. Schedule
Mathieu Chartier74762802014-01-24 10:21:35 -08002597 // another GC nearly straight away.
2598 remaining_bytes = kMinConcurrentRemainingBytes;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002599 }
Mathieu Chartier74762802014-01-24 10:21:35 -08002600 DCHECK_LE(remaining_bytes, max_allowed_footprint_);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002601 DCHECK_LE(max_allowed_footprint_, growth_limit_);
Mathieu Chartier74762802014-01-24 10:21:35 -08002602 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
2603 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
2604 // right away.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002605 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
2606 static_cast<size_t>(bytes_allocated));
Mathieu Chartier65db8802012-11-20 12:36:46 -08002607 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08002608 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07002609}
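
// [Editorial sketch, not part of the original file] For concurrent collectors,
// GrowForUtilization() wants the next GC to finish before the footprint is hit, so it starts the
// GC once the bytes remaining drop below an estimate of (allocation rate x expected GC duration),
// clamped to a sane range. A stand-alone version of that trigger calculation
// (assumes <algorithm> and <cstdint>):
inline uint64_t SketchConcurrentStartBytes(uint64_t max_allowed_footprint,
                                           uint64_t bytes_allocated,
                                           uint64_t allocation_rate_bytes_per_sec,
                                           double expected_gc_duration_sec,
                                           uint64_t min_remaining,
                                           uint64_t max_remaining) {
  uint64_t remaining =
      static_cast<uint64_t>(allocation_rate_bytes_per_sec * expected_gc_duration_sec);
  remaining = std::min(remaining, max_remaining);
  remaining = std::max(remaining, min_remaining);
  if (remaining > max_allowed_footprint) {
    remaining = min_remaining;  // Implausible estimate; fall back and start a GC very soon.
  }
  // Trigger once allocation reaches (footprint - remaining), never below what is already in use.
  return std::max(max_allowed_footprint - remaining, bytes_allocated);
}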
2610
jeffhaoc1160702011-10-27 15:48:45 -07002611void Heap::ClearGrowthLimit() {
Mathieu Chartier80de7a62012-11-27 17:21:50 -08002612 growth_limit_ = capacity_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002613 non_moving_space_->ClearGrowthLimit();
jeffhaoc1160702011-10-27 15:48:45 -07002614}
2615
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002616void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002617 ScopedObjectAccess soa(self);
Ian Rogers53b8b092014-03-13 23:45:53 -07002618 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(object));
2619 jvalue args[1];
2620 args[0].l = arg.get();
2621 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002622}
2623
Mathieu Chartier39e32612013-11-12 16:28:05 -08002624void Heap::EnqueueClearedReferences() {
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002625 Thread* self = Thread::Current();
2626 Locks::mutator_lock_->AssertNotHeld(self);
Mathieu Chartier39e32612013-11-12 16:28:05 -08002627 if (!cleared_references_.IsEmpty()) {
Ian Rogers64b6d142012-10-29 16:34:15 -07002628 // When a runtime isn't started there are no reference queues to care about so ignore.
2629 if (LIKELY(Runtime::Current()->IsStarted())) {
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002630 ScopedObjectAccess soa(self);
Ian Rogers53b8b092014-03-13 23:45:53 -07002631 ScopedLocalRef<jobject> arg(self->GetJniEnv(),
2632 soa.AddLocalReference<jobject>(cleared_references_.GetList()));
2633 jvalue args[1];
2634 args[0].l = arg.get();
2635 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
Ian Rogers64b6d142012-10-29 16:34:15 -07002636 }
Mathieu Chartier39e32612013-11-12 16:28:05 -08002637 cleared_references_.Clear();
Elliott Hughesadb460d2011-10-05 17:02:34 -07002638 }
2639}
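
// [Editorial sketch, not part of the original file] EnqueueClearedReferences() hands the cleared
// reference list back to Java by invoking a static method (ReferenceQueue.add) through the
// runtime's own invocation helpers. Plain JNI expresses the same kind of upcall as below; the
// class name, method name, and signature strings are illustrative placeholders:
static void SketchCallStaticVoid(JNIEnv* env, const char* class_name, const char* method_name,
                                 const char* signature, jobject arg) {
  jclass klass = env->FindClass(class_name);  // e.g. "java/lang/ref/ReferenceQueue"
  if (klass == nullptr) {
    return;  // A ClassNotFoundException is now pending.
  }
  jmethodID method = env->GetStaticMethodID(klass, method_name, signature);
  if (method != nullptr) {  // e.g. "add" with signature "(Ljava/lang/ref/Reference;)V"
    env->CallStaticVoidMethod(klass, method, arg);
  }
  env->DeleteLocalRef(klass);
}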
2640
Ian Rogers1f539342012-10-03 21:09:42 -07002641void Heap::RequestConcurrentGC(Thread* self) {
Mathieu Chartier069387a2012-06-18 12:01:01 -07002642 // Make sure that we can do a concurrent GC.
Ian Rogers120f1c72012-09-28 17:17:10 -07002643 Runtime* runtime = Runtime::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002644 if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
2645 self->IsHandlingStackOverflow()) {
Ian Rogers120f1c72012-09-28 17:17:10 -07002646 return;
2647 }
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002648 // We already have a request pending, no reason to start more until we update
2649 // concurrent_start_bytes_.
2650 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
Ian Rogers120f1c72012-09-28 17:17:10 -07002651 JNIEnv* env = self->GetJniEnv();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002652 DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
2653 DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002654 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2655 WellKnownClasses::java_lang_Daemons_requestGC);
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07002656 CHECK(!env->ExceptionCheck());
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07002657}
2658
Ian Rogers81d425b2012-09-27 16:03:43 -07002659void Heap::ConcurrentGC(Thread* self) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002660 if (Runtime::Current()->IsShuttingDown(self)) {
2661 return;
Mathieu Chartier2542d662012-06-21 17:14:11 -07002662 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08002663 // Wait for any GCs currently running to finish.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002664 if (WaitForGcToComplete(self) == collector::kGcTypeNone) {
Mathieu Chartierf9ed0d32013-11-21 16:42:47 -08002665 // If we can't run the GC type we wanted to run, find the next appropriate one and try that
2666 // instead. E.g. can't do partial, so do full instead.
2667 if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
2668 collector::kGcTypeNone) {
2669 for (collector::GcType gc_type : gc_plan_) {
2670 // Attempt to run the collector, if we succeed, we are done.
2671 if (gc_type > next_gc_type_ &&
2672 CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
2673 break;
2674 }
2675 }
2676 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002677 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07002678}
2679
Mathieu Chartier7bf52d22014-03-13 14:46:09 -07002680void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08002681 Thread* self = Thread::Current();
2682 {
2683 MutexLock mu(self, *heap_trim_request_lock_);
2684 if (desired_collector_type_ == desired_collector_type) {
2685 return;
2686 }
2687 heap_transition_target_time_ = std::max(heap_transition_target_time_, NanoTime() + delta_time);
2688 desired_collector_type_ = desired_collector_type;
2689 }
2690 SignalHeapTrimDaemon(self);
2691}
2692
Mathieu Chartier7bf52d22014-03-13 14:46:09 -07002693void Heap::RequestHeapTrim() {
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07002694 // Request a heap trim only if we do not currently care about pause times.
2695 if (CareAboutPauseTimes()) {
2696 return;
2697 }
Ian Rogers48931882013-01-22 14:35:16 -08002698 // GC completed and now we must decide whether to request a heap trim (advising pages back to the
2699 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
2700 // a space it will hold its lock and can become a cause of jank.
2701 // Note, the large object space self trims and the Zygote space was trimmed and unchanging since
2702 // forking.
2703
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08002704 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
2705 // because that only marks object heads, so a large array looks like lots of empty space. We
2706 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
2707 // to utilization (which is probably inversely proportional to how much benefit we can expect).
2708 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
2709 // not how much use we're making of those pages.
Ian Rogers120f1c72012-09-28 17:17:10 -07002710
2711 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002712 Runtime* runtime = Runtime::Current();
2713 if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
 2714 // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
2715 // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
2716 // as we don't hold the lock while requesting the trim).
2717 return;
Ian Rogerse1d490c2012-02-03 09:09:07 -08002718 }
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07002719 {
2720 MutexLock mu(self, *heap_trim_request_lock_);
2721 if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
2722 // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one
2723 // just yet.
2724 return;
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08002725 }
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07002726 heap_trim_request_pending_ = true;
Mathieu Chartierc39e3422013-08-07 16:41:36 -07002727 }
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07002728 // Notify the daemon thread which will actually do the heap trim.
2729 SignalHeapTrimDaemon(self);
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08002730}

void Heap::SignalHeapTrimDaemon(Thread* self) {
  JNIEnv* env = self->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != nullptr);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestHeapTrim);
  CHECK(!env->ExceptionCheck());
}
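
// Illustrative aside (a sketch, not part of the original implementation): the method above is the
// usual JNI pattern of invoking a pre-cached static Java method from native code and asserting
// that no exception is pending afterwards. Without the WellKnownClasses cache, the same call
// would be spelled out roughly as follows (lookup failure handling omitted for brevity):
//
//   jclass daemons = env->FindClass("java/lang/Daemons");
//   jmethodID request_trim = env->GetStaticMethodID(daemons, "requestHeapTrim", "()V");
//   env->CallStaticVoidMethod(daemons, request_trim);
//   CHECK(!env->ExceptionCheck());
//
// Caching the class (as a global reference) and the jmethodID up front, as WellKnownClasses does,
// avoids repeating the FindClass/GetStaticMethodID lookups on every trim or transition request.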

void Heap::RevokeThreadLocalBuffers(Thread* thread) {
  if (rosalloc_space_ != nullptr) {
    rosalloc_space_->RevokeThreadLocalBuffers(thread);
  }
  if (bump_pointer_space_ != nullptr) {
    bump_pointer_space_->RevokeThreadLocalBuffers(thread);
  }
}

void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
  if (rosalloc_space_ != nullptr) {
    rosalloc_space_->RevokeThreadLocalBuffers(thread);
  }
}

void Heap::RevokeAllThreadLocalBuffers() {
  if (rosalloc_space_ != nullptr) {
    rosalloc_space_->RevokeAllThreadLocalBuffers();
  }
  if (bump_pointer_space_ != nullptr) {
    bump_pointer_space_->RevokeAllThreadLocalBuffers();
  }
}

bool Heap::IsGCRequestPending() const {
  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
}

void Heap::RunFinalization(JNIEnv* env) {
  // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
  if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
    CHECK(WellKnownClasses::java_lang_System != nullptr);
    WellKnownClasses::java_lang_System_runFinalization =
        CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
    CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
  }
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
                            WellKnownClasses::java_lang_System_runFinalization);
}
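
// Illustrative aside (a sketch, not part of the original implementation): RunFinalization()
// lazily resolves and caches the jmethodID for java.lang.System.runFinalization() because System
// is not fully set up when WellKnownClasses::Init runs. Reduced to its core, this is just lazy
// initialization of a cached handle before use (the local names below are hypothetical):
//
//   if (cached_run_finalization == nullptr) {
//     cached_run_finalization = CacheMethod(env, system_class, true, "runFinalization", "()V");
//     CHECK(cached_run_finalization != nullptr);
//   }
//   env->CallStaticVoidMethod(system_class, cached_run_finalization);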

void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
  Thread* self = ThreadForEnv(env);
  if (native_need_to_run_finalization_) {
    RunFinalization(env);
    UpdateMaxNativeFootprint();
    native_need_to_run_finalization_ = false;
  }
  // Total number of native bytes allocated.
  native_bytes_allocated_.FetchAndAdd(bytes);
  if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
    collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
        collector::kGcTypeFull;

    // The second watermark is higher than the GC watermark. If you hit it, you are allocating
    // native objects faster than the GC can keep up with.
    if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
      if (WaitForGcToComplete(self) != collector::kGcTypeNone) {
        // Just finished a GC, attempt to run finalizers.
        RunFinalization(env);
        CHECK(!env->ExceptionCheck());
      }
      // If we are still over the watermark, attempt a GC for alloc and run finalizers.
      if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
        CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
        RunFinalization(env);
        native_need_to_run_finalization_ = false;
        CHECK(!env->ExceptionCheck());
      }
      // We have just run finalizers, so update the native watermark since it is very likely that
      // finalizers released native allocations.
      UpdateMaxNativeFootprint();
    } else if (!IsGCRequestPending()) {
      if (IsGcConcurrent()) {
        RequestConcurrentGC(self);
      } else {
        CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
      }
    }
  }
}
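
// Illustrative aside (a sketch, not part of the original implementation): the two watermarks
// above stage the response to native allocation pressure. With made-up numbers (the real values
// are computed elsewhere and are not these):
//
//   native_footprint_gc_watermark_ = 24 MB   // Soft threshold: ask the GC to run.
//   native_footprint_limit_        = 32 MB   // Hard threshold: wait, collect, run finalizers.
//
//   native_bytes_allocated_ = 20 MB  ->  neither threshold crossed, the allocation is recorded.
//   native_bytes_allocated_ = 28 MB  ->  over the soft watermark only: request a concurrent GC,
//                                        or collect synchronously if the GC is not concurrent.
//   native_bytes_allocated_ = 40 MB  ->  over the hard limit: wait for any in-progress GC, run
//                                        finalizers, and if still over the limit, force a
//                                        blocking kGcCauseForNativeAlloc collection.
//
// Registering native allocations this way lets managed objects that own malloc-backed buffers
// exert pressure on the collector even though those bytes never live in the Java heap.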

void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
  int expected_size, new_size;
  do {
    expected_size = native_bytes_allocated_.Load();
    new_size = expected_size - bytes;
    if (UNLIKELY(new_size < 0)) {
      ScopedObjectAccess soa(env);
      env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
                    StringPrintf("Attempted to free %d native bytes with only %d native bytes "
                                 "registered as allocated", bytes, expected_size).c_str());
      break;
    }
  } while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size));
}

size_t Heap::GetTotalMemory() const {
  size_t ret = 0;
  for (const auto& space : continuous_spaces_) {
    // Currently don't include the image space.
    if (!space->IsImageSpace()) {
      ret += space->Size();
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    if (space->IsLargeObjectSpace()) {
      ret += space->AsLargeObjectSpace()->GetBytesAllocated();
    }
  }
  return ret;
}

void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
  DCHECK(mod_union_table != nullptr);
  mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
}

void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
  CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
        (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
        strlen(ClassHelper(c).GetDescriptor()) == 0);
  CHECK_GE(byte_count, sizeof(mirror::Object));
}

void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
  CHECK(remembered_set != nullptr);
  space::Space* space = remembered_set->GetSpace();
  CHECK(space != nullptr);
  CHECK(remembered_sets_.find(space) == remembered_sets_.end());
  remembered_sets_.Put(space, remembered_set);
  CHECK(remembered_sets_.find(space) != remembered_sets_.end());
}

void Heap::RemoveRememberedSet(space::Space* space) {
  CHECK(space != nullptr);
  auto it = remembered_sets_.find(space);
  CHECK(it != remembered_sets_.end());
  remembered_sets_.erase(it);
  CHECK(remembered_sets_.find(space) == remembered_sets_.end());
}

void Heap::ClearMarkedObjects() {
  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (space->GetLiveBitmap() != mark_bitmap) {
      mark_bitmap->Clear();
    }
  }
  // Clear the marked objects in the discontinuous space object sets.
  for (const auto& space : GetDiscontinuousSpaces()) {
    space->GetMarkBitmap()->Clear();
  }
}

}  // namespace gc
}  // namespace art