/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <memory>
#include <vector>

#include "base/allocator.h"
#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_compact.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "heap-inl.h"
#include "image.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "os.h"
#include "reflection.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {

namespace gc {

static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
static constexpr bool kGCALotMode = false;
static constexpr size_t kGcAlotInterval = KB;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static constexpr double kStickyGcThroughputAdjustment = 1.0;
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;
// Define space names.
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;

Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, double foreground_heap_growth_multiplier,
           size_t capacity, size_t non_moving_space_capacity, const std::string& image_file_name,
           const InstructionSet image_instruction_set, CollectorType foreground_collector_type,
           CollectorType background_collector_type,
           space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold,
           size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
           size_t long_pause_log_threshold, size_t long_gc_log_threshold,
           bool ignore_max_footprint, bool use_tlab,
           bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction_for_oom,
           uint64_t min_interval_homogeneous_space_compaction_by_oom)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      heap_trim_request_lock_(nullptr),
      last_trim_time_(0),
      heap_transition_or_trim_target_time_(0),
      heap_trim_request_pending_(false),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      zygote_space_(nullptr),
      large_object_threshold_(large_object_threshold),
      collector_type_running_(kCollectorTypeNone),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_need_to_run_finalization_(false),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      last_gc_time_ns_(NanoTime()),
      allocation_rate_(0),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? KB : MB),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
      use_tlab_(use_tlab),
      main_space_backup_(nullptr),
      min_interval_homogeneous_space_compaction_by_oom_(
          min_interval_homogeneous_space_compaction_by_oom),
      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  const bool is_zygote = Runtime::Current()->IsZygote();
  if (!is_zygote) {
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files
  uint8_t* requested_alloc_space_begin = nullptr;
  if (!image_file_name.empty()) {
    std::string error_msg;
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str(),
                                                               image_instruction_set,
                                                               &error_msg);
    if (image_space != nullptr) {
      AddSpace(image_space);
      // Oat files referenced by image files immediately follow them in memory; ensure the alloc
      // space isn't going to end up in the middle.
      uint8_t* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
      CHECK_GT(oat_file_end_addr, image_space->End());
      requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
    } else {
      LOG(WARNING) << "Could not create image space with image file '" << image_file_name << "'. "
                   << "Attempting to fall back to imageless running. Error was: " << error_msg;
    }
  }
  /*
  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +- nonmoving space (non_moving_space_capacity)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space / bump space 1 (capacity_) +-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space2 / bump space 2 (capacity_)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
  */
  bool support_homogeneous_space_compaction =
      background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
      use_homogeneous_space_compaction_for_oom;
  // We may use the same space as the main space for the non moving space if we don't need to
  // compact from the main space.
  // This is not the case if we support homogeneous compaction or have a moving background
  // collector type.
  bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);
  if (foreground_collector_type == kCollectorTypeGSS) {
    separate_non_moving_space = false;
  }
  std::unique_ptr<MemMap> main_mem_map_1;
  std::unique_ptr<MemMap> main_mem_map_2;
  uint8_t* request_begin = requested_alloc_space_begin;
  if (request_begin != nullptr && separate_non_moving_space) {
    request_begin += non_moving_space_capacity;
  }
  std::string error_str;
  std::unique_ptr<MemMap> non_moving_space_mem_map;
  if (separate_non_moving_space) {
    // Reserve the non moving mem map before the other two since it needs to be at a specific
    // address.
    non_moving_space_mem_map.reset(
        MemMap::MapAnonymous("non moving space", requested_alloc_space_begin,
                             non_moving_space_capacity, PROT_READ | PROT_WRITE, true, &error_str));
    CHECK(non_moving_space_mem_map != nullptr) << error_str;
    // Try to reserve virtual memory at a lower address if we have a separate non moving space.
    request_begin = reinterpret_cast<uint8_t*>(300 * MB);
  }
  // Attempt to create 2 mem maps at or after the requested begin.
  main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
                                                    PROT_READ | PROT_WRITE, &error_str));
  CHECK(main_mem_map_1.get() != nullptr) << error_str;
  if (support_homogeneous_space_compaction ||
      background_collector_type_ == kCollectorTypeSS ||
      foreground_collector_type_ == kCollectorTypeSS) {
    main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
                                                      capacity_, PROT_READ | PROT_WRITE,
                                                      &error_str));
    CHECK(main_mem_map_2.get() != nullptr) << error_str;
  }
  // Create the non moving space first so that bitmaps don't take up the address range.
  if (separate_non_moving_space) {
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // active rosalloc spaces.
    const size_t size = non_moving_space_mem_map->Size();
    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
        non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
        initial_size, size, size, false);
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
                                        << requested_alloc_space_begin;
    AddSpace(non_moving_space_);
  }
  // Create other spaces based on whether or not we have a moving GC.
  if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) {
    // Create bump pointer spaces.
    // We only need to create the bump pointer spaces if the foreground collector is a
    // compacting GC.
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
                                                                    main_mem_map_1.release());
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                            main_mem_map_2.release());
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    CHECK(separate_non_moving_space);
  } else {
    CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
    CHECK(main_space_ != nullptr);
    AddSpace(main_space_);
    if (!separate_non_moving_space) {
      non_moving_space_ = main_space_;
      CHECK(!non_moving_space_->CanMoveObjects());
    }
    if (foreground_collector_type_ == kCollectorTypeGSS) {
      CHECK_EQ(foreground_collector_type_, background_collector_type_);
      // Create bump pointer spaces instead of a backup space.
      main_mem_map_2.release();
      bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
                                                            kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(bump_pointer_space_ != nullptr);
      AddSpace(bump_pointer_space_);
      temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
                                                    kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(temp_space_ != nullptr);
      AddSpace(temp_space_);
    } else if (main_mem_map_2.get() != nullptr) {
      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
      main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
                                                           growth_limit_, capacity_, name, true));
      CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in the heap_begin and heap_end.
      AddSpace(main_space_backup_.get());
    }
  }
  CHECK(non_moving_space_ != nullptr);
  CHECK(!non_moving_space_->CanMoveObjects());
  // Allocate the large object space.
  if (large_object_space_type == space::kLargeObjectSpaceTypeFreeList) {
    large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
                                                       capacity_);
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else if (large_object_space_type == space::kLargeObjectSpaceTypeMap) {
    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else {
    // Disable the large object space by making the cutoff excessively large.
    large_object_threshold_ = std::numeric_limits<size_t>::max();
    large_object_space_ = nullptr;
  }
  if (large_object_space_ != nullptr) {
    AddSpace(large_object_space_);
  }
  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
  uint8_t* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;
  // Remove the main backup space since it slows down the GC to have unused extra spaces.
  if (main_space_backup_.get() != nullptr) {
    RemoveSpace(main_space_backup_.get());
  }
  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";
  // Card cache for now since it makes it easier for us to update the references to the copying
  // spaces.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
                                                      GetImageSpace());
  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
  AddModUnionTable(mod_union_table);
  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
  }
  // TODO: Count objects in the image space here?
  num_bytes_allocated_.StoreRelaxed(0);
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                    kDefaultMarkStackSize));
  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
  allocation_stack_.reset(accounting::ObjectStack::Create(
      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
  live_stack_.reset(accounting::ObjectStack::Create(
      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create it earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  heap_trim_request_lock_ = new Mutex("Heap trim request lock");
  last_gc_size_ = GetBytesAllocated();
  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);
  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }
  if (kMovingCollector) {
    // TODO: Clean this up.
    const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
    semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                     generational ? "generational" : "");
    garbage_collectors_.push_back(semi_space_collector_);
    concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
    garbage_collectors_.push_back(concurrent_copying_collector_);
    mark_compact_collector_ = new collector::MarkCompact(this);
    garbage_collectors_.push_back(mark_compact_collector_);
  }
  if (GetImageSpace() != nullptr && non_moving_space_ != nullptr &&
      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
    // Check that there's no gap between the image space and the non moving space so that the
    // immune region won't break (eg. due to a large object allocated in the gap). This is only
    // required when we're the zygote or using GSS.
    bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
                                      non_moving_space_->GetMemMap());
    if (!no_gap) {
      MemMap::DumpMaps(LOG(ERROR));
      LOG(FATAL) << "There's a gap between the image space and the main space";
    }
  }
  if (running_on_valgrind_) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

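// Map an anonymous region, preferring the requested address; if that fails, retry once with no
// address hint so startup does not abort just because the preferred range is unavailable.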
MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, size_t capacity,
                                           int prot_flags, std::string* out_error_str) {
  while (true) {
    MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
                                       PROT_READ | PROT_WRITE, true, out_error_str);
    if (map != nullptr || request_begin == nullptr) {
      return map;
    }
    // Retry a second time with no specified request begin.
    request_begin = nullptr;
  }
  return nullptr;
}

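// Build a malloc-backed space on top of an existing mem map, using rosalloc or dlmalloc depending
// on kUseRosAlloc, and register a remembered set for it when the semi-space collector needs one.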
space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
                                                      size_t growth_limit, size_t capacity,
                                                      const char* name, bool can_move_objects) {
  space::MallocSpace* malloc_space = nullptr;
  if (kUseRosAlloc) {
    // Create rosalloc space.
    malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          low_memory_mode_, can_move_objects);
  } else {
    malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          can_move_objects);
  }
  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* rem_set =
        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
    AddRememberedSet(rem_set);
  }
  CHECK(malloc_space != nullptr) << "Failed to create " << name;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  return malloc_space;
}

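// Create (or recreate) the main space from a mem map. Whether its objects may later be moved
// depends on the configured foreground/background collectors and homogeneous space compaction.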
void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
    can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
  }
  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
    RemoveRememberedSet(main_space_);
  }
  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
  main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
                                            can_move_objects);
  SetSpaceAsDefault(main_space_);
  VLOG(heap) << "Created main space " << main_space_;
}

void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

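// Permanently fall back to non-moving collectors (CMS) and, with all threads suspended, flush the
// allocation stack and mark the main space non-moving so its objects are never relocated.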
void Heap::DisableMovingGc() {
  if (IsMovingGc(foreground_collector_type_)) {
    foreground_collector_type_ = kCollectorTypeCMS;
  }
  if (IsMovingGc(background_collector_type_)) {
    background_collector_type_ = foreground_collector_type_;
  }
  TransitionCollector(foreground_collector_type_);
  ThreadList* tl = Runtime::Current()->GetThreadList();
  Thread* self = Thread::Current();
  ScopedThreadStateChange tsc(self, kSuspended);
  tl->SuspendAll();
  // Something may have caused the transition to fail.
  if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
    CHECK(main_space_ != nullptr);
    // The allocation stack may have non movable objects in it. We need to flush it since the GC
    // can't only handle marking allocation stack objects of one non moving space and one main
    // space.
    {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      FlushAllocStack();
    }
    main_space_->DisableMovingObjects();
    non_moving_space_ = main_space_;
    CHECK(!non_moving_space_->CanMoveObjects());
  }
  tl->ResumeAll();
}

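// Compute a class descriptor without trusting that the heap is in a consistent state, so this can
// be used while dumping a possibly corrupt object during crash diagnosis.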
std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
  if (!IsValidContinuousSpaceObjectAddress(klass)) {
    return StringPrintf("<non heap address klass %p>", klass);
  }
  mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
  if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
    std::string result("[");
    result += SafeGetClassDescriptor(component_type);
    return result;
  } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
    return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
  } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
    return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
  } else {
    mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
    if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
      return StringPrintf("<non heap address dex_cache %p>", dex_cache);
    }
    const DexFile* dex_file = dex_cache->GetDexFile();
    uint16_t class_def_idx = klass->GetDexClassDefIndex();
    if (class_def_idx == DexFile::kDexNoIndex16) {
      return "<class def not found>";
    }
    const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
    const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
    return dex_file->GetTypeDescriptor(type_id);
  }
}

std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
  if (obj == nullptr) {
    return "null";
  }
  mirror::Class* klass = obj->GetClass<kVerifyNone>();
  if (klass == nullptr) {
    return "(class=null)";
  }
  std::string result(SafeGetClassDescriptor(klass));
  if (obj->IsClass()) {
    result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
  }
  return result;
}

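// Dump a best-effort description of an object (containing space, class, and type) for crash
// diagnosis, unprotecting the spaces first so these reads cannot themselves fault.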
void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
  if (obj == nullptr) {
    stream << "(obj=null)";
    return;
  }
  if (IsAligned<kObjectAlignment>(obj)) {
    space::Space* space = nullptr;
    // Don't use find space since it only finds spaces which actually contain objects instead of
    // spaces which may contain objects (e.g. cleared bump pointer spaces).
    for (const auto& cur_space : continuous_spaces_) {
      if (cur_space->HasAddress(obj)) {
        space = cur_space;
        break;
      }
    }
    // Unprotect all the spaces.
    for (const auto& space : continuous_spaces_) {
      mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
    }
    stream << "Object " << obj;
    if (space != nullptr) {
      stream << " in space " << *space;
    }
    mirror::Class* klass = obj->GetClass<kVerifyNone>();
    stream << "\nclass=" << klass;
    if (klass != nullptr) {
      stream << " type= " << SafePrettyTypeOf(obj);
    }
    // Re-protect the address we faulted on.
    mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
  }
}

bool Heap::IsCompilingBoot() const {
  if (!Runtime::Current()->IsCompiler()) {
    return false;
  }
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace() || space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

bool Heap::HasImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return true;
    }
  }
  return false;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsMovingGc(collector_type_running_)) {
    WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GE(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}

void Heap::UpdateProcessState(ProcessState process_state) {
  if (process_state_ != process_state) {
    process_state_ = process_state;
    for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
      // Start at index 1 to avoid "is always false" warning.
      // Have iteration 1 always transition the collector.
      TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
          ? foreground_collector_type_ : background_collector_type_);
      usleep(kCollectorTransitionStressWait);
    }
    if (process_state_ == kProcessStateJankPerceptible) {
      // Transition back to foreground right away to prevent jank.
      RequestCollectorTransition(foreground_collector_type_, 0);
    } else {
      // Don't delay for debug builds since we may want to stress test the GC.
      // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
      // special handling which does a homogeneous space compaction once but then doesn't
      // transition the collector.
      RequestCollectorTransition(background_collector_type_,
                                 kIsDebugBuild ? 0 : kCollectorTransitionWait);
    }
  }
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

void Heap::VisitObjects(ObjectCallback callback, void* arg) {
  // GCs can move objects, so don't allow this.
  ScopedAssertNoThreadSuspension ants(Thread::Current(), "Visiting objects");
  if (bump_pointer_space_ != nullptr) {
    // Visit objects in bump pointer space.
    bump_pointer_space_->Walk(callback, arg);
  }
  // TODO: Switch to standard begin and end to use a range-based loop.
  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
      it < end; ++it) {
    mirror::Object* obj = *it;
    if (obj != nullptr && obj->GetClass() != nullptr) {
      // Avoid the race condition caused by the object not yet being written into the allocation
      // stack or the class not yet being written in the object. Or, if kUseThreadLocalAllocationStack,
      // there can be nulls on the allocation stack.
      callback(obj, arg);
    }
  }
  GetLiveBitmap()->Walk(callback, arg);
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = non_moving_space_;
  // TODO: Generalize this to n bitmaps?
  CHECK(space1 != nullptr);
  CHECK(space2 != nullptr);
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
                 stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

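// Register a space with the heap: hook its live/mark bitmaps into the heap bitmaps, keep the
// continuous space list sorted by start address, and track it as an alloc space if applicable.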
void Heap::AddSpace(space::Space* space) {
  CHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      CHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
      return a->Begin() < b->Begin();
    });
  } else {
    CHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (continuous_space->IsDlMallocSpace()) {
    dlmalloc_space_ = continuous_space->AsDlMallocSpace();
  } else if (continuous_space->IsRosAllocSpace()) {
    rosalloc_space_ = continuous_space->AsRosAllocSpace();
  }
}

void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
}

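// Dump cumulative GC statistics: per-collector timings and throughput, allocation counts, pause
// time, and memory figures. Each collector's measurements are reset after being reported.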
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700799void Heap::DumpGcPerformanceInfo(std::ostream& os) {
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700800 // Dump cumulative timings.
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700801 os << "Dumping cumulative Gc timings\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700802 uint64_t total_duration = 0;
Mathieu Chartier2b82db42012-11-14 17:29:05 -0800803 // Dump cumulative loggers for each GC type.
Mathieu Chartier2b82db42012-11-14 17:29:05 -0800804 uint64_t total_paused_time = 0;
Mathieu Chartier5a487192014-04-08 11:14:54 -0700805 for (auto& collector : garbage_collectors_) {
Mathieu Chartier104fa0c2014-08-07 14:26:27 -0700806 total_duration += collector->GetCumulativeTimings().GetTotalNs();
807 total_paused_time += collector->GetTotalPausedTimeNs();
808 collector->DumpPerformanceInfo(os);
Mathieu Chartier5a487192014-04-08 11:14:54 -0700809 collector->ResetMeasurements();
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700810 }
Ian Rogers3e5cf302014-05-20 16:40:37 -0700811 uint64_t allocation_time =
812 static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700813 if (total_duration != 0) {
Brian Carlstrom2d888622013-07-18 17:02:00 -0700814 const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700815 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
816 os << "Mean GC size throughput: "
Ian Rogers1d54e732013-05-02 21:10:01 -0700817 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700818 os << "Mean GC object throughput: "
Ian Rogers1d54e732013-05-02 21:10:01 -0700819 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700820 }
Mathieu Chartierdd162fb2014-08-06 17:06:33 -0700821 uint64_t total_objects_allocated = GetObjectsAllocatedEver();
Mathieu Chartierc30a7252014-08-12 10:13:48 -0700822 os << "Total number of allocations " << total_objects_allocated << "\n";
Mathieu Chartierdd162fb2014-08-06 17:06:33 -0700823 uint64_t total_bytes_allocated = GetBytesAllocatedEver();
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700824 os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
Mathieu Chartierc30a7252014-08-12 10:13:48 -0700825 os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
Mathieu Chartierdd162fb2014-08-06 17:06:33 -0700826 os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
827 os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
Mathieu Chartierc30a7252014-08-12 10:13:48 -0700828 os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
829 os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700830 if (kMeasureAllocationTime) {
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700831 os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
832 os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
833 << "\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700834 }
Mathieu Chartiere4cab172014-08-19 18:24:04 -0700835 if (HasZygoteSpace()) {
836 os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
837 }
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700838 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
839 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700840 BaseMutex::DumpAll(os);
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700841}
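// A minimal worked example of the throughput math above, using hypothetical numbers (1.5 s of
// cumulative GC time and 300 MiB freed ever); it only restates the unit conversion used there:
//   uint64_t total_duration = 1500000000ULL;  // Nanoseconds of cumulative GC time.
//   double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;  // == 1.5 s.
//   double bytes_per_second = (300.0 * 1024 * 1024) / total_seconds;  // ~200 MiB/s "size throughput".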
842
Elliott Hughesb3bd5f02012-03-08 21:05:27 -0800843Heap::~Heap() {
Mathieu Chartier590fee92013-09-13 13:46:47 -0700844 VLOG(heap) << "Starting ~Heap()";
Mathieu Chartier590fee92013-09-13 13:46:47 -0700845 STLDeleteElements(&garbage_collectors_);
846 // If we don't reset then the mark stack complains in its destructor.
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700847 allocation_stack_->Reset();
848 live_stack_->Reset();
Mathieu Chartier11409ae2013-09-23 11:49:36 -0700849 STLDeleteValues(&mod_union_tables_);
Mathieu Chartier0767c9a2014-03-26 12:53:19 -0700850 STLDeleteValues(&remembered_sets_);
Ian Rogers1d54e732013-05-02 21:10:01 -0700851 STLDeleteElements(&continuous_spaces_);
852 STLDeleteElements(&discontinuous_spaces_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700853 delete gc_complete_lock_;
Mathieu Chartier0767c9a2014-03-26 12:53:19 -0700854 delete heap_trim_request_lock_;
Mathieu Chartier590fee92013-09-13 13:46:47 -0700855 VLOG(heap) << "Finished ~Heap()";
Carl Shapiro69759ea2011-07-21 18:13:35 -0700856}
857
Ian Rogers1d54e732013-05-02 21:10:01 -0700858space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
859 bool fail_ok) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -0700860 for (const auto& space : continuous_spaces_) {
861 if (space->Contains(obj)) {
862 return space;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -0700863 }
864 }
Ian Rogers1d54e732013-05-02 21:10:01 -0700865 if (!fail_ok) {
866 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
867 }
Mathieu Chartierb062fdd2012-07-03 09:51:48 -0700868 return NULL;
869}
870
Ian Rogers1d54e732013-05-02 21:10:01 -0700871space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
872 bool fail_ok) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -0700873 for (const auto& space : discontinuous_spaces_) {
874 if (space->Contains(obj)) {
875 return space;
Ian Rogers1d54e732013-05-02 21:10:01 -0700876 }
877 }
878 if (!fail_ok) {
879 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
880 }
881 return NULL;
882}
883
884space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
885 space::Space* result = FindContinuousSpaceFromObject(obj, true);
886 if (result != NULL) {
887 return result;
888 }
889 return FindDiscontinuousSpaceFromObject(obj, true);
890}
891
892space::ImageSpace* Heap::GetImageSpace() const {
Mathieu Chartier02e25112013-08-14 16:14:24 -0700893 for (const auto& space : continuous_spaces_) {
894 if (space->IsImageSpace()) {
895 return space->AsImageSpace();
Mathieu Chartierb062fdd2012-07-03 09:51:48 -0700896 }
897 }
898 return NULL;
899}
900
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -0700901void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700902 std::ostringstream oss;
Ian Rogersef7d42f2014-01-06 12:55:46 -0800903 size_t total_bytes_free = GetFreeMemory();
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700904 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
Mathieu Chartierdd162fb2014-08-06 17:06:33 -0700905 << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM";
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700906 // If the allocation failed due to fragmentation, print out the largest continuous allocation.
Zuo Wangf37a88b2014-07-10 04:26:41 -0700907 if (total_bytes_free >= byte_count) {
Mathieu Chartierb363f662014-07-16 13:28:58 -0700908 space::AllocSpace* space = nullptr;
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -0700909 if (allocator_type == kAllocatorTypeNonMoving) {
910 space = non_moving_space_;
911 } else if (allocator_type == kAllocatorTypeRosAlloc ||
912 allocator_type == kAllocatorTypeDlMalloc) {
913 space = main_space_;
Mathieu Chartierb363f662014-07-16 13:28:58 -0700914 } else if (allocator_type == kAllocatorTypeBumpPointer ||
915 allocator_type == kAllocatorTypeTLAB) {
916 space = bump_pointer_space_;
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700917 }
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -0700918 if (space != nullptr) {
919 space->LogFragmentationAllocFailure(oss, byte_count);
920 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700921 }
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700922 self->ThrowOutOfMemoryError(oss.str().c_str());
923}
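// With hypothetical numbers, the message built above reads roughly:
//   "Failed to allocate a 1048576 byte allocation with 524288 free bytes and 2MB until OOM"
// and, when the heap looks fragmented (free bytes >= requested size), it is followed by the
// owning space's LogFragmentationAllocFailure() output describing the largest contiguous free run.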
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -0700924
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800925void Heap::DoPendingTransitionOrTrim() {
Mathieu Chartierb2728552014-09-08 20:08:41 +0000926 Thread* self = Thread::Current();
927 CollectorType desired_collector_type;
928 // Wait until we reach the desired transition time.
929 while (true) {
930 uint64_t wait_time;
931 {
932 MutexLock mu(self, *heap_trim_request_lock_);
933 desired_collector_type = desired_collector_type_;
934 uint64_t current_time = NanoTime();
935 if (current_time >= heap_transition_or_trim_target_time_) {
936 break;
937 }
938 wait_time = heap_transition_or_trim_target_time_ - current_time;
939 }
940 ScopedThreadStateChange tsc(self, kSleeping);
941 usleep(wait_time / 1000); // Usleep takes microseconds.
942 }
943 // Launch homogeneous space compaction if it is desired.
944 if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
945 if (!CareAboutPauseTimes()) {
946 PerformHomogeneousSpaceCompact();
947 }
948 // No need to Trim(). Homogeneous space compaction may free more virtual and physical memory.
949 desired_collector_type = collector_type_;
950 return;
951 }
952 // Transition the collector if the desired collector type is not the same as the current
953 // collector type.
954 TransitionCollector(desired_collector_type);
Mathieu Chartier440e4ce2014-03-31 16:36:35 -0700955 if (!CareAboutPauseTimes()) {
956 // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care
957 // about pauses.
958 Runtime* runtime = Runtime::Current();
959 runtime->GetThreadList()->SuspendAll();
Mathieu Chartier48ab6872014-06-24 11:21:59 -0700960 uint64_t start_time = NanoTime();
961 size_t count = runtime->GetMonitorList()->DeflateMonitors();
962 VLOG(heap) << "Deflating " << count << " monitors took "
963 << PrettyDuration(NanoTime() - start_time);
Mathieu Chartier440e4ce2014-03-31 16:36:35 -0700964 runtime->GetThreadList()->ResumeAll();
Mathieu Chartier440e4ce2014-03-31 16:36:35 -0700965 }
Mathieu Chartierb2728552014-09-08 20:08:41 +0000966 // Do a heap trim if it is needed.
Mathieu Chartiera5b5c552014-06-24 14:48:59 -0700967 Trim();
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800968}
969
Mathieu Chartier590fee92013-09-13 13:46:47 -0700970void Heap::Trim() {
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -0800971 Thread* self = Thread::Current();
972 {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800973 MutexLock mu(self, *heap_trim_request_lock_);
Mathieu Chartier7bf52d22014-03-13 14:46:09 -0700974 if (!heap_trim_request_pending_ || last_trim_time_ + kHeapTrimWait >= NanoTime()) {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800975 return;
976 }
Mathieu Chartier7bf52d22014-03-13 14:46:09 -0700977 last_trim_time_ = NanoTime();
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800978 heap_trim_request_pending_ = false;
979 }
980 {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -0800981 // Need to do this before acquiring the locks since we don't want to get suspended while
982 // holding any locks.
983 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -0800984 // Pretend we are doing a GC to prevent background compaction from deleting the space we are
985 // trimming.
986 MutexLock mu(self, *gc_complete_lock_);
987 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -0700988 WaitForGcToCompleteLocked(kGcCauseTrim, self);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -0800989 collector_type_running_ = kCollectorTypeHeapTrim;
990 }
Mathieu Chartier590fee92013-09-13 13:46:47 -0700991 uint64_t start_ns = NanoTime();
992 // Trim the managed spaces.
993 uint64_t total_alloc_space_allocated = 0;
994 uint64_t total_alloc_space_size = 0;
995 uint64_t managed_reclaimed = 0;
996 for (const auto& space : continuous_spaces_) {
Mathieu Chartiera1602f22014-01-13 17:19:19 -0800997 if (space->IsMallocSpace()) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -0700998 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
999 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1000 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1001 // for a long period of time.
1002 managed_reclaimed += malloc_space->Trim();
1003 }
1004 total_alloc_space_size += malloc_space->Size();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001005 }
1006 }
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001007 total_alloc_space_allocated = GetBytesAllocated();
1008 if (large_object_space_ != nullptr) {
1009 total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1010 }
Mathieu Chartier31f44142014-04-08 14:40:03 -07001011 if (bump_pointer_space_ != nullptr) {
1012 total_alloc_space_allocated -= bump_pointer_space_->Size();
1013 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001014 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1015 static_cast<float>(total_alloc_space_size);
1016 uint64_t gc_heap_end_ns = NanoTime();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001017 // We never move things in the native heap, so we can finish the GC at this point.
1018 FinishGC(self, collector::kGcTypeNone);
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001019 size_t native_reclaimed = 0;
Ian Rogers872dd822014-10-30 11:19:14 -07001020
1021#ifdef HAVE_ANDROID_OS
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001022 // Only trim the native heap if we don't care about pauses.
1023 if (!CareAboutPauseTimes()) {
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001024#if defined(USE_DLMALLOC)
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001025 // Trim the native heap.
1026 dlmalloc_trim(0);
1027 dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001028#elif defined(USE_JEMALLOC)
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001029 // Jemalloc does its own internal trimming.
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001030#else
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001031 UNIMPLEMENTED(WARNING) << "Add trimming support";
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001032#endif
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001033 }
Ian Rogers872dd822014-10-30 11:19:14 -07001034#endif // HAVE_ANDROID_OS
Mathieu Chartier590fee92013-09-13 13:46:47 -07001035 uint64_t end_ns = NanoTime();
1036 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1037 << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
1038 << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
1039 << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
1040 << "%.";
1041}
1042
1043bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
1044 // Note: we deliberately don't take the lock here, and mustn't test anything that would require
1045 // taking the lock.
1046 if (obj == nullptr) {
Elliott Hughes88c5c352012-03-15 18:49:48 -07001047 return true;
1048 }
Mathieu Chartier15d34022014-02-26 17:16:38 -08001049 return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001050}
1051
Mathieu Chartierd68ac702014-02-11 14:50:51 -08001052bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
1053 return FindContinuousSpaceFromObject(obj, true) != nullptr;
1054}
1055
Mathieu Chartier15d34022014-02-26 17:16:38 -08001056bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
1057 if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
1058 return false;
1059 }
1060 for (const auto& space : continuous_spaces_) {
1061 if (space->HasAddress(obj)) {
1062 return true;
1063 }
1064 }
1065 return false;
Elliott Hughesa2501992011-08-26 19:39:54 -07001066}
1067
Ian Rogersef7d42f2014-01-06 12:55:46 -08001068bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001069 bool search_live_stack, bool sorted) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001070 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
1071 return false;
1072 }
1073 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001074 mirror::Class* klass = obj->GetClass<kVerifyNone>();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001075 if (obj == klass) {
Mathieu Chartier9be9a7a2014-01-24 14:07:33 -08001076 // This case happens for java.lang.Class.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001077 return true;
1078 }
1079 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1080 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001081 // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1082 // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1083 return temp_space_->Contains(obj);
Ian Rogers1d54e732013-05-02 21:10:01 -07001084 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001085 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001086 space::DiscontinuousSpace* d_space = nullptr;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001087 if (c_space != nullptr) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001088 if (c_space->GetLiveBitmap()->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001089 return true;
1090 }
1091 } else {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001092 d_space = FindDiscontinuousSpaceFromObject(obj, true);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001093 if (d_space != nullptr) {
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001094 if (d_space->GetLiveBitmap()->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001095 return true;
1096 }
1097 }
1098 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001099 // This covers the allocation/live stack swapping that is done without mutators suspended.
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001100 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1101 if (i > 0) {
1102 NanoSleep(MsToNs(10));
Ian Rogers1d54e732013-05-02 21:10:01 -07001103 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001104 if (search_allocation_stack) {
1105 if (sorted) {
Mathieu Chartier407f7022014-02-18 14:37:05 -08001106 if (allocation_stack_->ContainsSorted(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001107 return true;
1108 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08001109 } else if (allocation_stack_->Contains(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001110 return true;
1111 }
1112 }
1113
1114 if (search_live_stack) {
1115 if (sorted) {
Mathieu Chartier407f7022014-02-18 14:37:05 -08001116 if (live_stack_->ContainsSorted(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001117 return true;
1118 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08001119 } else if (live_stack_->Contains(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001120 return true;
1121 }
1122 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001123 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001124 // We need to check the bitmaps again since there is a race where we mark something as live and
1125 // then clear the stack containing it.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001126 if (c_space != nullptr) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001127 if (c_space->GetLiveBitmap()->Test(obj)) {
1128 return true;
1129 }
1130 } else {
1131 d_space = FindDiscontinuousSpaceFromObject(obj, true);
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001132 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001133 return true;
1134 }
1135 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001136 return false;
Elliott Hughes6a5bd492011-10-28 14:33:57 -07001137}
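// Summary of the lookup strategy above: newly allocated objects only appear in the allocation and
// live stacks until their bits are set in a live bitmap, and the stacks are swapped and cleared
// without suspending mutators. IsLiveObjectLocked() therefore checks the bitmaps first, then the
// stacks (retrying up to 5 times with 10 ms sleeps when the caller did not pass sorted stacks),
// and then re-checks the bitmaps to close the window where a live object moves between the two.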
1138
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07001139std::string Heap::DumpSpaces() const {
1140 std::ostringstream oss;
1141 DumpSpaces(oss);
1142 return oss.str();
1143}
1144
1145void Heap::DumpSpaces(std::ostream& stream) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001146 for (const auto& space : continuous_spaces_) {
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001147 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1148 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001149 stream << space << " " << *space << "\n";
1150 if (live_bitmap != nullptr) {
1151 stream << live_bitmap << " " << *live_bitmap << "\n";
1152 }
1153 if (mark_bitmap != nullptr) {
1154 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1155 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001156 }
Mathieu Chartier02e25112013-08-14 16:14:24 -07001157 for (const auto& space : discontinuous_spaces_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001158 stream << space << " " << *space << "\n";
Mathieu Chartier128c52c2012-10-16 14:12:41 -07001159 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001160}
1161
Ian Rogersef7d42f2014-01-06 12:55:46 -08001162void Heap::VerifyObjectBody(mirror::Object* obj) {
Stephen Hines22c6a812014-07-16 11:03:43 -07001163 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1164 return;
1165 }
1166
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001167 // Ignore early dawn of the universe verifications.
Ian Rogers3e5cf302014-05-20 16:40:37 -07001168 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
Ian Rogers62d6c772013-02-27 08:32:07 -08001169 return;
1170 }
Mathieu Chartier4e305412014-02-19 10:54:44 -08001171 CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001172 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
Mathieu Chartier4e305412014-02-19 10:54:44 -08001173 CHECK(c != nullptr) << "Null class in object " << obj;
1174 CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001175 CHECK(VerifyClassClass(c));
Mathieu Chartier0325e622012-09-05 14:22:51 -07001176
Mathieu Chartier4e305412014-02-19 10:54:44 -08001177 if (verify_object_mode_ > kVerifyObjectModeFast) {
1178 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07001179 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
Mathieu Chartierdcf8d722012-08-02 14:55:54 -07001180 }
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001181}
1182
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001183void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001184 reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001185}
1186
1187void Heap::VerifyHeap() {
Ian Rogers50b35e22012-10-04 10:09:15 -07001188 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001189 GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001190}
1191
Mathieu Chartiere76e70f2014-05-02 16:35:37 -07001192void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
Mathieu Chartier601276a2014-03-20 15:12:30 -07001193 // Use signed comparison since freed bytes can be negative when background compaction to foreground
1194 // transitions occur. This is caused by moving objects from a bump pointer space to a
1195 // free-list backed space, which typically increases memory footprint due to padding and binning.
Ian Rogers3e5cf302014-05-20 16:40:37 -07001196 DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
Mathieu Chartiere76e70f2014-05-02 16:35:37 -07001197 // Note: This relies on two's complement for handling negative freed_bytes.
Ian Rogers3e5cf302014-05-20 16:40:37 -07001198 num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001199 if (Runtime::Current()->HasStatsEnabled()) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001200 RuntimeStats* thread_stats = Thread::Current()->GetStats();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001201 thread_stats->freed_objects += freed_objects;
Elliott Hughes307f75d2011-10-12 18:04:40 -07001202 thread_stats->freed_bytes += freed_bytes;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001203 // TODO: Do this concurrently.
1204 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1205 global_stats->freed_objects += freed_objects;
1206 global_stats->freed_bytes += freed_bytes;
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001207 }
Carl Shapiro58551df2011-07-24 03:09:51 -07001208}
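// Sketch of the signed update above with hypothetical values: if a foreground transition bins
// objects into a free-list space and the footprint grows by 4 KiB, the caller passes
// freed_bytes == -4096; FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(-4096)) then
// increases num_bytes_allocated_ by 4096, which is exactly the effect the two's complement
// subtraction is relied on to produce.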
1209
Zuo Wangf37a88b2014-07-10 04:26:41 -07001210space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1211 for (const auto& space : continuous_spaces_) {
1212 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1213 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1214 return space->AsContinuousSpace()->AsRosAllocSpace();
1215 }
1216 }
1217 }
1218 return nullptr;
1219}
1220
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001221mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001222 size_t alloc_size, size_t* bytes_allocated,
Ian Rogers6fac4472014-02-25 17:01:10 -08001223 size_t* usable_size,
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001224 mirror::Class** klass) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001225 bool was_default_allocator = allocator == GetCurrentAllocator();
Mathieu Chartierf4f38432014-09-03 11:21:08 -07001226 // Make sure there is no pending exception since we may need to throw an OOME.
1227 self->AssertNoPendingException();
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001228 DCHECK(klass != nullptr);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001229 StackHandleScope<1> hs(self);
1230 HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
1231 klass = nullptr; // Invalidate for safety.
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001232 // The allocation failed. If the GC is running, block until it completes, and then retry the
1233 // allocation.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07001234 collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
Ian Rogers1d54e732013-05-02 21:10:01 -07001235 if (last_gc != collector::kGcTypeNone) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001236 // If we were the default allocator but the allocator changed while we were suspended,
1237 // abort the allocation.
1238 if (was_default_allocator && allocator != GetCurrentAllocator()) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001239 return nullptr;
1240 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001241 // A GC was in progress and we blocked; retry allocation now that memory has been freed.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001242 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1243 usable_size);
1244 if (ptr != nullptr) {
1245 return ptr;
1246 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001247 }
1248
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001249 collector::GcType tried_type = next_gc_type_;
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001250 const bool gc_ran =
1251 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1252 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1253 return nullptr;
1254 }
1255 if (gc_ran) {
1256 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1257 usable_size);
1258 if (ptr != nullptr) {
1259 return ptr;
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001260 }
1261 }
1262
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001263 // Loop through our different GC types and try to GC until we get enough free memory.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001264 for (collector::GcType gc_type : gc_plan_) {
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001265 if (gc_type == tried_type) {
1266 continue;
1267 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001268 // Attempt to run the collector, if we succeed, re-try the allocation.
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001269 const bool gc_ran =
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001270 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1271 if (was_default_allocator && allocator != GetCurrentAllocator()) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001272 return nullptr;
1273 }
1274 if (gc_ran) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001275 // Did we free sufficient memory for the allocation to succeed?
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001276 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1277 usable_size);
1278 if (ptr != nullptr) {
1279 return ptr;
1280 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001281 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001282 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001283 // Allocations have failed after GCs; this is an exceptional state.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001284 // Try harder, growing the heap if necessary.
1285 mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1286 usable_size);
1287 if (ptr != nullptr) {
1288 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001289 }
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001290 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1291 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1292 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1293 // OOME.
1294 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1295 << " allocation";
1296 // TODO: Run finalization, but this may cause more allocations to occur.
1297 // We don't need a WaitForGcToComplete here either.
1298 DCHECK(!gc_plan_.empty());
1299 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1300 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1301 return nullptr;
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001302 }
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001303 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001304 if (ptr == nullptr) {
Zuo Wangf37a88b2014-07-10 04:26:41 -07001305 const uint64_t current_time = NanoTime();
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001306 switch (allocator) {
1307 case kAllocatorTypeRosAlloc:
1308 // Fall-through.
1309 case kAllocatorTypeDlMalloc: {
1310 if (use_homogeneous_space_compaction_for_oom_ &&
1311 current_time - last_time_homogeneous_space_compaction_by_oom_ >
1312 min_interval_homogeneous_space_compaction_by_oom_) {
1313 last_time_homogeneous_space_compaction_by_oom_ = current_time;
1314 HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1315 switch (result) {
1316 case HomogeneousSpaceCompactResult::kSuccess:
1317 // If the allocation succeeded, we delayed an oom.
1318 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1319 usable_size);
1320 if (ptr != nullptr) {
1321 count_delayed_oom_++;
1322 }
1323 break;
1324 case HomogeneousSpaceCompactResult::kErrorReject:
1325 // Reject due to disabled moving GC.
1326 break;
1327 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1328 // Throw OOM by default.
1329 break;
1330 default: {
Ian Rogers2c4257b2014-10-24 14:20:06 -07001331 UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1332 << static_cast<size_t>(result);
1333 UNREACHABLE();
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001334 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07001335 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001336 // Always print that we ran homogeneous space compaction since this can cause jank.
1337 VLOG(heap) << "Ran heap homogeneous space compaction, "
1338 << " requested defragmentation "
1339 << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1340 << " performed defragmentation "
1341 << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1342 << " ignored homogeneous space compaction "
1343 << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1344 << " delayed count = "
1345 << count_delayed_oom_.LoadSequentiallyConsistent();
Zuo Wangf37a88b2014-07-10 04:26:41 -07001346 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001347 break;
Zuo Wangf37a88b2014-07-10 04:26:41 -07001348 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001349 case kAllocatorTypeNonMoving: {
1350 // Try to transition the heap if the allocation failure was due to the space being full.
1351 if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
1352 // If we aren't out of memory then the OOM was probably from the non moving space being
1353 // full. Attempt to disable compaction and turn the main space into a non moving space.
1354 DisableMovingGc();
1355 // If we are still a moving GC then something must have caused the transition to fail.
1356 if (IsMovingGc(collector_type_)) {
1357 MutexLock mu(self, *gc_complete_lock_);
1358 // If we couldn't disable moving GC, just throw OOME and return null.
1359 LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1360 << disable_moving_gc_count_;
1361 } else {
1362 LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1363 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1364 usable_size);
1365 }
1366 }
1367 break;
1368 }
1369 default: {
1370 // Do nothing for other allocators.
1371 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07001372 }
1373 }
1374 // If the allocation hasn't succeeded by this point, throw an OOM error.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001375 if (ptr == nullptr) {
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001376 ThrowOutOfMemoryError(self, alloc_size, allocator);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001377 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001378 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001379}
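// Recap of the escalation order implemented above: wait for any in-progress GC and retry; run the
// next planned GC type and retry; walk the remaining gc_plan_ entries, retrying after each; retry
// once more with heap growth allowed; run the most aggressive GC with SoftReferences cleared (the
// spec requires this before OOME) and retry with growth; finally fall back to allocator-specific
// last resorts (homogeneous space compaction for the malloc allocators, disabling moving GC for
// the non-moving space) before throwing OutOfMemoryError.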
1380
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001381void Heap::SetTargetHeapUtilization(float target) {
1382 DCHECK_GT(target, 0.0f); // asserted in Java code
1383 DCHECK_LT(target, 1.0f);
1384 target_utilization_ = target;
1385}
1386
Ian Rogers1d54e732013-05-02 21:10:01 -07001387size_t Heap::GetObjectsAllocated() const {
1388 size_t total = 0;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001389 for (space::AllocSpace* space : alloc_spaces_) {
1390 total += space->GetObjectsAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001391 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001392 return total;
1393}
1394
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001395uint64_t Heap::GetObjectsAllocatedEver() const {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001396 return GetObjectsFreedEver() + GetObjectsAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001397}
1398
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001399uint64_t Heap::GetBytesAllocatedEver() const {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001400 return GetBytesFreedEver() + GetBytesAllocated();
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001401}
1402
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001403class InstanceCounter {
1404 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001405 InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001406 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001407 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001408 }
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001409 static void Callback(mirror::Object* obj, void* arg)
1410 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1411 InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1412 mirror::Class* instance_class = obj->GetClass();
1413 CHECK(instance_class != nullptr);
1414 for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
1415 if (instance_counter->use_is_assignable_from_) {
1416 if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) {
1417 ++instance_counter->counts_[i];
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001418 }
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001419 } else if (instance_class == instance_counter->classes_[i]) {
1420 ++instance_counter->counts_[i];
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001421 }
1422 }
1423 }
1424
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001425 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001426 const std::vector<mirror::Class*>& classes_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001427 bool use_is_assignable_from_;
1428 uint64_t* const counts_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001429 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001430};
1431
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001432void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001433 uint64_t* counts) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001434 // Can't do any GC in this function since this may move classes.
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001435 ScopedAssertNoThreadSuspension ants(Thread::Current(), "CountInstances");
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001436 InstanceCounter counter(classes, use_is_assignable_from, counts);
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001437 ReaderMutexLock mu(ants.Self(), *Locks::heap_bitmap_lock_);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001438 VisitObjects(InstanceCounter::Callback, &counter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001439}
1440
Elliott Hughes3b78c942013-01-15 17:35:41 -08001441class InstanceCollector {
1442 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001443 InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
Elliott Hughes3b78c942013-01-15 17:35:41 -08001444 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1445 : class_(c), max_count_(max_count), instances_(instances) {
1446 }
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001447 static void Callback(mirror::Object* obj, void* arg)
1448 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1449 DCHECK(arg != nullptr);
1450 InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001451 if (obj->GetClass() == instance_collector->class_) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001452 if (instance_collector->max_count_ == 0 ||
1453 instance_collector->instances_.size() < instance_collector->max_count_) {
1454 instance_collector->instances_.push_back(obj);
Elliott Hughes3b78c942013-01-15 17:35:41 -08001455 }
1456 }
1457 }
1458
1459 private:
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001460 const mirror::Class* const class_;
1461 const uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001462 std::vector<mirror::Object*>& instances_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001463 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1464};
1465
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001466void Heap::GetInstances(mirror::Class* c, int32_t max_count,
1467 std::vector<mirror::Object*>& instances) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001468 // Can't do any GC in this function since this may move classes.
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001469 ScopedAssertNoThreadSuspension ants(Thread::Current(), "GetInstances");
Elliott Hughes3b78c942013-01-15 17:35:41 -08001470 InstanceCollector collector(c, max_count, instances);
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001471 ReaderMutexLock mu(ants.Self(), *Locks::heap_bitmap_lock_);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001472 VisitObjects(&InstanceCollector::Callback, &collector);
Elliott Hughes3b78c942013-01-15 17:35:41 -08001473}
1474
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001475class ReferringObjectsFinder {
1476 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001477 ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
1478 std::vector<mirror::Object*>& referring_objects)
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001479 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1480 : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1481 }
1482
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001483 static void Callback(mirror::Object* obj, void* arg)
1484 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1485 reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1486 }
1487
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001488 // For bitmap Visit.
1489 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1490 // annotalysis on visitors.
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001491 void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001492 o->VisitReferences<true>(*this, VoidFunctor());
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001493 }
1494
Mathieu Chartier3b05e9b2014-03-25 09:29:43 -07001495 // For Object::VisitReferences.
Mathieu Chartier407f7022014-02-18 14:37:05 -08001496 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
1497 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001498 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
Mathieu Chartier407f7022014-02-18 14:37:05 -08001499 if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1500 referring_objects_.push_back(obj);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001501 }
1502 }
1503
1504 private:
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001505 const mirror::Object* const object_;
1506 const uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001507 std::vector<mirror::Object*>& referring_objects_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001508 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1509};
1510
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001511void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1512 std::vector<mirror::Object*>& referring_objects) {
Mathieu Chartier83c8ee02014-01-28 14:50:23 -08001513 // Can't do any GC in this function since this may move the object o.
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001514 ScopedAssertNoThreadSuspension ants(Thread::Current(), "GetReferringObjects");
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001515 ReferringObjectsFinder finder(o, max_count, referring_objects);
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001516 ReaderMutexLock mu(ants.Self(), *Locks::heap_bitmap_lock_);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001517 VisitObjects(&ReferringObjectsFinder::Callback, &finder);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001518}
1519
Ian Rogers30fab402012-01-23 15:43:46 -08001520void Heap::CollectGarbage(bool clear_soft_references) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001521 // Even if we waited for a GC we still need to do another GC since weaks allocated during the
1522 // last GC will not necessarily have been cleared.
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001523 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001524}
1525
Zuo Wangf37a88b2014-07-10 04:26:41 -07001526HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
1527 Thread* self = Thread::Current();
1528 // Inc requested homogeneous space compaction.
1529 count_requested_homogeneous_space_compaction_++;
1530 // Store performed homogeneous space compaction at a new request arrival.
1531 ThreadList* tl = Runtime::Current()->GetThreadList();
1532 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1533 Locks::mutator_lock_->AssertNotHeld(self);
1534 {
1535 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1536 MutexLock mu(self, *gc_complete_lock_);
1537 // Ensure there is only one GC at a time.
1538 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
1539 // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC
1540 // disable count is non-zero.
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001541 // If the collector type changed to something which doesn't benefit from homogeneous space compaction,
Zuo Wangf37a88b2014-07-10 04:26:41 -07001542 // exit.
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001543 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
1544 !main_space_->CanMoveObjects()) {
Zuo Wangf37a88b2014-07-10 04:26:41 -07001545 return HomogeneousSpaceCompactResult::kErrorReject;
1546 }
1547 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
1548 }
1549 if (Runtime::Current()->IsShuttingDown(self)) {
1550 // Don't allow heap transitions to happen if the runtime is shutting down since these can
1551 // cause objects to get finalized.
1552 FinishGC(self, collector::kGcTypeNone);
1553 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
1554 }
1555 // Suspend all threads.
1556 tl->SuspendAll();
1557 uint64_t start_time = NanoTime();
1558 // Launch compaction.
Mathieu Chartierb363f662014-07-16 13:28:58 -07001559 space::MallocSpace* to_space = main_space_backup_.release();
Zuo Wangf37a88b2014-07-10 04:26:41 -07001560 space::MallocSpace* from_space = main_space_;
1561 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1562 const uint64_t space_size_before_compaction = from_space->Size();
Mathieu Chartierb363f662014-07-16 13:28:58 -07001563 AddSpace(to_space);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001564 Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
1565 // Leave as prot read so that we can still run ROSAlloc verification on this space.
1566 from_space->GetMemMap()->Protect(PROT_READ);
1567 const uint64_t space_size_after_compaction = to_space->Size();
Mathieu Chartierb363f662014-07-16 13:28:58 -07001568 main_space_ = to_space;
1569 main_space_backup_.reset(from_space);
1570 RemoveSpace(from_space);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001571 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
1572 // Update performed homogeneous space compaction count.
1573 count_performed_homogeneous_space_compaction_++;
1574 // Print the statistics log and resume all threads.
1575 uint64_t duration = NanoTime() - start_time;
Mathieu Chartier98172a62014-09-02 12:33:25 -07001576 VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
1577 << PrettySize(space_size_before_compaction) << " -> "
1578 << PrettySize(space_size_after_compaction) << " compact-ratio: "
1579 << std::fixed << static_cast<double>(space_size_after_compaction) /
1580 static_cast<double>(space_size_before_compaction);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001581 tl->ResumeAll();
1582 // Finish GC.
1583 reference_processor_.EnqueueClearedReferences(self);
1584 GrowForUtilization(semi_space_collector_);
1585 FinishGC(self, collector::kGcTypeFull);
1586 return HomogeneousSpaceCompactResult::kSuccess;
1587}
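// Recap of the space shuffle performed above: the backup malloc space is released and becomes the
// compaction target, Compact() copies the live objects into it, and the roles are then swapped so
// that main_space_ points at the freshly packed space while the now-evacuated source space is kept
// as main_space_backup_ under PROT_READ so RosAlloc verification can still read it.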
1588
1589
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001590void Heap::TransitionCollector(CollectorType collector_type) {
1591 if (collector_type == collector_type_) {
1592 return;
1593 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001594 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
1595 << " -> " << static_cast<int>(collector_type);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001596 uint64_t start_time = NanoTime();
Ian Rogers3e5cf302014-05-20 16:40:37 -07001597 uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001598 Runtime* const runtime = Runtime::Current();
1599 ThreadList* const tl = runtime->GetThreadList();
1600 Thread* const self = Thread::Current();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001601 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1602 Locks::mutator_lock_->AssertNotHeld(self);
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001603 // Busy wait until we can GC (StartGC can fail if we have a non-zero
1604 // compacting_gc_disable_count_; this should rarely occur).
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001605 for (;;) {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001606 {
1607 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1608 MutexLock mu(self, *gc_complete_lock_);
1609 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07001610 WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
Mathieu Chartiere4927f62014-08-23 13:56:03 -07001611 // Currently we only need a heap transition if we switch from a moving collector to a
1612 // non-moving one, or vice versa.
1613 const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
Mathieu Chartierb38d4832014-04-10 10:56:55 -07001614 // If someone else beat us to it and changed the collector before we could, exit.
1615 // This is safe to do before the suspend all since we set the collector_type_running_ before
1616 // we exit the loop. If another thread attempts to do the heap transition before we exit,
1617 // then it would get blocked on WaitForGcToCompleteLocked.
1618 if (collector_type == collector_type_) {
1619 return;
1620 }
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001621 // GC can be disabled if someone has used GetPrimitiveArrayCritical but not yet released it.
1622 if (!copying_transition || disable_moving_gc_count_ == 0) {
1623 // TODO: Not hard code in semi-space collector?
1624 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
1625 break;
1626 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001627 }
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001628 usleep(1000);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001629 }
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001630 if (runtime->IsShuttingDown(self)) {
Hiroshi Yamauchia6a8d142014-05-12 16:57:33 -07001631 // Don't allow heap transitions to happen if the runtime is shutting down since these can
1632 // cause objects to get finalized.
1633 FinishGC(self, collector::kGcTypeNone);
1634 return;
1635 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001636 tl->SuspendAll();
1637 switch (collector_type) {
Mathieu Chartierb363f662014-07-16 13:28:58 -07001638 case kCollectorTypeSS: {
Mathieu Chartier31f44142014-04-08 14:40:03 -07001639 if (!IsMovingGc(collector_type_)) {
Mathieu Chartierb363f662014-07-16 13:28:58 -07001640 // Create the bump pointer space from the backup space.
1641 CHECK(main_space_backup_ != nullptr);
1642 std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
Mathieu Chartier31f44142014-04-08 14:40:03 -07001643 // We are transitioning from non-moving GC -> moving GC; since we copied from the bump
1644 // pointer space during the last transition, it will be protected.
Mathieu Chartierb363f662014-07-16 13:28:58 -07001645 CHECK(mem_map != nullptr);
1646 mem_map->Protect(PROT_READ | PROT_WRITE);
1647 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
1648 mem_map.release());
1649 AddSpace(bump_pointer_space_);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001650 Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
Mathieu Chartierb363f662014-07-16 13:28:58 -07001651 // Use the now empty main space mem map for the bump pointer temp space.
1652 mem_map.reset(main_space_->ReleaseMemMap());
Mathieu Chartier00b59152014-07-25 10:13:51 -07001653 // Unset the pointers just in case.
1654 if (dlmalloc_space_ == main_space_) {
1655 dlmalloc_space_ = nullptr;
1656 } else if (rosalloc_space_ == main_space_) {
1657 rosalloc_space_ = nullptr;
1658 }
Mathieu Chartier2796a162014-07-25 11:50:47 -07001659 // Remove the main space so that we don't try to trim it; this doesn't work for debug
1660 // builds since RosAlloc attempts to read the magic number from a protected page.
1661 RemoveSpace(main_space_);
Mathieu Chartierc5a83472014-07-23 18:45:17 -07001662 RemoveRememberedSet(main_space_);
Mathieu Chartier2796a162014-07-25 11:50:47 -07001663 delete main_space_; // Delete the space since it has been removed.
Mathieu Chartierc5a83472014-07-23 18:45:17 -07001664 main_space_ = nullptr;
Mathieu Chartier2796a162014-07-25 11:50:47 -07001665 RemoveRememberedSet(main_space_backup_.get());
1666 main_space_backup_.reset(nullptr); // Deletes the space.
Mathieu Chartierb363f662014-07-16 13:28:58 -07001667 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
1668 mem_map.release());
1669 AddSpace(temp_space_);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001670 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001671 break;
1672 }
1673 case kCollectorTypeMS:
1674 // Fall through.
1675 case kCollectorTypeCMS: {
Mathieu Chartier31f44142014-04-08 14:40:03 -07001676 if (IsMovingGc(collector_type_)) {
Mathieu Chartierb363f662014-07-16 13:28:58 -07001677 CHECK(temp_space_ != nullptr);
1678 std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
1679 RemoveSpace(temp_space_);
1680 temp_space_ = nullptr;
Mathieu Chartier36dab362014-07-30 14:59:56 -07001681 mem_map->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartierb363f662014-07-16 13:28:58 -07001682 CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize, mem_map->Size(),
1683 mem_map->Size());
1684 mem_map.release();
Mathieu Chartier31f44142014-04-08 14:40:03 -07001685 // Compact to the main space from the bump pointer space, don't need to swap semispaces.
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -07001686 AddSpace(main_space_);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001687 Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
Mathieu Chartierb363f662014-07-16 13:28:58 -07001688 mem_map.reset(bump_pointer_space_->ReleaseMemMap());
1689 RemoveSpace(bump_pointer_space_);
1690 bump_pointer_space_ = nullptr;
1691 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
Hiroshi Yamauchic1276c82014-08-07 10:27:17 -07001692 // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
1693 if (kIsDebugBuild && kUseRosAlloc) {
1694 mem_map->Protect(PROT_READ | PROT_WRITE);
1695 }
Mathieu Chartierb363f662014-07-16 13:28:58 -07001696 main_space_backup_.reset(CreateMallocSpaceFromMemMap(mem_map.get(), kDefaultInitialSize,
1697 mem_map->Size(), mem_map->Size(),
1698 name, true));
Hiroshi Yamauchic1276c82014-08-07 10:27:17 -07001699 if (kIsDebugBuild && kUseRosAlloc) {
1700 mem_map->Protect(PROT_NONE);
1701 }
Mathieu Chartierb363f662014-07-16 13:28:58 -07001702 mem_map.release();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001703 }
1704 break;
1705 }
1706 default: {
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07001707 LOG(FATAL) << "Attempted to transition to invalid collector type "
1708 << static_cast<size_t>(collector_type);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001709 break;
1710 }
1711 }
1712 ChangeCollector(collector_type);
1713 tl->ResumeAll();
1714 // Can't call into java code with all threads suspended.
Mathieu Chartier308351a2014-06-15 12:39:02 -07001715 reference_processor_.EnqueueClearedReferences(self);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001716 uint64_t duration = NanoTime() - start_time;
Mathieu Chartierafe49982014-03-27 10:55:04 -07001717 GrowForUtilization(semi_space_collector_);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001718 FinishGC(self, collector::kGcTypeFull);
Ian Rogers3e5cf302014-05-20 16:40:37 -07001719 int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001720 int32_t delta_allocated = before_allocated - after_allocated;
Mathieu Chartier19d46b42014-06-17 15:04:40 -07001721 std::string saved_str;
1722 if (delta_allocated >= 0) {
1723 saved_str = " saved at least " + PrettySize(delta_allocated);
1724 } else {
1725 saved_str = " expanded " + PrettySize(-delta_allocated);
1726 }
Mathieu Chartier98172a62014-09-02 12:33:25 -07001727 VLOG(heap) << "Heap transition to " << process_state_ << " took "
Mathieu Chartier19d46b42014-06-17 15:04:40 -07001728 << PrettyDuration(duration) << saved_str;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001729}
1730
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001731void Heap::ChangeCollector(CollectorType collector_type) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001732 // TODO: Only do this with all mutators suspended to avoid races.
1733 if (collector_type != collector_type_) {
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001734 if (collector_type == kCollectorTypeMC) {
1735 // Don't allow mark compact unless support is compiled in.
1736 CHECK(kMarkCompactSupport);
1737 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001738 collector_type_ = collector_type;
1739 gc_plan_.clear();
1740 switch (collector_type_) {
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07001741 case kCollectorTypeCC: // Fall-through.
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001742 case kCollectorTypeMC: // Fall-through.
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07001743 case kCollectorTypeSS: // Fall-through.
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001744 case kCollectorTypeGSS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001745 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001746 if (use_tlab_) {
1747 ChangeAllocator(kAllocatorTypeTLAB);
1748 } else {
1749 ChangeAllocator(kAllocatorTypeBumpPointer);
1750 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001751 break;
1752 }
1753 case kCollectorTypeMS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001754 gc_plan_.push_back(collector::kGcTypeSticky);
1755 gc_plan_.push_back(collector::kGcTypePartial);
1756 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001757 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001758 break;
1759 }
1760 case kCollectorTypeCMS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001761 gc_plan_.push_back(collector::kGcTypeSticky);
1762 gc_plan_.push_back(collector::kGcTypePartial);
1763 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001764 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001765 break;
1766 }
1767 default: {
Ian Rogers2c4257b2014-10-24 14:20:06 -07001768 UNIMPLEMENTED(FATAL);
1769 UNREACHABLE();
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001770 }
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001771 }
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07001772 if (IsGcConcurrent()) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001773 concurrent_start_bytes_ =
1774 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
1775 } else {
1776 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001777 }
1778 }
1779}
1780
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001781// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
Ian Rogers6fac4472014-02-25 17:01:10 -08001782class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001783 public:
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001784 explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, false, "zygote collector"),
Ian Rogers6fac4472014-02-25 17:01:10 -08001785 bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001786 }
1787
1788 void BuildBins(space::ContinuousSpace* space) {
1789 bin_live_bitmap_ = space->GetLiveBitmap();
1790 bin_mark_bitmap_ = space->GetMarkBitmap();
1791 BinContext context;
1792 context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
1793 context.collector_ = this;
1794 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1795 // Note: This requires traversing the space in increasing order of object addresses.
1796 bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
1797 // Add the last bin which spans after the last object to the end of the space.
1798 AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
1799 }
1800
1801 private:
1802 struct BinContext {
1803 uintptr_t prev_; // The end of the previous object.
1804 ZygoteCompactingCollector* collector_;
1805 };
1806 // Maps from bin sizes to locations.
1807 std::multimap<size_t, uintptr_t> bins_;
1808 // Live bitmap of the space which contains the bins.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001809 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001810 // Mark bitmap of the space which contains the bins.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001811 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001812
1813 static void Callback(mirror::Object* obj, void* arg)
1814 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1815 DCHECK(arg != nullptr);
1816 BinContext* context = reinterpret_cast<BinContext*>(arg);
1817 ZygoteCompactingCollector* collector = context->collector_;
1818 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
1819 size_t bin_size = object_addr - context->prev_;
 1820 // Add the bin spanning from the end of the previous object to the start of the current object.
1821 collector->AddBin(bin_size, context->prev_);
1822 context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
1823 }
1824
1825 void AddBin(size_t size, uintptr_t position) {
1826 if (size != 0) {
1827 bins_.insert(std::make_pair(size, position));
1828 }
1829 }
1830
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001831 virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001832 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
1833 // allocator.
1834 return false;
1835 }
1836
1837 virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
1838 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
1839 size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
Mathieu Chartier5dc08a62014-01-10 10:10:23 -08001840 mirror::Object* forward_address;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001841 // Find the smallest bin into which we can move obj.
1842 auto it = bins_.lower_bound(object_size);
1843 if (it == bins_.end()) {
1844 // No available space in the bins, place it in the target space instead (grows the zygote
1845 // space).
Mathieu Chartier5dc08a62014-01-10 10:10:23 -08001846 size_t bytes_allocated;
Ian Rogers6fac4472014-02-25 17:01:10 -08001847 forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001848 if (to_space_live_bitmap_ != nullptr) {
1849 to_space_live_bitmap_->Set(forward_address);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001850 } else {
1851 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
1852 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001853 }
1854 } else {
1855 size_t size = it->first;
1856 uintptr_t pos = it->second;
1857 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
1858 forward_address = reinterpret_cast<mirror::Object*>(pos);
 1859 // Set the live and mark bits so that sweeping system weak references works properly.
1860 bin_live_bitmap_->Set(forward_address);
1861 bin_mark_bitmap_->Set(forward_address);
1862 DCHECK_GE(size, object_size);
1863 AddBin(size - object_size, pos + object_size); // Add a new bin with the remaining space.
1864 }
1865 // Copy the object over to its new location.
1866 memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
Hiroshi Yamauchi624468c2014-03-31 15:14:47 -07001867 if (kUseBakerOrBrooksReadBarrier) {
1868 obj->AssertReadBarrierPointer();
1869 if (kUseBrooksReadBarrier) {
1870 DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
1871 forward_address->SetReadBarrierPointer(forward_address);
1872 }
1873 forward_address->AssertReadBarrierPointer();
Hiroshi Yamauchi9d04a202014-01-31 13:35:49 -08001874 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001875 return forward_address;
1876 }
1877};
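// Note: the bin packing above is a simple best-fit scheme. BuildBins records every gap between
// live objects as a (size, address) entry in a std::multimap, and MarkNonForwardedObject uses
// lower_bound(object_size) to pick the smallest gap that still fits, splitting off the remainder
// as a new, smaller bin. A minimal sketch of the same best-fit idea, using hypothetical names and
// only the standard library (not part of this file):
//
//   std::multimap<size_t, uintptr_t> bins;
//   uintptr_t BestFit(size_t size) {
//     auto it = bins.lower_bound(size);           // Smallest bin that can hold `size` bytes.
//     if (it == bins.end()) return 0;             // Caller falls back to growing the space.
//     uintptr_t pos = it->second;
//     size_t remaining = it->first - size;
//     bins.erase(it);
//     if (remaining != 0) {
//       bins.insert({remaining, pos + size});     // Keep the leftover as a new, smaller bin.
//     }
//     return pos;
//   }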
1878
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001879void Heap::UnBindBitmaps() {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001880 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001881 for (const auto& space : GetContinuousSpaces()) {
1882 if (space->IsContinuousMemMapAllocSpace()) {
1883 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1884 if (alloc_space->HasBoundBitmaps()) {
1885 alloc_space->UnBindBitmaps();
1886 }
1887 }
1888 }
1889}
1890
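// Called in the zygote process just before the first application is forked. Compacts the heap
// into what becomes the shared zygote space (so forked apps can share those pages copy-on-write)
// and then rebuilds the accounting structures (mod-union table, remembered sets) for the new
// space layout.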
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001891void Heap::PreZygoteFork() {
Mathieu Chartier1f3b5352014-02-03 14:00:42 -08001892 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
Ian Rogers81d425b2012-09-27 16:03:43 -07001893 Thread* self = Thread::Current();
1894 MutexLock mu(self, zygote_creation_lock_);
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001895 // Try to see if we have any Zygote spaces.
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001896 if (HasZygoteSpace()) {
1897 LOG(WARNING) << __FUNCTION__ << " called when we already have a zygote space.";
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001898 return;
1899 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001900 VLOG(heap) << "Starting PreZygoteFork";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001901 // Trim the pages at the end of the non moving space.
1902 non_moving_space_->Trim();
Mathieu Chartier31f44142014-04-08 14:40:03 -07001903 // The end of the non-moving space may be protected; unprotect it so that we can copy the zygote
1904 // there.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001905 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001906 const bool same_space = non_moving_space_ == main_space_;
Mathieu Chartier31f44142014-04-08 14:40:03 -07001907 if (kCompactZygote) {
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001908 // Can't compact if the non moving space is the same as the main space.
Mathieu Chartier31f44142014-04-08 14:40:03 -07001909 DCHECK(semi_space_collector_ != nullptr);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001910 // Temporarily disable rosalloc verification because the zygote
1911 // compaction will mess up the rosalloc internal metadata.
1912 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001913 ZygoteCompactingCollector zygote_collector(this);
1914 zygote_collector.BuildBins(non_moving_space_);
Mathieu Chartier50482232013-11-21 11:48:14 -08001915 // Create a new bump pointer space which we will compact into.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001916 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
1917 non_moving_space_->Limit());
1918 // Compact the bump pointer space to a new zygote bump pointer space.
Mathieu Chartier31f44142014-04-08 14:40:03 -07001919 bool reset_main_space = false;
1920 if (IsMovingGc(collector_type_)) {
1921 zygote_collector.SetFromSpace(bump_pointer_space_);
1922 } else {
1923 CHECK(main_space_ != nullptr);
1924 // Copy from the main space.
1925 zygote_collector.SetFromSpace(main_space_);
1926 reset_main_space = true;
1927 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001928 zygote_collector.SetToSpace(&target_space);
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -07001929 zygote_collector.SetSwapSemiSpaces(false);
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001930 zygote_collector.Run(kGcCauseCollectorTransition, false);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001931 if (reset_main_space) {
1932 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1933 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
1934 MemMap* mem_map = main_space_->ReleaseMemMap();
1935 RemoveSpace(main_space_);
Mathieu Chartier96bcd452014-06-17 09:50:02 -07001936 space::Space* old_main_space = main_space_;
Mathieu Chartier31f44142014-04-08 14:40:03 -07001937 CreateMainMallocSpace(mem_map, kDefaultInitialSize, mem_map->Size(), mem_map->Size());
Mathieu Chartier96bcd452014-06-17 09:50:02 -07001938 delete old_main_space;
Mathieu Chartier31f44142014-04-08 14:40:03 -07001939 AddSpace(main_space_);
1940 } else {
1941 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1942 }
1943 if (temp_space_ != nullptr) {
1944 CHECK(temp_space_->IsEmpty());
1945 }
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001946 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
1947 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001948 // Update the end and write out image.
1949 non_moving_space_->SetEnd(target_space.End());
1950 non_moving_space_->SetLimit(target_space.Limit());
Mathieu Chartier31f44142014-04-08 14:40:03 -07001951 VLOG(heap) << "Zygote space size " << non_moving_space_->Size() << " bytes";
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001952 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001953 // Change the collector to the post zygote one.
Mathieu Chartier31f44142014-04-08 14:40:03 -07001954 ChangeCollector(foreground_collector_type_);
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001955 // Save the old space so that we can remove it after we complete creating the zygote space.
1956 space::MallocSpace* old_alloc_space = non_moving_space_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001957 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001958 // the remaining available space.
1959 // Remove the old space before creating the zygote space since creating the zygote space sets
1960 // the old alloc space's bitmaps to nullptr.
1961 RemoveSpace(old_alloc_space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001962 if (collector::SemiSpace::kUseRememberedSet) {
1963 // Sanity bound check.
1964 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
1965 // Remove the remembered set for the now zygote space (the old
1966 // non-moving space). Note now that we have compacted objects into
1967 // the zygote space, the data in the remembered set is no longer
1968 // needed. The zygote space will instead have a mod-union table
1969 // from this point on.
1970 RemoveRememberedSet(old_alloc_space);
1971 }
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001972 zygote_space_ = old_alloc_space->CreateZygoteSpace("alloc space", low_memory_mode_,
1973 &non_moving_space_);
Mathieu Chartierb363f662014-07-16 13:28:58 -07001974 CHECK(!non_moving_space_->CanMoveObjects());
1975 if (same_space) {
1976 main_space_ = non_moving_space_;
1977 SetSpaceAsDefault(main_space_);
1978 }
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001979 delete old_alloc_space;
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001980 CHECK(HasZygoteSpace()) << "Failed creating zygote space";
1981 AddSpace(zygote_space_);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001982 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
1983 AddSpace(non_moving_space_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001984 // Create the zygote space mod union table.
1985 accounting::ModUnionTable* mod_union_table =
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001986 new accounting::ModUnionTableCardCache("zygote space mod-union table", this,
1987 zygote_space_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001988 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001989 // Set all the cards in the mod-union table since we don't know which objects contain references
1990 // to large objects.
1991 mod_union_table->SetCards();
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001992 AddModUnionTable(mod_union_table);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001993 if (collector::SemiSpace::kUseRememberedSet) {
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001994 // Add a new remembered set for the post-zygote non-moving space.
1995 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
1996 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
1997 non_moving_space_);
1998 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
1999 << "Failed to create post-zygote non-moving space remembered set";
2000 AddRememberedSet(post_zygote_non_moving_space_rem_set);
2001 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002002}
2003
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002004void Heap::FlushAllocStack() {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002005 MarkAllocStackAsLive(allocation_stack_.get());
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002006 allocation_stack_->Reset();
2007}
2008
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002009void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2010 accounting::ContinuousSpaceBitmap* bitmap2,
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07002011 accounting::LargeObjectBitmap* large_objects,
Ian Rogers1d54e732013-05-02 21:10:01 -07002012 accounting::ObjectStack* stack) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002013 DCHECK(bitmap1 != nullptr);
2014 DCHECK(bitmap2 != nullptr);
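  // Mark each stack entry in whichever bitmap covers its address; objects outside both continuous
  // space bitmaps are assumed to live in the large object space.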
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002015 mirror::Object** limit = stack->End();
2016 for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
2017 const mirror::Object* obj = *it;
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002018 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2019 if (bitmap1->HasAddress(obj)) {
2020 bitmap1->Set(obj);
2021 } else if (bitmap2->HasAddress(obj)) {
2022 bitmap2->Set(obj);
2023 } else {
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07002024 DCHECK(large_objects != nullptr);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002025 large_objects->Set(obj);
2026 }
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07002027 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002028 }
2029}
2030
Mathieu Chartier590fee92013-09-13 13:46:47 -07002031void Heap::SwapSemiSpaces() {
Mathieu Chartier31f44142014-04-08 14:40:03 -07002032 CHECK(bump_pointer_space_ != nullptr);
2033 CHECK(temp_space_ != nullptr);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002034 std::swap(bump_pointer_space_, temp_space_);
2035}
2036
2037void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
Zuo Wangf37a88b2014-07-10 04:26:41 -07002038 space::ContinuousMemMapAllocSpace* source_space,
2039 GcCause gc_cause) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002040 CHECK(kMovingCollector);
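  // Two cases below: copying between two distinct spaces with the semi-space collector, or, when
  // source and target are the same bump pointer space, compacting it in place with the
  // mark-compact collector.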
Mathieu Chartier590fee92013-09-13 13:46:47 -07002041 if (target_space != source_space) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002042 // Don't swap spaces since this isn't a typical semi space collection.
2043 semi_space_collector_->SetSwapSemiSpaces(false);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002044 semi_space_collector_->SetFromSpace(source_space);
2045 semi_space_collector_->SetToSpace(target_space);
Zuo Wangf37a88b2014-07-10 04:26:41 -07002046 semi_space_collector_->Run(gc_cause, false);
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002047 } else {
2048 CHECK(target_space->IsBumpPointerSpace())
2049 << "In-place compaction is only supported for bump pointer spaces";
2050 mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2051 mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002052 }
2053}
Anwar Ghuloum67f99412013-08-12 14:19:48 -07002054
Ian Rogers1d54e732013-05-02 21:10:01 -07002055collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
2056 bool clear_soft_references) {
Ian Rogers81d425b2012-09-27 16:03:43 -07002057 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002058 Runtime* runtime = Runtime::Current();
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002059 // If the heap can't run the GC, silently fail and return that no GC was run.
2060 switch (gc_type) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002061 case collector::kGcTypePartial: {
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002062 if (!HasZygoteSpace()) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002063 return collector::kGcTypeNone;
2064 }
2065 break;
2066 }
2067 default: {
 2068 // Other GC types don't have any special cases that would make them unable to run. The main
 2069 // case here is full GC.
2070 }
2071 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08002072 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Ian Rogers81d425b2012-09-27 16:03:43 -07002073 Locks::mutator_lock_->AssertNotHeld(self);
Ian Rogers120f1c72012-09-28 17:17:10 -07002074 if (self->IsHandlingStackOverflow()) {
2075 LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
2076 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002077 bool compacting_gc;
2078 {
2079 gc_complete_lock_->AssertNotHeld(self);
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002080 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002081 MutexLock mu(self, *gc_complete_lock_);
2082 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002083 WaitForGcToCompleteLocked(gc_cause, self);
Mathieu Chartier31f44142014-04-08 14:40:03 -07002084 compacting_gc = IsMovingGc(collector_type_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002085 // GC can be disabled if someone is using GetPrimitiveArrayCritical.
2086 if (compacting_gc && disable_moving_gc_count_ != 0) {
2087 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2088 return collector::kGcTypeNone;
2089 }
2090 collector_type_running_ = collector_type_;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002091 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002092
Mathieu Chartier590fee92013-09-13 13:46:47 -07002093 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2094 ++runtime->GetStats()->gc_for_alloc_count;
2095 ++self->GetStats()->gc_for_alloc_count;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002096 }
Ian Rogers1d54e732013-05-02 21:10:01 -07002097 uint64_t gc_start_time_ns = NanoTime();
Mathieu Chartier65db8802012-11-20 12:36:46 -08002098 uint64_t gc_start_size = GetBytesAllocated();
2099 // Approximate allocation rate in bytes / second.
Ian Rogers1d54e732013-05-02 21:10:01 -07002100 uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002101 // Back to back GCs can cause 0 ms of wait time in between GC invocations.
2102 if (LIKELY(ms_delta != 0)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002103 allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
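    // For example, if 8 MB were allocated in the 400 ms since the last GC, this gives
    // 8 MB * 1000 / 400 = 20 MB per second.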
Mathieu Chartier1b636c62014-08-13 10:08:05 -07002104 ATRACE_INT("Allocation rate KB/s", allocation_rate_ / KB);
Mathieu Chartier65db8802012-11-20 12:36:46 -08002105 VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
2106 }
2107
Ian Rogers1d54e732013-05-02 21:10:01 -07002108 DCHECK_LT(gc_type, collector::kGcTypeMax);
2109 DCHECK_NE(gc_type, collector::kGcTypeNone);
Anwar Ghuloum67f99412013-08-12 14:19:48 -07002110
Mathieu Chartier590fee92013-09-13 13:46:47 -07002111 collector::GarbageCollector* collector = nullptr;
Mathieu Chartier50482232013-11-21 11:48:14 -08002112 // TODO: Clean this up.
Mathieu Chartier1d27b342014-01-28 12:51:09 -08002113 if (compacting_gc) {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08002114 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2115 current_allocator_ == kAllocatorTypeTLAB);
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002116 switch (collector_type_) {
2117 case kCollectorTypeSS:
2118 // Fall-through.
2119 case kCollectorTypeGSS:
2120 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2121 semi_space_collector_->SetToSpace(temp_space_);
2122 semi_space_collector_->SetSwapSemiSpaces(true);
2123 collector = semi_space_collector_;
2124 break;
2125 case kCollectorTypeCC:
2126 collector = concurrent_copying_collector_;
2127 break;
2128 case kCollectorTypeMC:
2129 mark_compact_collector_->SetSpace(bump_pointer_space_);
2130 collector = mark_compact_collector_;
2131 break;
2132 default:
2133 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07002134 }
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002135 if (collector != mark_compact_collector_) {
2136 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2137 CHECK(temp_space_->IsEmpty());
2138 }
2139 gc_type = collector::kGcTypeFull; // TODO: Not hard code this in.
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002140 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2141 current_allocator_ == kAllocatorTypeDlMalloc) {
Mathieu Chartierafe49982014-03-27 10:55:04 -07002142 collector = FindCollectorByGcType(gc_type);
Mathieu Chartier50482232013-11-21 11:48:14 -08002143 } else {
2144 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002145 }
Mathieu Chartier08cef222014-10-22 17:18:34 -07002146 if (IsGcConcurrent()) {
2147 // Disable concurrent GC check so that we don't have spammy JNI requests.
2148 // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2149 // calculated in the same thread so that there aren't any races that can cause it to become
 2150 // permanently disabled. b/17942071
2151 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2152 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002153 CHECK(collector != nullptr)
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002154 << "Could not find garbage collector with collector_type="
2155 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002156 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002157 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2158 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
Mathieu Chartier7bf52d22014-03-13 14:46:09 -07002159 RequestHeapTrim();
Mathieu Chartier39e32612013-11-12 16:28:05 -08002160 // Enqueue cleared references.
Mathieu Chartier308351a2014-06-15 12:39:02 -07002161 reference_processor_.EnqueueClearedReferences(self);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002162 // Grow the heap so that we know when to perform the next GC.
Mathieu Chartierafe49982014-03-27 10:55:04 -07002163 GrowForUtilization(collector);
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002164 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2165 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002166 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002167 // (mutator time blocked >= long_pause_log_threshold_).
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002168 bool log_gc = gc_cause == kGcCauseExplicit;
2169 if (!log_gc && CareAboutPauseTimes()) {
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002170 // GC for alloc pauses the allocating thread, so consider it as a pause.
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002171 log_gc = duration > long_gc_log_threshold_ ||
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002172 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002173 for (uint64_t pause : pause_times) {
2174 log_gc = log_gc || pause >= long_pause_log_threshold_;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002175 }
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002176 }
2177 if (log_gc) {
2178 const size_t percent_free = GetPercentFree();
2179 const size_t current_heap_size = GetBytesAllocated();
2180 const size_t total_memory = GetTotalMemory();
2181 std::ostringstream pause_string;
2182 for (size_t i = 0; i < pause_times.size(); ++i) {
2183 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002184 << ((i != pause_times.size() - 1) ? "," : "");
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002185 }
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002186 LOG(INFO) << gc_cause << " " << collector->GetName()
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002187 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2188 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2189 << current_gc_iteration_.GetFreedLargeObjects() << "("
2190 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002191 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2192 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2193 << " total " << PrettyDuration((duration / 1000) * 1000);
Ian Rogersc7dd2952014-10-21 23:31:19 -07002194 VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002195 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002196 FinishGC(self, gc_type);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07002197 // Inform DDMS that a GC completed.
Ian Rogers15bf2d32012-08-28 17:33:04 -07002198 Dbg::GcDidFinish();
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07002199 return gc_type;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002200}
Mathieu Chartiera6399032012-06-11 18:49:50 -07002201
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002202void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2203 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002204 collector_type_running_ = kCollectorTypeNone;
2205 if (gc_type != collector::kGcTypeNone) {
2206 last_gc_type_ = gc_type;
2207 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002208 // Wake anyone who may have been waiting for the GC to complete.
2209 gc_complete_cond_->Broadcast(self);
2210}
2211
Mathieu Chartier815873e2014-02-13 18:02:13 -08002212static void RootMatchesObjectVisitor(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
2213 RootType /*root_type*/) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002214 mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
Mathieu Chartier815873e2014-02-13 18:02:13 -08002215 if (*root == obj) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002216 LOG(INFO) << "Object " << obj << " is a root";
2217 }
2218}
2219
2220class ScanVisitor {
2221 public:
Brian Carlstromdf629502013-07-17 22:39:56 -07002222 void operator()(const mirror::Object* obj) const {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002223 LOG(ERROR) << "Would have rescanned object " << obj;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002224 }
2225};
2226
Ian Rogers1d54e732013-05-02 21:10:01 -07002227// Verify a reference from an object.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002228class VerifyReferenceVisitor {
2229 public:
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002230 explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
Ian Rogers1d54e732013-05-02 21:10:01 -07002231 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002232 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
Ian Rogers1d54e732013-05-02 21:10:01 -07002233
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002234 size_t GetFailureCount() const {
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07002235 return fail_count_->LoadSequentiallyConsistent();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002236 }
2237
Mathieu Chartier407f7022014-02-18 14:37:05 -08002238 void operator()(mirror::Class* klass, mirror::Reference* ref) const
2239 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002240 if (verify_referent_) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002241 VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002242 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08002243 }
2244
Mathieu Chartier3b05e9b2014-03-25 09:29:43 -07002245 void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
Mathieu Chartier407f7022014-02-18 14:37:05 -08002246 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002247 VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
Mathieu Chartier407f7022014-02-18 14:37:05 -08002248 }
2249
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002250 bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
2251 return heap_->IsLiveObjectLocked(obj, true, false, true);
2252 }
2253
2254 static void VerifyRootCallback(mirror::Object** root, void* arg, uint32_t thread_id,
2255 RootType root_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2256 VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
2257 if (!visitor->VerifyReference(nullptr, *root, MemberOffset(0))) {
2258 LOG(ERROR) << "Root " << *root << " is dead with type " << PrettyTypeOf(*root)
2259 << " thread_id= " << thread_id << " root_type= " << root_type;
2260 }
2261 }
2262
2263 private:
Mathieu Chartier407f7022014-02-18 14:37:05 -08002264 // TODO: Fix the no thread safety analysis.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002265 // Returns false on failure.
2266 bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002267 NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002268 if (ref == nullptr || IsLive(ref)) {
2269 // Verify that the reference is live.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002270 return true;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002271 }
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07002272 if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002273 // Print the message only on the first failure to prevent spam.
2274 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002275 }
2276 if (obj != nullptr) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002277 // Only do this part for non roots.
Ian Rogers1d54e732013-05-02 21:10:01 -07002278 accounting::CardTable* card_table = heap_->GetCardTable();
2279 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2280 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Ian Rogers13735952014-10-08 12:43:28 -07002281 uint8_t* card_addr = card_table->CardFromAddr(obj);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002282 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2283 << offset << "\n card value = " << static_cast<int>(*card_addr);
2284 if (heap_->IsValidObjectAddress(obj->GetClass())) {
2285 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
2286 } else {
2287 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002288 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002289
Mathieu Chartierb363f662014-07-16 13:28:58 -07002290 // Attempt to find the class inside of the recently freed objects.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002291 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2292 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2293 space::MallocSpace* space = ref_space->AsMallocSpace();
2294 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2295 if (ref_class != nullptr) {
2296 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2297 << PrettyClass(ref_class);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002298 } else {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002299 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002300 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002301 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002302
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002303 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2304 ref->GetClass()->IsClass()) {
2305 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
2306 } else {
2307 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2308 << ") is not a valid heap address";
2309 }
2310
Ian Rogers13735952014-10-08 12:43:28 -07002311 card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002312 void* cover_begin = card_table->AddrFromCard(card_addr);
2313 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2314 accounting::CardTable::kCardSize);
2315 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2316 << "-" << cover_end;
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002317 accounting::ContinuousSpaceBitmap* bitmap =
2318 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002319
2320 if (bitmap == nullptr) {
2321 LOG(ERROR) << "Object " << obj << " has no bitmap";
Mathieu Chartier4e305412014-02-19 10:54:44 -08002322 if (!VerifyClassClass(obj->GetClass())) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002323 LOG(ERROR) << "Object " << obj << " failed class verification!";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002324 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002325 } else {
Ian Rogers1d54e732013-05-02 21:10:01 -07002326 // Print out how the object is live.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002327 if (bitmap->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002328 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2329 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002330 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002331 LOG(ERROR) << "Object " << obj << " found in allocation stack";
2332 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002333 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002334 LOG(ERROR) << "Object " << obj << " found in live stack";
2335 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002336 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2337 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2338 }
2339 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2340 LOG(ERROR) << "Ref " << ref << " found in live stack";
2341 }
Ian Rogers1d54e732013-05-02 21:10:01 -07002342 // Attempt to see if the card table missed the reference.
2343 ScanVisitor scan_visitor;
Ian Rogers13735952014-10-08 12:43:28 -07002344 uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
Ian Rogers1d54e732013-05-02 21:10:01 -07002345 card_table->Scan(bitmap, byte_cover_begin,
Mathieu Chartier184e3222013-08-03 14:02:57 -07002346 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002347 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002348
2349 // Search to see if any of the roots reference our object.
2350 void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
Mathieu Chartier893263b2014-03-04 11:07:42 -08002351 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002352
2353 // Search to see if any of the roots reference our reference.
2354 arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
Mathieu Chartier893263b2014-03-04 11:07:42 -08002355 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002356 }
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002357 return false;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002358 }
2359
Ian Rogers1d54e732013-05-02 21:10:01 -07002360 Heap* const heap_;
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002361 Atomic<size_t>* const fail_count_;
2362 const bool verify_referent_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002363};
2364
Ian Rogers1d54e732013-05-02 21:10:01 -07002365// Verify all references within an object, for use with HeapBitmap::Visit.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002366class VerifyObjectVisitor {
2367 public:
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002368 explicit VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2369 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002370 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002371
Mathieu Chartier590fee92013-09-13 13:46:47 -07002372 void operator()(mirror::Object* obj) const
Ian Rogersb726dcb2012-09-05 08:57:23 -07002373 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002374 // Note: we are verifying the references in obj but not obj itself; obj must be live, otherwise
 2375 // we could not have found it in the live bitmap.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002376 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002377 // The class doesn't count as a reference, but we should verify it anyway.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002378 obj->VisitReferences<true>(visitor, visitor);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002379 }
2380
Mathieu Chartier590fee92013-09-13 13:46:47 -07002381 static void VisitCallback(mirror::Object* obj, void* arg)
2382 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2383 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
2384 visitor->operator()(obj);
2385 }
2386
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002387 size_t GetFailureCount() const {
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07002388 return fail_count_->LoadSequentiallyConsistent();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002389 }
2390
2391 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002392 Heap* const heap_;
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002393 Atomic<size_t>* const fail_count_;
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002394 const bool verify_referent_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002395};
2396
Mathieu Chartierc1790162014-05-23 10:54:50 -07002397void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2398 // Slow path, the allocation stack push back must have already failed.
2399 DCHECK(!allocation_stack_->AtomicPushBack(*obj));
2400 do {
2401 // TODO: Add handle VerifyObject.
2402 StackHandleScope<1> hs(self);
2403 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
 2404 // Push our object into the reserve region of the allocation stack. This is only required due
2405 // to heap verification requiring that roots are live (either in the live bitmap or in the
2406 // allocation stack).
2407 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2408 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2409 } while (!allocation_stack_->AtomicPushBack(*obj));
2410}
2411
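// Same idea as above, but for the thread-local allocation stack: first try to reserve a fresh
// thread-local segment of the shared stack with AtomicBumpBack, running a sticky GC until that
// succeeds, then retry the push on the new segment.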
2412void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2413 // Slow path, the allocation stack push back must have already failed.
2414 DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
2415 mirror::Object** start_address;
2416 mirror::Object** end_address;
2417 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
2418 &end_address)) {
2419 // TODO: Add handle VerifyObject.
2420 StackHandleScope<1> hs(self);
2421 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
 2422 // Push our object into the reserve region of the allocation stack. This is only required due
2423 // to heap verification requiring that roots are live (either in the live bitmap or in the
2424 // allocation stack).
2425 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2426 // Push into the reserve allocation stack.
2427 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2428 }
2429 self->SetThreadLocalAllocationStack(start_address, end_address);
2430 // Retry on the new thread-local allocation stack.
2431 CHECK(self->PushOnThreadLocalAllocationStack(*obj)); // Must succeed.
2432}
2433
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002434// Must do this with mutators suspended since we are directly accessing the allocation stacks.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002435size_t Heap::VerifyHeapReferences(bool verify_referents) {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002436 Thread* self = Thread::Current();
2437 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002438 // Lets sort our allocation stacks so that we can efficiently binary search them.
Ian Rogers1d54e732013-05-02 21:10:01 -07002439 allocation_stack_->Sort();
2440 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002441 // Since we sorted the allocation stack content, we need to revoke all
2442 // thread-local allocation stacks.
2443 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002444 Atomic<size_t> fail_count_(0);
2445 VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002446 // Verify objects in the allocation stack since these will be objects which were:
2447 // 1. Allocated prior to the GC (pre GC verification).
2448 // 2. Allocated during the GC (pre sweep GC verification).
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002449 // We don't want to verify the objects in the live stack since they themselves may be
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002450 // pointing to dead objects if they are not reachable.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002451 VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
2452 // Verify the roots:
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002453 Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRootCallback, &visitor);
2454 if (visitor.GetFailureCount() > 0) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002455 // Dump mod-union tables.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002456 for (const auto& table_pair : mod_union_tables_) {
2457 accounting::ModUnionTable* mod_union_table = table_pair.second;
2458 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
2459 }
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002460 // Dump remembered sets.
2461 for (const auto& table_pair : remembered_sets_) {
2462 accounting::RememberedSet* remembered_set = table_pair.second;
2463 remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
2464 }
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07002465 DumpSpaces(LOG(ERROR));
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002466 }
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002467 return visitor.GetFailureCount();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002468}
2469
2470class VerifyReferenceCardVisitor {
2471 public:
2472 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
2473 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
2474 Locks::heap_bitmap_lock_)
Ian Rogers1d54e732013-05-02 21:10:01 -07002475 : heap_(heap), failed_(failed) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002476 }
2477
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002478 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
2479 // annotalysis on visitors.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002480 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
2481 NO_THREAD_SAFETY_ANALYSIS {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07002482 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002483 // Filter out class references since changing an object's class does not mark the card as dirty.
2484 // Also handles large objects, since the only reference they hold is a class reference.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002485 if (ref != nullptr && !ref->IsClass()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002486 accounting::CardTable* card_table = heap_->GetCardTable();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002487 // If the object is not dirty and it references something in the live stack other than its
 2488 // class, then it must be on a dirty card.
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07002489 if (!card_table->AddrIsInCardTable(obj)) {
2490 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
2491 *failed_ = true;
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002492 } else if (!card_table->IsDirty(obj)) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002493 // TODO: Check mod-union tables.
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002494 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
 2495 // kCardDirty - 1 if it didn't get touched since we aged it.
Ian Rogers1d54e732013-05-02 21:10:01 -07002496 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier407f7022014-02-18 14:37:05 -08002497 if (live_stack->ContainsSorted(ref)) {
2498 if (live_stack->ContainsSorted(obj)) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002499 LOG(ERROR) << "Object " << obj << " found in live stack";
2500 }
2501 if (heap_->GetLiveBitmap()->Test(obj)) {
2502 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2503 }
2504 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
2505 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
2506
2507 // Print which field of the object is dead.
2508 if (!obj->IsObjectArray()) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002509 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002510 CHECK(klass != nullptr);
Ian Rogersef7d42f2014-01-06 12:55:46 -08002511 mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
2512 : klass->GetIFields();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002513 CHECK(fields != nullptr);
2514 for (int32_t i = 0; i < fields->GetLength(); ++i) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002515 mirror::ArtField* cur = fields->Get(i);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002516 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
2517 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
2518 << PrettyField(cur);
2519 break;
2520 }
2521 }
2522 } else {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002523 mirror::ObjectArray<mirror::Object>* object_array =
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002524 obj->AsObjectArray<mirror::Object>();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002525 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
2526 if (object_array->Get(i) == ref) {
2527 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
2528 }
2529 }
2530 }
2531
2532 *failed_ = true;
2533 }
2534 }
2535 }
2536 }
2537
2538 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002539 Heap* const heap_;
2540 bool* const failed_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002541};
2542
2543class VerifyLiveStackReferences {
2544 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07002545 explicit VerifyLiveStackReferences(Heap* heap)
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002546 : heap_(heap),
Brian Carlstrom93ba8932013-07-17 21:31:49 -07002547 failed_(false) {}
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002548
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002549 void operator()(mirror::Object* obj) const
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002550 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2551 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07002552 obj->VisitReferences<true>(visitor, VoidFunctor());
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002553 }
2554
2555 bool Failed() const {
2556 return failed_;
2557 }
2558
2559 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002560 Heap* const heap_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002561 bool failed_;
2562};
2563
2564bool Heap::VerifyMissingCardMarks() {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002565 Thread* self = Thread::Current();
2566 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002567 // We need to sort the live stack since we binary search it.
Ian Rogers1d54e732013-05-02 21:10:01 -07002568 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002569 // Since we sorted the allocation stack content, we need to revoke all
2570 // thread-local allocation stacks.
2571 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002572 VerifyLiveStackReferences visitor(this);
2573 GetLiveBitmap()->Visit(visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002574 // We can verify objects in the live stack since none of these should reference dead objects.
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002575 for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002576 if (!kUseThreadLocalAllocationStack || *it != nullptr) {
2577 visitor(*it);
2578 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002579 }
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07002580 return !visitor.Failed();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002581}
2582
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002583void Heap::SwapStacks(Thread* self) {
2584 if (kUseThreadLocalAllocationStack) {
2585 live_stack_->AssertAllZero();
2586 }
Mathieu Chartierd22d5482012-11-06 17:14:12 -08002587 allocation_stack_.swap(live_stack_);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002588}
2589
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002590void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002591 // This must be called only during the pause.
2592 CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
2593 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
2594 MutexLock mu2(self, *Locks::thread_list_lock_);
2595 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
2596 for (Thread* t : thread_list) {
2597 t->RevokeThreadLocalAllocationStack();
2598 }
2599}
2600
Ian Rogers68d8b422014-07-17 11:09:10 -07002601void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
2602 if (kIsDebugBuild) {
2603 if (rosalloc_space_ != nullptr) {
2604 rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
2605 }
2606 if (bump_pointer_space_ != nullptr) {
2607 bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
2608 }
2609 }
2610}
2611
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07002612void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
2613 if (kIsDebugBuild) {
2614 if (bump_pointer_space_ != nullptr) {
2615 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
2616 }
2617 }
2618}
2619
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002620accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
2621 auto it = mod_union_tables_.find(space);
2622 if (it == mod_union_tables_.end()) {
2623 return nullptr;
2624 }
2625 return it->second;
2626}
2627
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002628accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
2629 auto it = remembered_sets_.find(space);
2630 if (it == remembered_sets_.end()) {
2631 return nullptr;
2632 }
2633 return it->second;
2634}
2635
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002636void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002637 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Ian Rogers1d54e732013-05-02 21:10:01 -07002638 // Clear cards and keep track of cards cleared in the mod-union table.
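  // Three cases per space: image/zygote spaces use mod-union tables, the GSS alloc space may use
  // a remembered set, and other malloc spaces just have their cards aged in place.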
Mathieu Chartier02e25112013-08-14 16:14:24 -07002639 for (const auto& space : continuous_spaces_) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002640 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002641 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002642 if (table != nullptr) {
2643 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
2644 "ImageModUnionClearCards";
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002645 TimingLogger::ScopedTiming t(name, timings);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002646 table->ClearCards();
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002647 } else if (use_rem_sets && rem_set != nullptr) {
2648 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
2649 << static_cast<int>(collector_type_);
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002650 TimingLogger::ScopedTiming t("AllocSpaceRemSetClearCards", timings);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002651 rem_set->ClearCards();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002652 } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002653 TimingLogger::ScopedTiming t("AllocSpaceClearCards", timings);
Mathieu Chartierd22d5482012-11-06 17:14:12 -08002654 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
2655 // were dirty before the GC started.
Mathieu Chartierbd0a6532014-02-27 11:14:21 -08002656 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
2657 // -> clean(cleaning thread).
Mathieu Chartier590fee92013-09-13 13:46:47 -07002658 // The possible race outcomes are an aged card or an unaged card. Since we checkpoint the
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002659 // roots and then scan / update the mod-union tables afterwards, we will always scan the card.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002660 // If we end up with the non-aged card, we scan it in the pause.
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002661 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
2662 VoidFunctor());
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07002663 }
2664 }
2665}

static void IdentityMarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*, void*) {
}

void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
  Thread* const self = Thread::Current();
  TimingLogger* const timings = current_gc_iteration_.GetTimings();
  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
  if (verify_pre_gc_heap_) {
    TimingLogger::ScopedTiming t("(Paused)PreGcVerifyHeapReferences", timings);
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    size_t failures = VerifyHeapReferences();
    if (failures > 0) {
      LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
                 << " failures";
    }
  }
  // Check that all objects which reference things in the live stack are on dirty cards.
  if (verify_missing_card_marks_) {
    TimingLogger::ScopedTiming t("(Paused)PreGcVerifyMissingCardMarks", timings);
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SwapStacks(self);
    // Sort the live stack so that we can quickly binary search it later.
    CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
                                    << " missing card mark verification failed\n" << DumpSpaces();
    SwapStacks(self);
  }
  if (verify_mod_union_table_) {
    TimingLogger::ScopedTiming t("(Paused)PreGcVerifyModUnionTables", timings);
    ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
    for (const auto& table_pair : mod_union_tables_) {
      accounting::ModUnionTable* mod_union_table = table_pair.second;
      mod_union_table->UpdateAndMarkReferences(IdentityMarkHeapReferenceCallback, nullptr);
      mod_union_table->Verify();
    }
  }
}

void Heap::PreGcVerification(collector::GarbageCollector* gc) {
  if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
    collector::GarbageCollector::ScopedPause pause(gc);
    PreGcVerificationPaused(gc);
  }
}

void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
  // TODO: Add a new runtime option for this?
  if (verify_pre_gc_rosalloc_) {
    RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
  }
}

void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
  Thread* const self = Thread::Current();
  TimingLogger* const timings = current_gc_iteration_.GetTimings();
  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
  // reachable objects.
  if (verify_pre_sweeping_heap_) {
    TimingLogger::ScopedTiming t("(Paused)PostSweepingVerifyHeapReferences", timings);
    CHECK_NE(self->GetState(), kRunnable);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Swapping bound bitmaps does nothing.
    gc->SwapBitmaps();
    // Pass in false since concurrent reference processing can mean that the reference referents
    // may point to dead objects at the point at which PreSweepingGcVerification is called.
    size_t failures = VerifyHeapReferences(false);
    if (failures > 0) {
      LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
                 << " failures";
    }
    gc->SwapBitmaps();
  }
  if (verify_pre_sweeping_rosalloc_) {
    RosAllocVerification(timings, "PreSweepingRosAllocVerification");
  }
}
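
// The intent of the SwapBitmaps() / verify / SwapBitmaps() pattern above, as read from the code,
// is that VerifyHeapReferences() runs against the collector's mark bitmaps (what this GC considers
// live just before sweeping), and the second swap restores the original bitmaps so that sweeping
// itself proceeds unchanged.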

void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
  // Only pause if we have to do some verification.
  Thread* const self = Thread::Current();
  TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
  if (verify_system_weaks_) {
    ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
    collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
    mark_sweep->VerifySystemWeaks();
  }
  if (verify_post_gc_rosalloc_) {
    RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
  }
  if (verify_post_gc_heap_) {
    TimingLogger::ScopedTiming t("(Paused)PostGcVerifyHeapReferences", timings);
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    size_t failures = VerifyHeapReferences();
    if (failures > 0) {
      LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
                 << " failures";
    }
  }
}

void Heap::PostGcVerification(collector::GarbageCollector* gc) {
  if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
    collector::GarbageCollector::ScopedPause pause(gc);
    PostGcVerificationPaused(gc);
  }
}

void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
  TimingLogger::ScopedTiming t(name, timings);
  for (const auto& space : continuous_spaces_) {
    if (space->IsRosAllocSpace()) {
      VLOG(heap) << name << " : " << space->GetName();
      space->AsRosAllocSpace()->Verify();
    }
  }
}

collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  return WaitForGcToCompleteLocked(cause, self);
}

collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
  collector::GcType last_gc_type = collector::kGcTypeNone;
  uint64_t wait_start = NanoTime();
  while (collector_type_running_ != kCollectorTypeNone) {
    ATRACE_BEGIN("GC: Wait For Completion");
    // We must wait, change thread state and then sleep on gc_complete_cond_.
    gc_complete_cond_->Wait(self);
    last_gc_type = last_gc_type_;
    ATRACE_END();
  }
  uint64_t wait_time = NanoTime() - wait_start;
  total_wait_time_ += wait_time;
  if (wait_time > long_pause_log_threshold_) {
    LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
              << " for cause " << cause;
  }
  return last_gc_type;
}

void Heap::DumpForSigQuit(std::ostream& os) {
  os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
     << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
  DumpGcPerformanceInfo(os);
}

size_t Heap::GetPercentFree() {
  return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
}

void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  if (max_allowed_footprint > GetMaxMemory()) {
    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
             << PrettySize(GetMaxMemory());
    max_allowed_footprint = GetMaxMemory();
  }
  max_allowed_footprint_ = max_allowed_footprint;
}

bool Heap::IsMovableObject(const mirror::Object* obj) const {
  if (kMovingCollector) {
    space::Space* space = FindContinuousSpaceFromObject(obj, true);
    if (space != nullptr) {
      // TODO: Check large object?
      return space->CanMoveObjects();
    }
  }
  return false;
}

void Heap::UpdateMaxNativeFootprint() {
  size_t native_size = native_bytes_allocated_.LoadRelaxed();
  // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
  size_t target_size = native_size / GetTargetHeapUtilization();
  if (target_size > native_size + max_free_) {
    target_size = native_size + max_free_;
  } else if (target_size < native_size + min_free_) {
    target_size = native_size + min_free_;
  }
  native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
}
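
// Worked example for UpdateMaxNativeFootprint(), using assumed values (the real ones come from
// runtime options): with native_size = 8MB, GetTargetHeapUtilization() = 0.75, min_free_ = 512KB
// and max_free_ = 2MB:
//   target_size = 8MB / 0.75 ~= 10.7MB, which is above native_size + max_free_ = 10MB,
//   so target_size is clamped to 10MB and the watermark becomes min(growth_limit_, 10MB).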

collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
  for (const auto& collector : garbage_collectors_) {
    if (collector->GetCollectorType() == collector_type_ &&
        collector->GetGcType() == gc_type) {
      return collector;
    }
  }
  return nullptr;
}

double Heap::HeapGrowthMultiplier() const {
  // If we don't care about pause times (we are in the background) or are in low memory mode,
  // don't grow the heap any faster: return 1.0.
  if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
    return 1.0;
  }
  return foreground_heap_growth_multiplier_;
}

void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
  const uint64_t bytes_allocated = GetBytesAllocated();
  last_gc_size_ = bytes_allocated;
  last_gc_time_ns_ = NanoTime();
  uint64_t target_size;
  collector::GcType gc_type = collector_ran->GetGcType();
  if (gc_type != collector::kGcTypeSticky) {
    // Grow the heap for non-sticky GC.
    const float multiplier = HeapGrowthMultiplier();  // Use the multiplier to grow more in the
                                                      // foreground.
    intptr_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
    CHECK_GE(delta, 0);
    target_size = bytes_allocated + delta * multiplier;
    target_size = std::min(target_size,
                           bytes_allocated + static_cast<uint64_t>(max_free_ * multiplier));
    target_size = std::max(target_size,
                           bytes_allocated + static_cast<uint64_t>(min_free_ * multiplier));
    native_need_to_run_finalization_ = true;
    next_gc_type_ = collector::kGcTypeSticky;
  } else {
    collector::GcType non_sticky_gc_type =
        HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
    // Find what the next non-sticky collector will be.
    collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
    // If the throughput of the current sticky GC >= throughput of the non-sticky collector, then
    // do another sticky collection next.
    // We also check that the bytes allocated aren't over the footprint limit in order to prevent
    // a pathological case where dead objects that the sticky GC fails to reclaim could accumulate
    // if the sticky GC throughput always remained >= the full/partial throughput.
    if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
        non_sticky_collector->GetEstimatedMeanThroughput() &&
        non_sticky_collector->NumberOfIterations() > 0 &&
        bytes_allocated <= max_allowed_footprint_) {
      next_gc_type_ = collector::kGcTypeSticky;
    } else {
      next_gc_type_ = non_sticky_gc_type;
    }
    // If we have freed enough memory, shrink the heap back down.
    if (bytes_allocated + max_free_ < max_allowed_footprint_) {
      target_size = bytes_allocated + max_free_;
    } else {
      target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
    }
  }
  if (!ignore_max_footprint_) {
    SetIdealFootprint(target_size);
    if (IsGcConcurrent()) {
      // Calculate when to perform the next ConcurrentGC.
      // Calculate the estimated GC duration.
      const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
      // Estimate how many remaining bytes we will have when we need to start the next GC.
      size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
      remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
      remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
      if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
        // This should never happen: it would mean that, at the estimated allocation rate, we
        // would exceed the application's entire footprint during a single GC. Schedule another GC
        // nearly straight away.
        remaining_bytes = kMinConcurrentRemainingBytes;
      }
      DCHECK_LE(remaining_bytes, max_allowed_footprint_);
      DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
      // Start a concurrent GC when we get close to the estimated remaining bytes. When the
      // allocation rate is very high, remaining_bytes could tell us that we should start a GC
      // right away.
      concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
                                         static_cast<size_t>(bytes_allocated));
    }
  }
}
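
// Worked example for the non-sticky branch of GrowForUtilization(), using assumed values (the
// actual numbers are runtime options): bytes_allocated = 30MB, GetTargetHeapUtilization() = 0.75,
// multiplier = 2.0 (foreground), min_free_ = 512KB, max_free_ = 2MB:
//   delta       = 30MB / 0.75 - 30MB = 10MB
//   target_size = 30MB + 10MB * 2.0  = 50MB
//   clamped to  <= 30MB + 2MB * 2.0 = 34MB and >= 30MB + 512KB * 2.0, giving 34MB.
// For a concurrent collector, concurrent_start_bytes_ is then set below the footprint by roughly
// the number of bytes we expect mutators to allocate during one GC (allocation_rate_ * duration),
// so that a GC started at that point can finish before the footprint limit is reached.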

void Heap::ClearGrowthLimit() {
  growth_limit_ = capacity_;
  non_moving_space_->ClearGrowthLimit();
}

void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
  ScopedObjectAccess soa(self);
  ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
  jvalue args[1];
  args[0].l = arg.get();
  InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
  // Restore object in case it gets moved.
  *object = soa.Decode<mirror::Object*>(arg.get());
}

void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
  StackHandleScope<1> hs(self);
  HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
  RequestConcurrentGC(self);
}

void Heap::RequestConcurrentGC(Thread* self) {
  // Make sure that we can do a concurrent GC.
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
      self->IsHandlingStackOverflow()) {
    return;
  }
  JNIEnv* env = self->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestGC);
  CHECK(!env->ExceptionCheck());
}

void Heap::ConcurrentGC(Thread* self) {
  if (Runtime::Current()->IsShuttingDown(self)) {
    return;
  }
  // Wait for any GCs currently running to finish.
  if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
    // If we can't run the GC type we wanted to run, find the next appropriate one and try that
    // instead. E.g. if we can't do a partial GC, do a full GC instead.
    if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
        collector::kGcTypeNone) {
      for (collector::GcType gc_type : gc_plan_) {
        // Attempt to run the collector; if we succeed, we are done.
        if (gc_type > next_gc_type_ &&
            CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
          break;
        }
      }
    }
  }
}

void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *heap_trim_request_lock_);
    if (desired_collector_type_ == desired_collector_type) {
      return;
    }
    heap_transition_or_trim_target_time_ =
        std::max(heap_transition_or_trim_target_time_, NanoTime() + delta_time);
    desired_collector_type_ = desired_collector_type;
  }
  SignalHeapTrimDaemon(self);
}

void Heap::RequestHeapTrim() {
  // GC completed and now we must decide whether to request a heap trim (advising pages back to
  // the kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim
  // scans a space it will hold its lock and can become a cause of jank.
  // Note: the large object space trims itself, and the Zygote space was trimmed at fork time and
  // is unchanging since then.

  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
  // because that only marks object heads, so a large array looks like lots of empty space. We
  // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
  // to utilization (which is probably inversely proportional to how much benefit we can expect).
  // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
  // not how much use we're making of those pages.

  Thread* self = Thread::Current();
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
      runtime->IsZygote()) {
    // Ignore the request if we are the zygote, to prevent app launching lag caused by the sleep
    // in the heap trimmer daemon. b/17310019
    // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
    // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
    // as we don't hold the lock while requesting the trim).
    return;
  }
  {
    MutexLock mu(self, *heap_trim_request_lock_);
    if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
      // We have done a heap trim in the last kHeapTrimWait nanoseconds; don't request another one
      // just yet.
      return;
    }
    heap_trim_request_pending_ = true;
    uint64_t current_time = NanoTime();
    if (heap_transition_or_trim_target_time_ < current_time) {
      heap_transition_or_trim_target_time_ = current_time + kHeapTrimWait;
    }
  }
  // Notify the daemon thread which will actually do the heap trim.
  SignalHeapTrimDaemon(self);
}

void Heap::SignalHeapTrimDaemon(Thread* self) {
  JNIEnv* env = self->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != nullptr);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestHeapTrim);
  CHECK(!env->ExceptionCheck());
}

void Heap::RevokeThreadLocalBuffers(Thread* thread) {
  if (rosalloc_space_ != nullptr) {
    rosalloc_space_->RevokeThreadLocalBuffers(thread);
  }
  if (bump_pointer_space_ != nullptr) {
    bump_pointer_space_->RevokeThreadLocalBuffers(thread);
  }
}

void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
  if (rosalloc_space_ != nullptr) {
    rosalloc_space_->RevokeThreadLocalBuffers(thread);
  }
}

void Heap::RevokeAllThreadLocalBuffers() {
  if (rosalloc_space_ != nullptr) {
    rosalloc_space_->RevokeAllThreadLocalBuffers();
  }
  if (bump_pointer_space_ != nullptr) {
    bump_pointer_space_->RevokeAllThreadLocalBuffers();
  }
}

bool Heap::IsGCRequestPending() const {
  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
}

void Heap::RunFinalization(JNIEnv* env) {
  // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
  if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
    CHECK(WellKnownClasses::java_lang_System != nullptr);
    WellKnownClasses::java_lang_System_runFinalization =
        CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
    CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
  }
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
                            WellKnownClasses::java_lang_System_runFinalization);
}

void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
  Thread* self = ThreadForEnv(env);
  if (native_need_to_run_finalization_) {
    RunFinalization(env);
    UpdateMaxNativeFootprint();
    native_need_to_run_finalization_ = false;
  }
  // Total number of native bytes allocated.
  size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
  new_native_bytes_allocated += bytes;
  if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
    collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
        collector::kGcTypeFull;

    // The second watermark is higher than the gc watermark. If you hit this it means you are
    // allocating native objects faster than the GC can keep up with.
    if (new_native_bytes_allocated > growth_limit_) {
      if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
        // Just finished a GC, attempt to run finalizers.
        RunFinalization(env);
        CHECK(!env->ExceptionCheck());
      }
      // If we are still over the watermark, attempt a GC for alloc and run finalizers.
      if (new_native_bytes_allocated > growth_limit_) {
        CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
        RunFinalization(env);
        native_need_to_run_finalization_ = false;
        CHECK(!env->ExceptionCheck());
      }
      // We have just run finalizers; update the native watermark since it is very likely that
      // finalizers released native allocations.
      UpdateMaxNativeFootprint();
    } else if (!IsGCRequestPending()) {
      if (IsGcConcurrent()) {
        RequestConcurrentGC(self);
      } else {
        CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
      }
    }
  }
}
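
// Sketch of the control flow above from a caller's point of view (the thresholds are assumed
// values; the real ones depend on runtime options and UpdateMaxNativeFootprint()):
//   - below native_footprint_gc_watermark_ (say 10MB): only the counter is updated;
//   - above the watermark but at or below growth_limit_ (say 64MB): a concurrent GC is requested,
//     or a synchronous GC is run if the collector is not concurrent;
//   - above growth_limit_: the caller waits for any running GC, runs finalizers and, if still
//     over the limit, forces another GC before returning.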

void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
  size_t expected_size;
  do {
    expected_size = native_bytes_allocated_.LoadRelaxed();
    if (UNLIKELY(bytes > expected_size)) {
      ScopedObjectAccess soa(env);
      env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
                    StringPrintf("Attempted to free %zd native bytes with only %zd native bytes "
                                 "registered as allocated", bytes, expected_size).c_str());
      break;
    }
  } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
                                                               expected_size - bytes));
}
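
// The loop above is a standard weak compare-and-swap retry: expected_size is re-read on every
// iteration, and CompareExchangeWeakRelaxed() may fail spuriously or because another thread has
// changed native_bytes_allocated_, in which case the subtraction is retried. The early break on
// an over-free leaves the counter untouched and raises a Java RuntimeException instead.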

size_t Heap::GetTotalMemory() const {
  return std::max(max_allowed_footprint_, GetBytesAllocated());
}

void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
  DCHECK(mod_union_table != nullptr);
  mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
}

void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
  CHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
        (c->IsVariableSize() || c->GetObjectSize() == byte_count));
  CHECK_GE(byte_count, sizeof(mirror::Object));
}

void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
  CHECK(remembered_set != nullptr);
  space::Space* space = remembered_set->GetSpace();
  CHECK(space != nullptr);
  CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
  remembered_sets_.Put(space, remembered_set);
  CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
}

void Heap::RemoveRememberedSet(space::Space* space) {
  CHECK(space != nullptr);
  auto it = remembered_sets_.find(space);
  CHECK(it != remembered_sets_.end());
  delete it->second;
  remembered_sets_.erase(it);
  CHECK(remembered_sets_.find(space) == remembered_sets_.end());
}

void Heap::ClearMarkedObjects() {
  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (space->GetLiveBitmap() != mark_bitmap) {
      mark_bitmap->Clear();
    }
  }
  // Clear the marked objects in the discontinuous space object sets.
  for (const auto& space : GetDiscontinuousSpaces()) {
    space->GetMarkBitmap()->Clear();
  }
}

}  // namespace gc
}  // namespace art