/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <limits>
#if defined(__BIONIC__) || defined(__GLIBC__)
#include <malloc.h>  // For mallinfo()
#endif
#include <memory>
#include <vector>

#include "android-base/stringprintf.h"

#include "allocation_listener.h"
#include "art_field-inl.h"
#include "backtrace_helper.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/file_utils.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/memory_tool.h"
#include "base/os.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex/dex_file-inl.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_sweep.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/racing_check.h"
#include "gc/reference_processor.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "gc/task_processor.h"
#include "gc/verification.h"
#include "gc_pause_listener.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "heap-inl.h"
#include "heap-visit-objects-inl.h"
#include "image.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jni/java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "verify_object-inl.h"
#include "well_known_classes.h"

namespace art {

namespace gc {

static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds

DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);

// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static constexpr double kStickyGcThroughputAdjustment =
    kEnableGenerationalConcurrentCopyingCollection ? 0.5 : 1.0;
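// This adjustment scales the sticky collector's measured throughput when the heap decides whether
// the next collection should again be a sticky GC (see the throughput comparison in the GC
// ergonomics code later in this file); a smaller value makes partial/full GCs more likely.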
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;
// Space names.
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static const char* kNonMovingSpaceName = "non moving space";
static const char* kZygoteSpaceName = "zygote space";
static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
static constexpr bool kGCALotMode = false;
// GC alot mode uses a small allocation stack to stress test a lot of GC.
static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
// Verify object mode uses a small allocation stack size since searching the allocation stack is
// slow.
static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
    sizeof(mirror::HeapReference<mirror::Object>);

// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;

static const char* kRegionSpaceName = "main space (region space)";

// If true, we log all GCs in both the foreground and background. Used for debugging.
static constexpr bool kLogAllGCs = false;

// How much we grow the TLAB if we can do it.
static constexpr size_t kPartialTlabSize = 16 * KB;
static constexpr bool kUsePartialTlabs = true;

// Use the max heap size for 2 seconds after a fork; this is smaller than the usual 5s window
// since we don't want to allocate with relaxed ergonomics for that long.
static constexpr size_t kPostForkMaxHeapDurationMS = 2000;

#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
// 300 MB (0x12c00000) - (default non-moving space capacity).
uint8_t* const Heap::kPreferredAllocSpaceBegin =
    reinterpret_cast<uint8_t*>(300 * MB - kDefaultNonMovingSpaceCapacity);
#else
#ifdef __ANDROID__
// For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
#else
// For 32-bit host, use 0x40000000 because asan uses most of the space below this.
uint8_t* const Heap::kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
#endif
#endif
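// Note that kPreferredAllocSpaceBegin is only a hint: MapAnonymousPreferredAddress() below falls
// back to an arbitrary address when the preferred range is unavailable, so heap creation does not
// fail just because another mapping already occupies it.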

static inline bool CareAboutPauseTimes() {
  return Runtime::Current()->InJankPerceptibleProcessState();
}

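// Heap construction proceeds in broad phases: choose collector types and allocators, reserve the
// memory mappings and create the spaces (boot image, non-moving, main / bump pointer / region,
// large object), allocate the card table and the mark/allocation/live stacks, create the GC locks
// and condition variables, and finally instantiate the garbage collectors themselves.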
Heap::Heap(size_t initial_size,
           size_t growth_limit,
           size_t min_free,
           size_t max_free,
           double target_utilization,
           double foreground_heap_growth_multiplier,
           size_t capacity,
           size_t non_moving_space_capacity,
           const std::vector<std::string>& boot_class_path,
           const std::vector<std::string>& boot_class_path_locations,
           const std::string& image_file_name,
           const InstructionSet image_instruction_set,
           CollectorType foreground_collector_type,
           CollectorType background_collector_type,
           space::LargeObjectSpaceType large_object_space_type,
           size_t large_object_threshold,
           size_t parallel_gc_threads,
           size_t conc_gc_threads,
           bool low_memory_mode,
           size_t long_pause_log_threshold,
           size_t long_gc_log_threshold,
           bool ignore_target_footprint,
           bool use_tlab,
           bool verify_pre_gc_heap,
           bool verify_pre_sweeping_heap,
           bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc,
           bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc,
           bool gc_stress_mode,
           bool measure_gc_performance,
           bool use_homogeneous_space_compaction_for_oom,
           uint64_t min_interval_homogeneous_space_compaction_by_oom,
           bool dump_region_info_before_gc,
           bool dump_region_info_after_gc)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      pending_task_lock_(nullptr),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      process_cpu_start_time_ns_(ProcessCpuNanoTime()),
      pre_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
      post_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
      pre_gc_weighted_allocated_bytes_(0.0),
      post_gc_weighted_allocated_bytes_(0.0),
      ignore_target_footprint_(ignore_target_footprint),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      zygote_space_(nullptr),
      large_object_threshold_(large_object_threshold),
      disable_thread_flip_count_(0),
      thread_flip_running_(false),
      collector_type_running_(kCollectorTypeNone),
      last_gc_cause_(kGcCauseNone),
      thread_running_gc_(nullptr),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      target_footprint_(initial_size),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_registered_(0),
      old_native_bytes_allocated_(0),
      native_objects_notified_(0),
      num_bytes_freed_revoke_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      gc_stress_mode_(gc_stress_mode),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
          kDefaultAllocationStackSize),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      region_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      total_wait_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      semi_space_collector_(nullptr),
      active_concurrent_copying_collector_(nullptr),
      young_concurrent_copying_collector_(nullptr),
      concurrent_copying_collector_(nullptr),
      is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
      use_tlab_(use_tlab),
      main_space_backup_(nullptr),
      min_interval_homogeneous_space_compaction_by_oom_(
          min_interval_homogeneous_space_compaction_by_oom),
      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
      pending_collector_transition_(nullptr),
      pending_heap_trim_(nullptr),
      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
      running_collection_is_blocking_(false),
      blocking_gc_count_(0U),
      blocking_gc_time_(0U),
      last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
          (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
      gc_count_last_window_(0U),
      blocking_gc_count_last_window_(0U),
      gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
      blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
                                        kGcCountRateMaxBucketCount),
      alloc_tracking_enabled_(false),
      backtrace_lock_(nullptr),
      seen_backtrace_count_(0u),
      unique_backtrace_count_(0u),
      gc_disabled_for_shutdown_(false),
      dump_region_info_before_gc_(dump_region_info_before_gc),
      dump_region_info_after_gc_(dump_region_info_after_gc) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  if (kUseReadBarrier) {
    CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
    CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
  }
  verification_.reset(new Verification(this));
  CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
  ScopedTrace trace(__FUNCTION__);
  Runtime* const runtime = Runtime::Current();
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  const bool is_zygote = runtime->IsZygote();
  if (!is_zygote) {
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));

  // We don't have hspace compaction enabled with GSS or CC.
  if (foreground_collector_type_ == kCollectorTypeGSS ||
      foreground_collector_type_ == kCollectorTypeCC) {
    use_homogeneous_space_compaction_for_oom_ = false;
  }
  bool support_homogeneous_space_compaction =
      background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
      use_homogeneous_space_compaction_for_oom_;
  // We may use the same space as the main space for the non moving space if we don't need to
  // compact from the main space.
  // This is not the case if we support homogeneous compaction or have a moving background
  // collector type.
  bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);
  if (foreground_collector_type_ == kCollectorTypeGSS) {
    separate_non_moving_space = false;
  }

  // Requested begin for the alloc space, to follow the mapped image and oat files.
  uint8_t* request_begin = nullptr;
  // Calculate the extra space required after the boot image, see allocations below.
  size_t heap_reservation_size = 0u;
  if (separate_non_moving_space) {
    heap_reservation_size = non_moving_space_capacity;
  } else if ((foreground_collector_type_ != kCollectorTypeCC) &&
             (is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
    heap_reservation_size = capacity_;
  }
  heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
  // Load image space(s).
  std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
  MemMap heap_reservation;
  if (space::ImageSpace::LoadBootImage(boot_class_path,
                                       boot_class_path_locations,
                                       image_file_name,
                                       image_instruction_set,
                                       heap_reservation_size,
                                       &boot_image_spaces,
                                       &heap_reservation)) {
    DCHECK_EQ(heap_reservation_size, heap_reservation.IsValid() ? heap_reservation.Size() : 0u);
    DCHECK(!boot_image_spaces.empty());
    request_begin = boot_image_spaces.back()->GetImageHeader().GetOatFileEnd();
    DCHECK(!heap_reservation.IsValid() || request_begin == heap_reservation.Begin())
        << "request_begin=" << static_cast<const void*>(request_begin)
        << " heap_reservation.Begin()=" << static_cast<const void*>(heap_reservation.Begin());
    for (std::unique_ptr<space::ImageSpace>& space : boot_image_spaces) {
      boot_image_spaces_.push_back(space.get());
      AddSpace(space.release());
    }
  } else {
    if (foreground_collector_type_ == kCollectorTypeCC) {
      // Need to use a low address so that we can allocate a contiguous 2 * Xmx space
      // when there's no image (dex2oat for target).
      request_begin = kPreferredAllocSpaceBegin;
    }
    // Gross hack to make dex2oat deterministic.
    if (foreground_collector_type_ == kCollectorTypeMS && Runtime::Current()->IsAotCompiler()) {
      // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
      // b/26849108
      request_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
    }
  }

  /*
  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-  nonmoving space (non_moving_space_capacity)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space / bump space 1 (capacity_) +-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space2 / bump space 2 (capacity_)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
  */

  MemMap main_mem_map_1;
  MemMap main_mem_map_2;

  std::string error_str;
  MemMap non_moving_space_mem_map;
  if (separate_non_moving_space) {
    ScopedTrace trace2("Create separate non moving space");
    // If we are the zygote, the non moving space becomes the zygote space when we run
    // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
    // rename the mem map later.
    const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
    // Reserve the non moving mem map before the other two since it needs to be at a specific
    // address.
    DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
    if (heap_reservation.IsValid()) {
      non_moving_space_mem_map = heap_reservation.RemapAtEnd(
          heap_reservation.Begin(), space_name, PROT_READ | PROT_WRITE, &error_str);
    } else {
      non_moving_space_mem_map = MapAnonymousPreferredAddress(
          space_name, request_begin, non_moving_space_capacity, &error_str);
    }
    CHECK(non_moving_space_mem_map.IsValid()) << error_str;
    DCHECK(!heap_reservation.IsValid());
    // Try to reserve virtual memory at a lower address if we have a separate non moving space.
    request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
  }
  // Attempt to create 2 mem maps at or after the requested begin.
  if (foreground_collector_type_ != kCollectorTypeCC) {
    ScopedTrace trace2("Create main mem map");
    if (separate_non_moving_space ||
        !(is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
      main_mem_map_1 = MapAnonymousPreferredAddress(
          kMemMapSpaceName[0], request_begin, capacity_, &error_str);
    } else {
      // If no separate non-moving space and we are the zygote or the collector type is GSS,
      // the main space must come right after the image space to avoid a gap.
      // This is required since we want the zygote space to be adjacent to the image space.
      DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
      main_mem_map_1 = MemMap::MapAnonymous(
          kMemMapSpaceName[0],
          request_begin,
          capacity_,
          PROT_READ | PROT_WRITE,
          /* low_4gb= */ true,
          /* reuse= */ false,
          heap_reservation.IsValid() ? &heap_reservation : nullptr,
          &error_str);
    }
    CHECK(main_mem_map_1.IsValid()) << error_str;
    DCHECK(!heap_reservation.IsValid());
  }
  if (support_homogeneous_space_compaction ||
      background_collector_type_ == kCollectorTypeSS ||
      foreground_collector_type_ == kCollectorTypeSS) {
    ScopedTrace trace2("Create main mem map 2");
    main_mem_map_2 = MapAnonymousPreferredAddress(
        kMemMapSpaceName[1], main_mem_map_1.End(), capacity_, &error_str);
    CHECK(main_mem_map_2.IsValid()) << error_str;
  }
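  // main_mem_map_2 is only needed when a second space can serve as a compaction target (semi
  // space collection or homogeneous space compaction); it later backs either the second bump
  // pointer space or the main space backup.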

  // Create the non moving space first so that bitmaps don't take up the address range.
  if (separate_non_moving_space) {
    ScopedTrace trace2("Add non moving space");
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // active rosalloc spaces.
    const size_t size = non_moving_space_mem_map.Size();
    const void* non_moving_space_mem_map_begin = non_moving_space_mem_map.Begin();
    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
                                                               "zygote / non moving space",
                                                               kDefaultStartingSize,
                                                               initial_size,
                                                               size,
                                                               size,
                                                               /* can_move_objects= */ false);
    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
        << non_moving_space_mem_map_begin;
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    AddSpace(non_moving_space_);
  }
  // Create other spaces based on whether or not we have a moving GC.
  if (foreground_collector_type_ == kCollectorTypeCC) {
    CHECK(separate_non_moving_space);
    // Reserve twice the capacity, to allow evacuating every region for explicit GCs.
    MemMap region_space_mem_map =
        space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
    CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
    region_space_ = space::RegionSpace::Create(kRegionSpaceName, std::move(region_space_mem_map));
    AddSpace(region_space_);
  } else if (IsMovingGc(foreground_collector_type_) &&
             foreground_collector_type_ != kCollectorTypeGSS) {
    // Create bump pointer spaces.
    // We only create the bump pointer spaces if the foreground collector is a compacting GC.
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
                                                                    std::move(main_mem_map_1));
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                            std::move(main_mem_map_2));
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    CHECK(separate_non_moving_space);
  } else {
    CreateMainMallocSpace(std::move(main_mem_map_1), initial_size, growth_limit_, capacity_);
    CHECK(main_space_ != nullptr);
    AddSpace(main_space_);
    if (!separate_non_moving_space) {
      non_moving_space_ = main_space_;
      CHECK(!non_moving_space_->CanMoveObjects());
    }
    if (foreground_collector_type_ == kCollectorTypeGSS) {
      CHECK_EQ(foreground_collector_type_, background_collector_type_);
      // Create bump pointer spaces instead of a backup space.
      main_mem_map_2.Reset();
      bump_pointer_space_ = space::BumpPointerSpace::Create(
          "Bump pointer space 1", kGSSBumpPointerSpaceCapacity);
      CHECK(bump_pointer_space_ != nullptr);
      AddSpace(bump_pointer_space_);
      temp_space_ = space::BumpPointerSpace::Create(
          "Bump pointer space 2", kGSSBumpPointerSpaceCapacity);
      CHECK(temp_space_ != nullptr);
      AddSpace(temp_space_);
    } else if (main_mem_map_2.IsValid()) {
      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
      main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
                                                           initial_size,
                                                           growth_limit_,
                                                           capacity_,
                                                           name,
                                                           /* can_move_objects= */ true));
      CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in the heap_begin and heap_end.
      AddSpace(main_space_backup_.get());
    }
  }
  CHECK(non_moving_space_ != nullptr);
  CHECK(!non_moving_space_->CanMoveObjects());
  // Allocate the large object space.
  if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
    large_object_space_ = space::FreeListSpace::Create("free list large object space", capacity_);
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else {
    // Disable the large object space by making the cutoff excessively large.
    large_object_threshold_ = std::numeric_limits<size_t>::max();
    large_object_space_ = nullptr;
  }
  if (large_object_space_ != nullptr) {
    AddSpace(large_object_space_);
  }
  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
  uint8_t* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;
  // Remove the main backup space since it slows down the GC to have unused extra spaces.
  // TODO: Avoid needing to do this.
  if (main_space_backup_.get() != nullptr) {
    RemoveSpace(main_space_backup_.get());
  }
  // Allocate the card table.
  // We currently don't support dynamically resizing the card table.
  // Since we don't know where in the low_4gb the app image will be located, make the card table
  // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
  UNUSED(heap_capacity);
  // Start at 4 KB, we can be sure there are no spaces mapped this low since the address range is
  // reserved by the kernel.
  static constexpr size_t kMinHeapAddress = 4 * KB;
  card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
                                                  4 * GB - kMinHeapAddress));
  CHECK(card_table_.get() != nullptr) << "Failed to create card table";
  if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
    rb_table_.reset(new accounting::ReadBarrierTable());
    DCHECK(rb_table_->IsAllCleared());
  }
  if (HasBootImageSpace()) {
    // Don't add the image mod union table if we are running without an image, this can crash if
    // we use the CardCache implementation.
    for (space::ImageSpace* image_space : GetBootImageSpaces()) {
      accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
          "Image mod-union table", this, image_space);
      CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
      AddModUnionTable(mod_union_table);
    }
  }
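  // The image mod-union tables created above record references from the boot image spaces into
  // the rest of the heap, so boot image cards do not need to be rescanned on every collection.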
  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
  }
  // TODO: Count objects in the image space here?
  num_bytes_allocated_.store(0, std::memory_order_relaxed);
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                    kDefaultMarkStackSize));
  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
  allocation_stack_.reset(accounting::ObjectStack::Create(
      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
  live_stack_.reset(accounting::ObjectStack::Create(
      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));

  thread_flip_lock_ = new Mutex("GC thread flip lock");
  thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
                                                *thread_flip_lock_));
  task_processor_.reset(new TaskProcessor());
  reference_processor_.reset(new ReferenceProcessor());
  pending_task_lock_ = new Mutex("Pending task lock");
  if (ignore_target_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
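  // With ignore_target_footprint_ set, both the footprint and the concurrent GC start threshold
  // are pushed to SIZE_MAX, so heap growth on its own never schedules a collection.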
  CHECK_NE(target_footprint_.load(std::memory_order_relaxed), 0U);
  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
        (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
      garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
    }
  }
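  // The loop above instantiates the mark-sweep family twice: non-concurrent variants when MS may
  // be used and concurrent variants when CMS may be used, each as full, partial and sticky
  // collectors.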
  if (kMovingCollector) {
    if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
        MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
        use_homogeneous_space_compaction_for_oom_) {
      // TODO: Clean this up.
      const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
      semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                       generational ? "generational" : "");
      garbage_collectors_.push_back(semi_space_collector_);
    }
    if (MayUseCollector(kCollectorTypeCC)) {
      concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
                                                                       /*young_gen=*/false,
                                                                       "",
                                                                       measure_gc_performance);
      if (kEnableGenerationalConcurrentCopyingCollection) {
        young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
            this,
            /*young_gen=*/true,
            "young",
            measure_gc_performance);
      }
      active_concurrent_copying_collector_ = concurrent_copying_collector_;
      DCHECK(region_space_ != nullptr);
      concurrent_copying_collector_->SetRegionSpace(region_space_);
      if (kEnableGenerationalConcurrentCopyingCollection) {
        young_concurrent_copying_collector_->SetRegionSpace(region_space_);
        // At this point, non-moving space should be created.
        DCHECK(non_moving_space_ != nullptr);
        concurrent_copying_collector_->CreateInterRegionRefBitmaps();
      }
      garbage_collectors_.push_back(concurrent_copying_collector_);
      if (kEnableGenerationalConcurrentCopyingCollection) {
        garbage_collectors_.push_back(young_concurrent_copying_collector_);
      }
    }
  }
  if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
    // Check that there's no gap between the image space and the non moving space so that the
    // immune region won't break (e.g. due to a large object allocated in the gap). This is only
    // required when we're the zygote or using GSS.
    // Space with smallest Begin().
    space::ImageSpace* first_space = nullptr;
    for (space::ImageSpace* space : boot_image_spaces_) {
      if (first_space == nullptr || space->Begin() < first_space->Begin()) {
        first_space = space;
      }
    }
    bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
    if (!no_gap) {
      PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
      MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true);
      LOG(FATAL) << "There's a gap between the image space and the non-moving space";
    }
  }
  instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
  if (gc_stress_mode_) {
    backtrace_lock_ = new Mutex("GC complete lock");
  }
  if (is_running_on_memory_tool_ || gc_stress_mode_) {
    instrumentation->InstrumentQuickAllocEntryPoints();
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

MemMap Heap::MapAnonymousPreferredAddress(const char* name,
                                          uint8_t* request_begin,
                                          size_t capacity,
                                          std::string* out_error_str) {
  while (true) {
    MemMap map = MemMap::MapAnonymous(name,
                                      request_begin,
                                      capacity,
                                      PROT_READ | PROT_WRITE,
                                      /*low_4gb=*/ true,
                                      /*reuse=*/ false,
                                      /*reservation=*/ nullptr,
                                      out_error_str);
    if (map.IsValid() || request_begin == nullptr) {
      return map;
    }
    // Retry a second time with no specified request begin.
    request_begin = nullptr;
  }
}

bool Heap::MayUseCollector(CollectorType type) const {
  return foreground_collector_type_ == type || background_collector_type_ == type;
}

space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map,
                                                      size_t initial_size,
                                                      size_t growth_limit,
                                                      size_t capacity,
                                                      const char* name,
                                                      bool can_move_objects) {
  space::MallocSpace* malloc_space = nullptr;
  if (kUseRosAlloc) {
    // Create rosalloc space.
    malloc_space = space::RosAllocSpace::CreateFromMemMap(std::move(mem_map),
                                                          name,
                                                          kDefaultStartingSize,
                                                          initial_size,
                                                          growth_limit,
                                                          capacity,
                                                          low_memory_mode_,
                                                          can_move_objects);
  } else {
    malloc_space = space::DlMallocSpace::CreateFromMemMap(std::move(mem_map),
                                                          name,
                                                          kDefaultStartingSize,
                                                          initial_size,
                                                          growth_limit,
                                                          capacity,
                                                          can_move_objects);
  }
  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* rem_set =
        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
    AddRememberedSet(rem_set);
  }
  CHECK(malloc_space != nullptr) << "Failed to create " << name;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  return malloc_space;
}

void Heap::CreateMainMallocSpace(MemMap&& mem_map,
                                 size_t initial_size,
                                 size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
    can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
  }
  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
    RemoveRememberedSet(main_space_);
  }
  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
  main_space_ = CreateMallocSpaceFromMemMap(std::move(mem_map),
                                            initial_size,
                                            growth_limit,
                                            capacity, name,
                                            can_move_objects);
  SetSpaceAsDefault(main_space_);
  VLOG(heap) << "Created main space " << main_space_;
}

void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}
813
Mathieu Chartier6a7824d2014-08-22 14:53:04 -0700814void Heap::DisableMovingGc() {
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -0700815 CHECK(!kUseReadBarrier);
Mathieu Chartier31f44142014-04-08 14:40:03 -0700816 if (IsMovingGc(foreground_collector_type_)) {
Mathieu Chartier6a7824d2014-08-22 14:53:04 -0700817 foreground_collector_type_ = kCollectorTypeCMS;
Mathieu Chartier6dda8982014-03-06 11:11:48 -0800818 }
Mathieu Chartier31f44142014-04-08 14:40:03 -0700819 if (IsMovingGc(background_collector_type_)) {
820 background_collector_type_ = foreground_collector_type_;
Mathieu Chartier6dda8982014-03-06 11:11:48 -0800821 }
Mathieu Chartier31f44142014-04-08 14:40:03 -0700822 TransitionCollector(foreground_collector_type_);
Mathieu Chartier4f55e222015-09-04 13:26:21 -0700823 Thread* const self = Thread::Current();
Mathieu Chartier6a7824d2014-08-22 14:53:04 -0700824 ScopedThreadStateChange tsc(self, kSuspended);
Mathieu Chartier4f55e222015-09-04 13:26:21 -0700825 ScopedSuspendAll ssa(__FUNCTION__);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -0700826 // Something may have caused the transition to fail.
Mathieu Chartiere4927f62014-08-23 13:56:03 -0700827 if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
Mathieu Chartier6a7824d2014-08-22 14:53:04 -0700828 CHECK(main_space_ != nullptr);
829 // The allocation stack may have non movable objects in it. We need to flush it since the GC
830 // can only handle marking allocation stack objects of one non moving space and one main
831 // space.
832 {
833 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
834 FlushAllocStack();
835 }
836 main_space_->DisableMovingObjects();
837 non_moving_space_ = main_space_;
838 CHECK(!non_moving_space_->CanMoveObjects());
839 }
Mathieu Chartier6dda8982014-03-06 11:11:48 -0800840}
841
Mathieu Chartier590fee92013-09-13 13:46:47 -0700842bool Heap::IsCompilingBoot() const {
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800843 if (!Runtime::Current()->IsAotCompiler()) {
Alex Light64ad14d2014-08-19 14:23:13 -0700844 return false;
845 }
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -0800846 ScopedObjectAccess soa(Thread::Current());
Mathieu Chartier590fee92013-09-13 13:46:47 -0700847 for (const auto& space : continuous_spaces_) {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800848 if (space->IsImageSpace() || space->IsZygoteSpace()) {
Mathieu Chartier590fee92013-09-13 13:46:47 -0700849 return false;
850 }
851 }
852 return true;
853}
854
Mathieu Chartier1d27b342014-01-28 12:51:09 -0800855void Heap::IncrementDisableMovingGC(Thread* self) {
Mathieu Chartier590fee92013-09-13 13:46:47 -0700856 // Need to do this holding the lock to prevent races where the GC is about to run / running when
857 // we attempt to disable it.
Mathieu Chartiercaa82d62014-02-02 16:51:17 -0800858 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700859 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartier1d27b342014-01-28 12:51:09 -0800860 ++disable_moving_gc_count_;
Mathieu Chartier31f44142014-04-08 14:40:03 -0700861 if (IsMovingGc(collector_type_running_)) {
Mathieu Chartier89a201e2014-05-02 10:27:26 -0700862 WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -0800863 }
Mathieu Chartier590fee92013-09-13 13:46:47 -0700864}
865
Mathieu Chartier1d27b342014-01-28 12:51:09 -0800866void Heap::DecrementDisableMovingGC(Thread* self) {
Mathieu Chartier590fee92013-09-13 13:46:47 -0700867 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartierb735bd92015-06-24 17:04:17 -0700868 CHECK_GT(disable_moving_gc_count_, 0U);
Mathieu Chartier1d27b342014-01-28 12:51:09 -0800869 --disable_moving_gc_count_;
Mathieu Chartier590fee92013-09-13 13:46:47 -0700870}
871
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700872void Heap::IncrementDisableThreadFlip(Thread* self) {
873 // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
874 CHECK(kUseReadBarrier);
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -0800875 bool is_nested = self->GetDisableThreadFlipCount() > 0;
876 self->IncrementDisableThreadFlipCount();
877 if (is_nested) {
878 // If this is a nested JNI critical section enter, we don't need to wait or increment the global
879 // counter. The global counter is incremented only once for a thread for the outermost enter.
880 return;
881 }
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700882 ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
883 MutexLock mu(self, *thread_flip_lock_);
884 bool has_waited = false;
885 uint64_t wait_start = NanoTime();
Hiroshi Yamauchiee235822016-08-19 17:03:27 -0700886 if (thread_flip_running_) {
Andreas Gampe9b827ab2017-12-07 19:32:48 -0800887 ScopedTrace trace("IncrementDisableThreadFlip");
Hiroshi Yamauchiee235822016-08-19 17:03:27 -0700888 while (thread_flip_running_) {
889 has_waited = true;
890 thread_flip_cond_->Wait(self);
891 }
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700892 }
893 ++disable_thread_flip_count_;
894 if (has_waited) {
895 uint64_t wait_time = NanoTime() - wait_start;
896 total_wait_time_ += wait_time;
897 if (wait_time > long_pause_log_threshold_) {
898 LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
899 }
900 }
901}
902
903void Heap::DecrementDisableThreadFlip(Thread* self) {
904 // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
905 // the GC if it is waiting to begin a thread flip.
906 CHECK(kUseReadBarrier);
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -0800907 self->DecrementDisableThreadFlipCount();
908 bool is_outermost = self->GetDisableThreadFlipCount() == 0;
909 if (!is_outermost) {
910 // If this is not an outermost JNI critical exit, we don't need to decrement the global counter.
911 // The global counter is decremented only once for a thread for the outermost exit.
912 return;
913 }
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700914 MutexLock mu(self, *thread_flip_lock_);
915 CHECK_GT(disable_thread_flip_count_, 0U);
916 --disable_thread_flip_count_;
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -0800917 if (disable_thread_flip_count_ == 0) {
918 // Potentially notify the GC thread blocking to begin a thread flip.
919 thread_flip_cond_->Broadcast(self);
920 }
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700921}
922
923void Heap::ThreadFlipBegin(Thread* self) {
924 // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
925 // > 0, block. Otherwise, go ahead.
926 CHECK(kUseReadBarrier);
927 ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
928 MutexLock mu(self, *thread_flip_lock_);
929 bool has_waited = false;
930 uint64_t wait_start = NanoTime();
931 CHECK(!thread_flip_running_);
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -0800932 // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
933 // GC. This is like a writer preference of a reader-writer lock.
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700934 thread_flip_running_ = true;
935 while (disable_thread_flip_count_ > 0) {
936 has_waited = true;
937 thread_flip_cond_->Wait(self);
938 }
939 if (has_waited) {
940 uint64_t wait_time = NanoTime() - wait_start;
941 total_wait_time_ += wait_time;
942 if (wait_time > long_pause_log_threshold_) {
943 LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
944 }
945 }
946}
947
948void Heap::ThreadFlipEnd(Thread* self) {
949 // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
950 // waiting to enter a JNI critical section.
951 CHECK(kUseReadBarrier);
952 MutexLock mu(self, *thread_flip_lock_);
953 CHECK(thread_flip_running_);
954 thread_flip_running_ = false;
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -0800955 // Potentially notify mutator threads blocking to enter a JNI critical section.
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -0700956 thread_flip_cond_->Broadcast(self);
957}
958
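// Illustrative sketch, not from the ART sources: IncrementDisableThreadFlip, DecrementDisableThreadFlip,
// ThreadFlipBegin and ThreadFlipEnd behave like a reader-writer lock with writer preference, where JNI
// critical sections are the readers and the GC's thread flip is the writer. The standalone class below
// models that protocol with standard C++ primitives only; all names are invented and the per-thread
// nesting counter (GetDisableThreadFlipCount) is deliberately omitted.
#include <condition_variable>
#include <cstddef>
#include <mutex>

class ThreadFlipGate {
 public:
  // Mutator side, analogous to IncrementDisableThreadFlip(): block while a flip is running,
  // then register this critical section.
  void EnterJniCritical() {
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [this] { return !flip_running_; });
    ++disable_count_;
  }
  // Mutator side, analogous to DecrementDisableThreadFlip(): the last critical section out
  // wakes a GC thread that may be waiting to start the flip.
  void ExitJniCritical() {
    std::lock_guard<std::mutex> lk(mu_);
    if (--disable_count_ == 0) {
      cv_.notify_all();
    }
  }
  // GC side, analogous to ThreadFlipBegin(): set the flag first so new readers block
  // (writer preference), then wait for in-flight critical sections to drain.
  void BeginFlip() {
    std::unique_lock<std::mutex> lk(mu_);
    flip_running_ = true;
    cv_.wait(lk, [this] { return disable_count_ == 0; });
  }
  // GC side, analogous to ThreadFlipEnd(): let blocked mutators proceed.
  void EndFlip() {
    std::lock_guard<std::mutex> lk(mu_);
    flip_running_ = false;
    cv_.notify_all();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool flip_running_ = false;
  size_t disable_count_ = 0;
};
// Usage: a mutator brackets GetPrimitiveArrayCritical-style work with EnterJniCritical() /
// ExitJniCritical(), while the flipping GC brackets the flip with BeginFlip() / EndFlip().
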
Mathieu Chartierf8cb1782016-03-18 18:45:41 -0700959void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
960 if (old_process_state != new_process_state) {
961 const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
Mathieu Chartier91e30632014-03-25 15:58:50 -0700962 for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
963 // Start at index 1 to avoid "is always false" warning.
964 // Have iteration 1 always transition the collector.
Mathieu Chartierf8cb1782016-03-18 18:45:41 -0700965 TransitionCollector((((i & 1) == 1) == jank_perceptible)
966 ? foreground_collector_type_
967 : background_collector_type_);
Mathieu Chartier91e30632014-03-25 15:58:50 -0700968 usleep(kCollectorTransitionStressWait);
969 }
Mathieu Chartierf8cb1782016-03-18 18:45:41 -0700970 if (jank_perceptible) {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800971 // Transition back to foreground right away to prevent jank.
Mathieu Chartier31f44142014-04-08 14:40:03 -0700972 RequestCollectorTransition(foreground_collector_type_, 0);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800973 } else {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800974 // Don't delay for debug builds since we may want to stress test the GC.
Zuo Wangf37a88b2014-07-10 04:26:41 -0700975 // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
976 // special handling which does a homogeneous space compaction once but then doesn't transition
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -0700977 // the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't
978 // transition the collector.
Zuo Wangf37a88b2014-07-10 04:26:41 -0700979 RequestCollectorTransition(background_collector_type_,
Andreas Gampeed56b5e2017-10-19 12:58:19 -0700980 kStressCollectorTransition
981 ? 0
982 : kCollectorTransitionWait);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800983 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800984 }
Mathieu Chartierca2a24d2013-11-25 15:12:12 -0800985}
986
Mathieu Chartier02b6a782012-10-26 13:51:26 -0700987void Heap::CreateThreadPool() {
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700988 const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
989 if (num_threads != 0) {
Mathieu Chartierbcd5e9d2013-11-13 14:33:28 -0800990 thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700991 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -0700992}
993
Mathieu Chartier590fee92013-09-13 13:46:47 -0700994void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
Mathieu Chartier00b59152014-07-25 10:13:51 -0700995 space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
996 space::ContinuousSpace* space2 = non_moving_space_;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800997 // TODO: Generalize this to n bitmaps?
Mathieu Chartier00b59152014-07-25 10:13:51 -0700998 CHECK(space1 != nullptr);
999 CHECK(space2 != nullptr);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001000 MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001001 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
1002 stack);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001003}
1004
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001005void Heap::DeleteThreadPool() {
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001006 thread_pool_.reset(nullptr);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001007}
1008
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -07001009void Heap::AddSpace(space::Space* space) {
Zuo Wangf37a88b2014-07-10 04:26:41 -07001010 CHECK(space != nullptr);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001011 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1012 if (space->IsContinuousSpace()) {
1013 DCHECK(!space->IsDiscontinuousSpace());
1014 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1015 // Continuous spaces don't necessarily have bitmaps.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001016 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1017 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
Mathieu Chartierecc82302017-02-16 10:20:12 -08001018 // The region space bitmap is not added since VisitObjects visits the region space objects with
1019 // special handling.
1020 if (live_bitmap != nullptr && !space->IsRegionSpace()) {
Mathieu Chartier2796a162014-07-25 11:50:47 -07001021 CHECK(mark_bitmap != nullptr);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001022 live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
1023 mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001024 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001025 continuous_spaces_.push_back(continuous_space);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001026 // Ensure that spaces remain sorted in increasing order of start address.
1027 std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
1028 [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
1029 return a->Begin() < b->Begin();
1030 });
Mathieu Chartier590fee92013-09-13 13:46:47 -07001031 } else {
Mathieu Chartier2796a162014-07-25 11:50:47 -07001032 CHECK(space->IsDiscontinuousSpace());
Mathieu Chartier590fee92013-09-13 13:46:47 -07001033 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001034 live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1035 mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
Mathieu Chartier590fee92013-09-13 13:46:47 -07001036 discontinuous_spaces_.push_back(discontinuous_space);
1037 }
1038 if (space->IsAllocSpace()) {
1039 alloc_spaces_.push_back(space->AsAllocSpace());
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001040 }
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001041}
1042
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -07001043void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
1044 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1045 if (continuous_space->IsDlMallocSpace()) {
1046 dlmalloc_space_ = continuous_space->AsDlMallocSpace();
1047 } else if (continuous_space->IsRosAllocSpace()) {
1048 rosalloc_space_ = continuous_space->AsRosAllocSpace();
1049 }
1050}
1051
1052void Heap::RemoveSpace(space::Space* space) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001053 DCHECK(space != nullptr);
1054 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1055 if (space->IsContinuousSpace()) {
1056 DCHECK(!space->IsDiscontinuousSpace());
1057 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1058 // Continuous spaces don't necessarily have bitmaps.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001059 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1060 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
Mathieu Chartierecc82302017-02-16 10:20:12 -08001061 if (live_bitmap != nullptr && !space->IsRegionSpace()) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001062 DCHECK(mark_bitmap != nullptr);
1063 live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
1064 mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
1065 }
1066 auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
1067 DCHECK(it != continuous_spaces_.end());
1068 continuous_spaces_.erase(it);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001069 } else {
1070 DCHECK(space->IsDiscontinuousSpace());
1071 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001072 live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1073 mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001074 auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
1075 discontinuous_space);
1076 DCHECK(it != discontinuous_spaces_.end());
1077 discontinuous_spaces_.erase(it);
1078 }
1079 if (space->IsAllocSpace()) {
1080 auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
1081 DCHECK(it != alloc_spaces_.end());
1082 alloc_spaces_.erase(it);
1083 }
1084}
1085
Albert Mingkun Yang6e0d3252018-12-10 15:22:45 +00001086double Heap::CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
1087 uint64_t current_process_cpu_time) const {
Albert Mingkun Yang2d7329b2018-11-30 19:58:18 +00001088 uint64_t bytes_allocated = GetBytesAllocated();
Albert Mingkun Yang6e0d3252018-12-10 15:22:45 +00001089 double weight = current_process_cpu_time - gc_last_process_cpu_time_ns;
1090 return weight * bytes_allocated;
1091}
1092
1093void Heap::CalculatePreGcWeightedAllocatedBytes() {
1094 uint64_t current_process_cpu_time = ProcessCpuNanoTime();
1095 pre_gc_weighted_allocated_bytes_ +=
1096 CalculateGcWeightedAllocatedBytes(pre_gc_last_process_cpu_time_ns_, current_process_cpu_time);
1097 pre_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
1098}
1099
1100void Heap::CalculatePostGcWeightedAllocatedBytes() {
1101 uint64_t current_process_cpu_time = ProcessCpuNanoTime();
1102 post_gc_weighted_allocated_bytes_ +=
1103 CalculateGcWeightedAllocatedBytes(post_gc_last_process_cpu_time_ns_, current_process_cpu_time);
1104 post_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
Albert Mingkun Yang2d7329b2018-11-30 19:58:18 +00001105}
1106
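// Illustrative sketch, not from the ART sources: CalculateGcWeightedAllocatedBytes() weights the
// current number of allocated bytes by the process CPU time elapsed since the previous sample, so
// the pre/post-GC sums above approximate the integral of heap size over CPU time. A minimal
// standalone model of that accumulation, with invented names and caller-supplied inputs:
#include <cstdint>

struct WeightedBytesAccumulator {
  uint64_t last_cpu_time_ns = 0;          // Mirrors *_last_process_cpu_time_ns_.
  double weighted_allocated_bytes = 0.0;  // Mirrors *_weighted_allocated_bytes_.

  // Called once per GC with the current process CPU time and the current heap size.
  void Sample(uint64_t now_cpu_time_ns, uint64_t bytes_allocated) {
    const double weight = static_cast<double>(now_cpu_time_ns - last_cpu_time_ns);
    weighted_allocated_bytes += weight * static_cast<double>(bytes_allocated);
    last_cpu_time_ns = now_cpu_time_ns;
  }
};
// Example: samples taken at 1e9 ns and 3e9 ns of process CPU time, with 50 MB allocated at the
// second sample, add 2e9 * 50 MB to the running sum.
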
Albert Mingkun Yangd6e178e2018-11-19 12:58:30 +00001107uint64_t Heap::GetTotalGcCpuTime() {
1108 uint64_t sum = 0;
Albert Mingkun Yang1c42e752018-11-19 16:10:24 +00001109 for (auto* collector : garbage_collectors_) {
Albert Mingkun Yangd6e178e2018-11-19 12:58:30 +00001110 sum += collector->GetTotalCpuTime();
1111 }
1112 return sum;
1113}
1114
Elliott Hughes8b788fe2013-04-17 15:57:01 -07001115void Heap::DumpGcPerformanceInfo(std::ostream& os) {
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001116 // Dump cumulative timings.
Elliott Hughes8b788fe2013-04-17 15:57:01 -07001117 os << "Dumping cumulative Gc timings\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001118 uint64_t total_duration = 0;
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001119 // Dump cumulative loggers for each GC type.
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001120 uint64_t total_paused_time = 0;
Albert Mingkun Yang1c42e752018-11-19 16:10:24 +00001121 for (auto* collector : garbage_collectors_) {
Mathieu Chartier104fa0c2014-08-07 14:26:27 -07001122 total_duration += collector->GetCumulativeTimings().GetTotalNs();
1123 total_paused_time += collector->GetTotalPausedTimeNs();
1124 collector->DumpPerformanceInfo(os);
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001125 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001126 if (total_duration != 0) {
Brian Carlstrom2d888622013-07-18 17:02:00 -07001127 const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
Elliott Hughes8b788fe2013-04-17 15:57:01 -07001128 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
1129 os << "Mean GC size throughput: "
Ian Rogers1d54e732013-05-02 21:10:01 -07001130 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -07001131 os << "Mean GC object throughput: "
Ian Rogers1d54e732013-05-02 21:10:01 -07001132 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001133 }
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001134 uint64_t total_objects_allocated = GetObjectsAllocatedEver();
Mathieu Chartierc30a7252014-08-12 10:13:48 -07001135 os << "Total number of allocations " << total_objects_allocated << "\n";
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07001136 os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
1137 os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
Mathieu Chartierc30a7252014-08-12 10:13:48 -07001138 os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001139 os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
1140 os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
Mathieu Chartierc30a7252014-08-12 10:13:48 -07001141 os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
1142 os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001143 if (HasZygoteSpace()) {
1144 os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
1145 }
Elliott Hughes8b788fe2013-04-17 15:57:01 -07001146 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07001147 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
1148 os << "Total GC count: " << GetGcCount() << "\n";
1149 os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
1150 os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
1151 os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
1152
1153 {
1154 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1155 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1156 os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1157 gc_count_rate_histogram_.DumpBins(os);
1158 os << "\n";
1159 }
1160 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1161 os << "Histogram of blocking GC count per "
1162 << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1163 blocking_gc_count_rate_histogram_.DumpBins(os);
1164 os << "\n";
1165 }
1166 }
1167
Hiroshi Yamauchib62f2e62016-03-23 15:51:24 -07001168 if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
1169 rosalloc_space_->DumpStats(os);
1170 }
1171
Hans Boehmc220f982018-10-12 16:15:45 -07001172 os << "Native bytes total: " << GetNativeBytes()
1173 << " registered: " << native_bytes_registered_.load(std::memory_order_relaxed) << "\n";
1174
1175 os << "Total native bytes at last GC: "
1176 << old_native_bytes_allocated_.load(std::memory_order_relaxed) << "\n";
Mathieu Chartier5d2a3f72016-05-11 11:35:39 -07001177
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001178 BaseMutex::DumpAll(os);
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001179}
1180
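// Illustrative note, not from the ART sources: the "Mean GC size throughput" line above divides
// lifetime bytes freed by total GC duration. A hedged standalone version of the same arithmetic,
// with invented names (GetBytesFreedEver() and the cumulative timings supply the real inputs):
#include <cstdint>

inline double MeanGcSizeThroughputBytesPerSec(uint64_t freed_bytes_ever,
                                              uint64_t total_gc_duration_ns) {
  // E.g. 512 MiB freed over 2 s of cumulative GC time reports roughly 256 MiB/s.
  const double total_seconds = static_cast<double>(total_gc_duration_ns) / 1e9;
  return total_seconds > 0.0 ? static_cast<double>(freed_bytes_ever) / total_seconds : 0.0;
}
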
Hiroshi Yamauchi37670172015-06-10 17:20:54 -07001181void Heap::ResetGcPerformanceInfo() {
Albert Mingkun Yang1c42e752018-11-19 16:10:24 +00001182 for (auto* collector : garbage_collectors_) {
Hiroshi Yamauchi37670172015-06-10 17:20:54 -07001183 collector->ResetMeasurements();
1184 }
Albert Mingkun Yang2d7329b2018-11-30 19:58:18 +00001185
1186 process_cpu_start_time_ns_ = ProcessCpuNanoTime();
Albert Mingkun Yang6e0d3252018-12-10 15:22:45 +00001187
1188 pre_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
1189 pre_gc_weighted_allocated_bytes_ = 0u;
1190
1191 post_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
1192 post_gc_weighted_allocated_bytes_ = 0u;
Albert Mingkun Yang2d7329b2018-11-30 19:58:18 +00001193
Hiroshi Yamauchi37670172015-06-10 17:20:54 -07001194 total_bytes_freed_ever_ = 0;
1195 total_objects_freed_ever_ = 0;
1196 total_wait_time_ = 0;
1197 blocking_gc_count_ = 0;
1198 blocking_gc_time_ = 0;
1199 gc_count_last_window_ = 0;
1200 blocking_gc_count_last_window_ = 0;
1201 last_update_time_gc_count_rate_histograms_ = // Round down by the window duration.
1202 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1203 {
1204 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1205 gc_count_rate_histogram_.Reset();
1206 blocking_gc_count_rate_histogram_.Reset();
1207 }
1208}
1209
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07001210uint64_t Heap::GetGcCount() const {
1211 uint64_t gc_count = 0U;
Albert Mingkun Yang1c42e752018-11-19 16:10:24 +00001212 for (auto* collector : garbage_collectors_) {
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07001213 gc_count += collector->GetCumulativeTimings().GetIterations();
1214 }
1215 return gc_count;
1216}
1217
1218uint64_t Heap::GetGcTime() const {
1219 uint64_t gc_time = 0U;
Albert Mingkun Yang1c42e752018-11-19 16:10:24 +00001220 for (auto* collector : garbage_collectors_) {
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07001221 gc_time += collector->GetCumulativeTimings().GetTotalNs();
1222 }
1223 return gc_time;
1224}
1225
1226uint64_t Heap::GetBlockingGcCount() const {
1227 return blocking_gc_count_;
1228}
1229
1230uint64_t Heap::GetBlockingGcTime() const {
1231 return blocking_gc_time_;
1232}
1233
1234void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1235 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1236 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1237 gc_count_rate_histogram_.DumpBins(os);
1238 }
1239}
1240
1241void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1242 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1243 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1244 blocking_gc_count_rate_histogram_.DumpBins(os);
1245 }
1246}
1247
Andreas Gampe27fa96c2016-10-07 15:05:24 -07001248ALWAYS_INLINE
1249static inline AllocationListener* GetAndOverwriteAllocationListener(
1250 Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
Orion Hodson88591fe2018-03-06 13:35:43 +00001251 return storage->exchange(new_value);
Andreas Gampe27fa96c2016-10-07 15:05:24 -07001252}
1253
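// Illustrative sketch, not from the ART sources: GetAndOverwriteAllocationListener() relies on
// the atomicity of exchange() so that the previous listener is handed back to the caller without
// a window in which two listeners could both be considered installed. A minimal standalone usage
// pattern of that idiom, with an invented Listener type:
#include <atomic>

struct Listener { /* allocation callbacks elided */ };

std::atomic<Listener*> g_listener{nullptr};

// Install a new listener and return whatever was there before.
Listener* SetListener(Listener* new_listener) {
  return g_listener.exchange(new_listener);
}

// Removing is just installing nullptr; the returned pointer lets the caller notify or free the
// old listener safely.
Listener* RemoveListener() {
  return g_listener.exchange(nullptr);
}
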
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001254Heap::~Heap() {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001255 VLOG(heap) << "Starting ~Heap()";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001256 STLDeleteElements(&garbage_collectors_);
1257 // If we don't reset then the mark stack complains in its destructor.
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001258 allocation_stack_->Reset();
Man Cao8c2ff642015-05-27 17:25:30 -07001259 allocation_records_.reset();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001260 live_stack_->Reset();
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001261 STLDeleteValues(&mod_union_tables_);
Mathieu Chartier0767c9a2014-03-26 12:53:19 -07001262 STLDeleteValues(&remembered_sets_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001263 STLDeleteElements(&continuous_spaces_);
1264 STLDeleteElements(&discontinuous_spaces_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001265 delete gc_complete_lock_;
Andreas Gampe6be4f2a2015-11-10 13:34:17 -08001266 delete thread_flip_lock_;
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001267 delete pending_task_lock_;
Mathieu Chartier31000802015-06-14 14:14:37 -07001268 delete backtrace_lock_;
Hans Boehmfb8b4e22018-09-05 16:45:42 -07001269 uint64_t unique_count = unique_backtrace_count_.load();
1270 uint64_t seen_count = seen_backtrace_count_.load();
Orion Hodson88591fe2018-03-06 13:35:43 +00001271 if (unique_count != 0 || seen_count != 0) {
1272 LOG(INFO) << "gc stress unique=" << unique_count << " total=" << (unique_count + seen_count);
Mathieu Chartier31000802015-06-14 14:14:37 -07001273 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001274 VLOG(heap) << "Finished ~Heap()";
Carl Shapiro69759ea2011-07-21 18:13:35 -07001275}
1276
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001277
1278space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001279 for (const auto& space : continuous_spaces_) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001280 if (space->Contains(addr)) {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001281 return space;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001282 }
1283 }
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001284 return nullptr;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001285}
1286
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001287space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1288 bool fail_ok) const {
1289 space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
1290 if (space != nullptr) {
1291 return space;
1292 }
1293 if (!fail_ok) {
1294 LOG(FATAL) << "object " << obj << " not inside any spaces!";
1295 }
1296 return nullptr;
1297}
1298
1299space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
Ian Rogers1d54e732013-05-02 21:10:01 -07001300 bool fail_ok) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001301 for (const auto& space : discontinuous_spaces_) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001302 if (space->Contains(obj.Ptr())) {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001303 return space;
Ian Rogers1d54e732013-05-02 21:10:01 -07001304 }
1305 }
1306 if (!fail_ok) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001307 LOG(FATAL) << "object " << obj << " not inside any spaces!";
Ian Rogers1d54e732013-05-02 21:10:01 -07001308 }
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001309 return nullptr;
Ian Rogers1d54e732013-05-02 21:10:01 -07001310}
1311
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001312space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
Ian Rogers1d54e732013-05-02 21:10:01 -07001313 space::Space* result = FindContinuousSpaceFromObject(obj, true);
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001314 if (result != nullptr) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001315 return result;
1316 }
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001317 return FindDiscontinuousSpaceFromObject(obj, fail_ok);
Ian Rogers1d54e732013-05-02 21:10:01 -07001318}
1319
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001320space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
1321 for (const auto& space : continuous_spaces_) {
1322 if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1323 return space;
1324 }
1325 }
1326 for (const auto& space : discontinuous_spaces_) {
1327 if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1328 return space;
1329 }
1330 }
1331 return nullptr;
1332}
1333
Roland Levillain5fcf1ea2018-10-30 11:58:08 +00001334std::string Heap::DumpSpaceNameFromAddress(const void* addr) const {
1335 space::Space* space = FindSpaceFromAddress(addr);
1336 return (space != nullptr) ? space->GetName() : "no space";
1337}
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001338
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001339void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
Mathieu Chartiere8f3f032016-04-04 16:49:44 -07001340 // If we're in a stack overflow, do not create a new exception. It would require running the
1341 // constructor, which will of course still be in a stack overflow.
1342 if (self->IsHandlingStackOverflow()) {
Roland Levillain7b0e8442018-04-11 18:27:47 +01001343 self->SetException(
1344 Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow());
Mathieu Chartiere8f3f032016-04-04 16:49:44 -07001345 return;
1346 }
1347
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001348 std::ostringstream oss;
Ian Rogersef7d42f2014-01-06 12:55:46 -08001349 size_t total_bytes_free = GetFreeMemory();
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001350 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
Mathieu Chartiera9033d72016-12-01 17:41:17 -08001351 << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
Hans Boehmc220f982018-10-12 16:15:45 -07001352 << " target footprint " << target_footprint_.load(std::memory_order_relaxed)
1353 << ", growth limit "
Mathieu Chartiera9033d72016-12-01 17:41:17 -08001354 << growth_limit_;
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001355 // If the allocation failed due to fragmentation, print out the largest continuous allocation.
Zuo Wangf37a88b2014-07-10 04:26:41 -07001356 if (total_bytes_free >= byte_count) {
Mathieu Chartierb363f662014-07-16 13:28:58 -07001357 space::AllocSpace* space = nullptr;
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001358 if (allocator_type == kAllocatorTypeNonMoving) {
1359 space = non_moving_space_;
1360 } else if (allocator_type == kAllocatorTypeRosAlloc ||
1361 allocator_type == kAllocatorTypeDlMalloc) {
1362 space = main_space_;
Mathieu Chartierb363f662014-07-16 13:28:58 -07001363 } else if (allocator_type == kAllocatorTypeBumpPointer ||
1364 allocator_type == kAllocatorTypeTLAB) {
1365 space = bump_pointer_space_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001366 } else if (allocator_type == kAllocatorTypeRegion ||
1367 allocator_type == kAllocatorTypeRegionTLAB) {
1368 space = region_space_;
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001369 }
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001370 if (space != nullptr) {
1371 space->LogFragmentationAllocFailure(oss, byte_count);
1372 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001373 }
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -07001374 self->ThrowOutOfMemoryError(oss.str().c_str());
1375}
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001376
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001377void Heap::DoPendingCollectorTransition() {
1378 CollectorType desired_collector_type = desired_collector_type_;
Mathieu Chartierb2728552014-09-08 20:08:41 +00001379 // Launch homogeneous space compaction if it is desired.
1380 if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1381 if (!CareAboutPauseTimes()) {
1382 PerformHomogeneousSpaceCompact();
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001383 } else {
1384 VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
Mathieu Chartierb2728552014-09-08 20:08:41 +00001385 }
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -07001386 } else if (desired_collector_type == kCollectorTypeCCBackground) {
1387 DCHECK(kUseReadBarrier);
1388 if (!CareAboutPauseTimes()) {
1389 // Invoke CC full compaction.
1390 CollectGarbageInternal(collector::kGcTypeFull,
1391 kGcCauseCollectorTransition,
Andreas Gampe98ea9d92018-10-19 14:06:15 -07001392 /*clear_soft_references=*/false);
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -07001393 } else {
1394 VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
1395 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001396 } else {
1397 TransitionCollector(desired_collector_type);
Mathieu Chartierb2728552014-09-08 20:08:41 +00001398 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001399}
1400
1401void Heap::Trim(Thread* self) {
Mathieu Chartier8d447252015-10-26 10:21:14 -07001402 Runtime* const runtime = Runtime::Current();
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07001403 if (!CareAboutPauseTimes()) {
1404 // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care
1405 // about pauses.
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001406 ScopedTrace trace("Deflating monitors");
Hiroshi Yamauchi3b1d1b72016-10-12 11:53:57 -07001407 // Avoid race conditions on the lock word for CC.
1408 ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim);
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001409 ScopedSuspendAll ssa(__FUNCTION__);
1410 uint64_t start_time = NanoTime();
1411 size_t count = runtime->GetMonitorList()->DeflateMonitors();
1412 VLOG(heap) << "Deflating " << count << " monitors took "
1413 << PrettyDuration(NanoTime() - start_time);
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07001414 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001415 TrimIndirectReferenceTables(self);
1416 TrimSpaces(self);
Mathieu Chartier8d447252015-10-26 10:21:14 -07001417 // Trim arenas that may have been used by JIT or verifier.
Mathieu Chartier8d447252015-10-26 10:21:14 -07001418 runtime->GetArenaPool()->TrimMaps();
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08001419}
1420
Mathieu Chartier91c2f0c2014-11-26 11:21:15 -08001421class TrimIndirectReferenceTableClosure : public Closure {
1422 public:
1423 explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1424 }
Roland Levillainf73caca2018-08-24 17:19:07 +01001425 void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
Ian Rogers55256cb2017-12-21 17:07:11 -08001426 thread->GetJniEnv()->TrimLocals();
Lei Lidd9943d2015-02-02 14:24:44 +08001427 // If the thread is a running mutator, then act on behalf of the trim thread.
1428 // See the code in ThreadList::RunCheckpoint.
Mathieu Chartier10d25082015-10-28 18:36:09 -07001429 barrier_->Pass(Thread::Current());
Mathieu Chartier91c2f0c2014-11-26 11:21:15 -08001430 }
1431
1432 private:
1433 Barrier* const barrier_;
1434};
1435
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001436void Heap::TrimIndirectReferenceTables(Thread* self) {
1437 ScopedObjectAccess soa(self);
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001438 ScopedTrace trace(__PRETTY_FUNCTION__);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001439 JavaVMExt* vm = soa.Vm();
1440 // Trim globals indirect reference table.
1441 vm->TrimGlobals();
1442 // Trim locals indirect reference tables.
1443 Barrier barrier(0);
1444 TrimIndirectReferenceTableClosure closure(&barrier);
1445 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1446 size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
Lei Lidd9943d2015-02-02 14:24:44 +08001447 if (barrier_count != 0) {
1448 barrier.Increment(self, barrier_count);
1449 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001450}
Mathieu Chartier91c2f0c2014-11-26 11:21:15 -08001451
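// Illustrative sketch, not from the ART sources: TrimIndirectReferenceTableClosure plus
// RunCheckpoint() is the usual ART checkpoint pattern; every mutator runs the closure and calls
// Pass() on the barrier, while the requesting thread waits in Increment() for that many passes.
// A rough standalone stand-in for the barrier half of that protocol, using only standard C++
// (the real art::Barrier has a richer interface):
#include <condition_variable>
#include <cstddef>
#include <mutex>

class SimpleBarrier {
 public:
  // Called by each checkpointed thread once its closure has run.
  void Pass() {
    std::lock_guard<std::mutex> lk(mu_);
    ++passed_;
    cv_.notify_all();
  }
  // Called by the requesting thread; blocks until `expected` threads have passed.
  void Increment(size_t expected) {
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [&] { return passed_ >= expected; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  size_t passed_ = 0;
};
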
Mathieu Chartieraa516822015-10-02 15:53:37 -07001452void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
Mathieu Chartierb93d5b12017-05-19 13:05:06 -07001453 // Need to do this before acquiring the locks since we don't want to get suspended while
1454 // holding any locks.
1455 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartieraa516822015-10-02 15:53:37 -07001456 MutexLock mu(self, *gc_complete_lock_);
1457 // Ensure there is only one GC at a time.
1458 WaitForGcToCompleteLocked(cause, self);
1459 collector_type_running_ = collector_type;
Mathieu Chartier40112dd2017-06-26 17:49:09 -07001460 last_gc_cause_ = cause;
Mathieu Chartier183009a2017-02-16 21:19:28 -08001461 thread_running_gc_ = self;
Mathieu Chartieraa516822015-10-02 15:53:37 -07001462}
1463
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001464void Heap::TrimSpaces(Thread* self) {
Mathieu Chartierb93d5b12017-05-19 13:05:06 -07001465 // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1466 // trimming.
1467 StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001468 ScopedTrace trace(__PRETTY_FUNCTION__);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001469 const uint64_t start_ns = NanoTime();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001470 // Trim the managed spaces.
1471 uint64_t total_alloc_space_allocated = 0;
1472 uint64_t total_alloc_space_size = 0;
1473 uint64_t managed_reclaimed = 0;
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08001474 {
1475 ScopedObjectAccess soa(self);
1476 for (const auto& space : continuous_spaces_) {
1477 if (space->IsMallocSpace()) {
1478 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1479 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1480 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1481 // for a long period of time.
1482 managed_reclaimed += malloc_space->Trim();
1483 }
1484 total_alloc_space_size += malloc_space->Size();
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001485 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001486 }
1487 }
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001488 total_alloc_space_allocated = GetBytesAllocated();
1489 if (large_object_space_ != nullptr) {
1490 total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1491 }
Mathieu Chartier31f44142014-04-08 14:40:03 -07001492 if (bump_pointer_space_ != nullptr) {
1493 total_alloc_space_allocated -= bump_pointer_space_->Size();
1494 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001495 if (region_space_ != nullptr) {
1496 total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1497 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001498 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1499 static_cast<float>(total_alloc_space_size);
1500 uint64_t gc_heap_end_ns = NanoTime();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001501 // We never move things in the native heap, so we can finish the GC at this point.
1502 FinishGC(self, collector::kGcTypeNone);
Ian Rogers872dd822014-10-30 11:19:14 -07001503
Mathieu Chartier590fee92013-09-13 13:46:47 -07001504 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
Dimitry Ivanove6465bc2015-12-14 18:55:02 -08001505 << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
1506 << static_cast<int>(100 * managed_utilization) << "%.";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001507}
1508
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001509bool Heap::IsValidObjectAddress(const void* addr) const {
1510 if (addr == nullptr) {
Elliott Hughes88c5c352012-03-15 18:49:48 -07001511 return true;
1512 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001513 return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001514}
1515
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001516bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
1517 return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
Mathieu Chartierd68ac702014-02-11 14:50:51 -08001518}
1519
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001520bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
1521 bool search_allocation_stack,
1522 bool search_live_stack,
1523 bool sorted) {
1524 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
Mathieu Chartier15d34022014-02-26 17:16:38 -08001525 return false;
1526 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001527 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001528 mirror::Class* klass = obj->GetClass<kVerifyNone>();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001529 if (obj == klass) {
Mathieu Chartier9be9a7a2014-01-24 14:07:33 -08001530 // This case happens for java.lang.Class.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001531 return true;
1532 }
1533 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001534 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001535 // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1536 // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001537 return temp_space_->Contains(obj.Ptr());
Ian Rogers1d54e732013-05-02 21:10:01 -07001538 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001539 if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001540 return true;
1541 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001542 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001543 space::DiscontinuousSpace* d_space = nullptr;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001544 if (c_space != nullptr) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001545 if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001546 return true;
1547 }
1548 } else {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001549 d_space = FindDiscontinuousSpaceFromObject(obj, true);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001550 if (d_space != nullptr) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001551 if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001552 return true;
1553 }
1554 }
1555 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001556 // This is covering the allocation/live stack swapping that is done without mutators suspended.
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001557 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1558 if (i > 0) {
1559 NanoSleep(MsToNs(10));
Ian Rogers1d54e732013-05-02 21:10:01 -07001560 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001561 if (search_allocation_stack) {
1562 if (sorted) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001563 if (allocation_stack_->ContainsSorted(obj.Ptr())) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001564 return true;
1565 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001566 } else if (allocation_stack_->Contains(obj.Ptr())) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001567 return true;
1568 }
1569 }
1570
1571 if (search_live_stack) {
1572 if (sorted) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001573 if (live_stack_->ContainsSorted(obj.Ptr())) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001574 return true;
1575 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001576 } else if (live_stack_->Contains(obj.Ptr())) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001577 return true;
1578 }
1579 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001580 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001581 // We need to check the bitmaps again since there is a race where we mark something as live and
1582 // then clear the stack containing it.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001583 if (c_space != nullptr) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001584 if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001585 return true;
1586 }
1587 } else {
1588 d_space = FindDiscontinuousSpaceFromObject(obj, true);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001589 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001590 return true;
1591 }
1592 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001593 return false;
Elliott Hughes6a5bd492011-10-28 14:33:57 -07001594}
1595
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07001596std::string Heap::DumpSpaces() const {
1597 std::ostringstream oss;
1598 DumpSpaces(oss);
1599 return oss.str();
1600}
1601
1602void Heap::DumpSpaces(std::ostream& stream) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001603 for (const auto& space : continuous_spaces_) {
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001604 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1605 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001606 stream << space << " " << *space << "\n";
1607 if (live_bitmap != nullptr) {
1608 stream << live_bitmap << " " << *live_bitmap << "\n";
1609 }
1610 if (mark_bitmap != nullptr) {
1611 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1612 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001613 }
Mathieu Chartier02e25112013-08-14 16:14:24 -07001614 for (const auto& space : discontinuous_spaces_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001615 stream << space << " " << *space << "\n";
Mathieu Chartier128c52c2012-10-16 14:12:41 -07001616 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001617}
1618
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001619void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
Stephen Hines22c6a812014-07-16 11:03:43 -07001620 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1621 return;
1622 }
1623
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001624 // Ignore early dawn of the universe verifications.
Orion Hodson88591fe2018-03-06 13:35:43 +00001625 if (UNLIKELY(num_bytes_allocated_.load(std::memory_order_relaxed) < 10 * KB)) {
Ian Rogers62d6c772013-02-27 08:32:07 -08001626 return;
1627 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001628 CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001629 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
Mathieu Chartier4e305412014-02-19 10:54:44 -08001630 CHECK(c != nullptr) << "Null class in object " << obj;
Roland Levillain14d90572015-07-16 10:52:26 +01001631 CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001632 CHECK(VerifyClassClass(c));
Mathieu Chartier0325e622012-09-05 14:22:51 -07001633
Mathieu Chartier4e305412014-02-19 10:54:44 -08001634 if (verify_object_mode_ > kVerifyObjectModeFast) {
1635 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07001636 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
Mathieu Chartierdcf8d722012-08-02 14:55:54 -07001637 }
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001638}
1639
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001640void Heap::VerifyHeap() {
Ian Rogers50b35e22012-10-04 10:09:15 -07001641 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
Andreas Gampe0c183382017-07-13 22:26:24 -07001642 auto visitor = [&](mirror::Object* obj) {
1643 VerifyObjectBody(obj);
1644 };
1645 // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already
1646 // NO_THREAD_SAFETY_ANALYSIS.
1647 auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS {
1648 GetLiveBitmap()->Visit(visitor);
1649 };
1650 no_thread_safety_analysis();
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001651}
1652
Mathieu Chartiere76e70f2014-05-02 16:35:37 -07001653void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
Mathieu Chartier601276a2014-03-20 15:12:30 -07001654 // Use signed comparison since freed bytes can be negative when background-to-foreground
1655 // compaction transitions occur. This is because moving objects from a bump pointer space to a
1656 // free list backed space typically increases memory footprint due to padding and binning.
Hans Boehmfb8b4e22018-09-05 16:45:42 -07001657 RACING_DCHECK_LE(freed_bytes,
1658 static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed)));
Mathieu Chartiere76e70f2014-05-02 16:35:37 -07001659 // Note: This relies on 2s complement for handling negative freed_bytes.
Hans Boehmfb8b4e22018-09-05 16:45:42 -07001660 num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes), std::memory_order_relaxed);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001661 if (Runtime::Current()->HasStatsEnabled()) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001662 RuntimeStats* thread_stats = Thread::Current()->GetStats();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001663 thread_stats->freed_objects += freed_objects;
Elliott Hughes307f75d2011-10-12 18:04:40 -07001664 thread_stats->freed_bytes += freed_bytes;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001665 // TODO: Do this concurrently.
1666 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1667 global_stats->freed_objects += freed_objects;
1668 global_stats->freed_bytes += freed_bytes;
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001669 }
Carl Shapiro58551df2011-07-24 03:09:51 -07001670}
1671
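// Illustrative note, not from the ART sources: RecordFree() intentionally passes a possibly
// negative freed_bytes into an unsigned fetch_sub; with two's-complement wrap-around, subtracting
// a negative delta grows the counter, which is exactly what a foreground transition needs. A tiny
// standalone example of the same idiom, with invented names:
#include <atomic>
#include <cstddef>
#include <cstdint>

std::atomic<size_t> g_bytes_allocated{1000};

void ApplySignedDelta(int64_t freed_bytes) {
  // With freed_bytes == -16 the counter ends up at 1016; with freed_bytes == 16 it ends up at 984.
  g_bytes_allocated.fetch_sub(static_cast<size_t>(freed_bytes), std::memory_order_relaxed);
}
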
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001672void Heap::RecordFreeRevoke() {
1673 // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
Roland Levillainef012222017-06-21 16:28:06 +01001674 // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001675 // If there's a concurrent revoke, ok to not necessarily reset num_bytes_freed_revoke_
1676 // all the way to zero exactly as the remainder will be subtracted at the next GC.
Hans Boehmfb8b4e22018-09-05 16:45:42 -07001677 size_t bytes_freed = num_bytes_freed_revoke_.load(std::memory_order_relaxed);
1678 CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed, std::memory_order_relaxed),
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001679 bytes_freed) << "num_bytes_freed_revoke_ underflow";
Hans Boehmfb8b4e22018-09-05 16:45:42 -07001680 CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed, std::memory_order_relaxed),
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001681 bytes_freed) << "num_bytes_allocated_ underflow";
1682 GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1683}
1684
Zuo Wangf37a88b2014-07-10 04:26:41 -07001685space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08001686 if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1687 return rosalloc_space_;
1688 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07001689 for (const auto& space : continuous_spaces_) {
1690 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1691 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1692 return space->AsContinuousSpace()->AsRosAllocSpace();
1693 }
1694 }
1695 }
1696 return nullptr;
1697}
1698
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001699static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001700 instrumentation::Instrumentation* const instrumentation =
1701 Runtime::Current()->GetInstrumentation();
1702 return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
1703}
1704
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001705mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
1706 AllocatorType allocator,
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001707 bool instrumented,
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001708 size_t alloc_size,
1709 size_t* bytes_allocated,
Ian Rogers6fac4472014-02-25 17:01:10 -08001710 size_t* usable_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001711 size_t* bytes_tl_bulk_allocated,
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001712 ObjPtr<mirror::Class>* klass) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001713 bool was_default_allocator = allocator == GetCurrentAllocator();
Mathieu Chartierf4f38432014-09-03 11:21:08 -07001714 // Make sure there is no pending exception since we may need to throw an OOME.
1715 self->AssertNoPendingException();
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001716 DCHECK(klass != nullptr);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001717 StackHandleScope<1> hs(self);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001718 HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001719 // The allocation failed. If the GC is running, block until it completes, and then retry the
1720 // allocation.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07001721 collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001722 // If we were the default allocator but the allocator changed while we were suspended,
1723 // abort the allocation.
1724 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1725 (!instrumented && EntrypointsInstrumented())) {
1726 return nullptr;
1727 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001728 if (last_gc != collector::kGcTypeNone) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001729 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001730 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001731 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001732 if (ptr != nullptr) {
1733 return ptr;
1734 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001735 }
1736
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001737 collector::GcType tried_type = next_gc_type_;
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001738 const bool gc_ran =
1739 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001740 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1741 (!instrumented && EntrypointsInstrumented())) {
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001742 return nullptr;
1743 }
1744 if (gc_ran) {
1745 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001746 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001747 if (ptr != nullptr) {
1748 return ptr;
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001749 }
1750 }
1751
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001752 // Loop through our different GC types and try to GC until we get enough free memory.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001753 for (collector::GcType gc_type : gc_plan_) {
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001754 if (gc_type == tried_type) {
1755 continue;
1756 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001757 // Attempt to run the collector, if we succeed, re-try the allocation.
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001758 const bool plan_gc_ran =
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001759 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001760 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1761 (!instrumented && EntrypointsInstrumented())) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001762 return nullptr;
1763 }
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001764 if (plan_gc_ran) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001765 // Did we free sufficient memory for the allocation to succeed?
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001766 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001767 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001768 if (ptr != nullptr) {
1769 return ptr;
1770 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001771 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001772 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001773 // Allocations have failed after GCs; this is an exceptional state.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001774 // Try harder, growing the heap if necessary.
1775 mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001776 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001777 if (ptr != nullptr) {
1778 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001779 }
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001780 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1781 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1782 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1783 // OOME.
1784 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1785 << " allocation";
1786 // TODO: Run finalization, but this may cause more allocations to occur.
1787 // We don't need a WaitForGcToComplete here either.
1788 DCHECK(!gc_plan_.empty());
1789 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001790 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1791 (!instrumented && EntrypointsInstrumented())) {
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001792 return nullptr;
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001793 }
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001794 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
1795 bytes_tl_bulk_allocated);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001796 if (ptr == nullptr) {
Zuo Wangf37a88b2014-07-10 04:26:41 -07001797 const uint64_t current_time = NanoTime();
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001798 switch (allocator) {
1799 case kAllocatorTypeRosAlloc:
1800 // Fall-through.
1801 case kAllocatorTypeDlMalloc: {
1802 if (use_homogeneous_space_compaction_for_oom_ &&
1803 current_time - last_time_homogeneous_space_compaction_by_oom_ >
1804 min_interval_homogeneous_space_compaction_by_oom_) {
1805 last_time_homogeneous_space_compaction_by_oom_ = current_time;
1806 HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001807 // Thread suspension could have occurred.
1808 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1809 (!instrumented && EntrypointsInstrumented())) {
1810 return nullptr;
1811 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001812 switch (result) {
1813 case HomogeneousSpaceCompactResult::kSuccess:
1814 // If the allocation succeeded, we delayed an oom.
1815 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001816 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001817 if (ptr != nullptr) {
1818 count_delayed_oom_++;
1819 }
1820 break;
1821 case HomogeneousSpaceCompactResult::kErrorReject:
1822 // Reject due to disabled moving GC.
1823 break;
1824 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1825 // Throw OOM by default.
1826 break;
1827 default: {
Ian Rogers2c4257b2014-10-24 14:20:06 -07001828 UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1829 << static_cast<size_t>(result);
1830 UNREACHABLE();
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001831 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07001832 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001833 // Always print that we ran homogeneous space compaction since this can cause jank.
1834 VLOG(heap) << "Ran heap homogeneous space compaction, "
1835 << " requested defragmentation "
Orion Hodson88591fe2018-03-06 13:35:43 +00001836 << count_requested_homogeneous_space_compaction_.load()
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001837 << " performed defragmentation "
Orion Hodson88591fe2018-03-06 13:35:43 +00001838 << count_performed_homogeneous_space_compaction_.load()
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001839 << " ignored homogeneous space compaction "
Orion Hodson88591fe2018-03-06 13:35:43 +00001840 << count_ignored_homogeneous_space_compaction_.load()
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001841 << " delayed count = "
Orion Hodson88591fe2018-03-06 13:35:43 +00001842 << count_delayed_oom_.load();
Zuo Wangf37a88b2014-07-10 04:26:41 -07001843 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001844 break;
Zuo Wangf37a88b2014-07-10 04:26:41 -07001845 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001846 case kAllocatorTypeNonMoving: {
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -07001847 if (kUseReadBarrier) {
1848 // DisableMovingGc() isn't compatible with CC.
1849 break;
1850 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001851 // Try to transition the heap if the allocation failure was due to the space being full.
Andreas Gampe98ea9d92018-10-19 14:06:15 -07001852 if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow=*/ false)) {
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001853 // If we aren't out of memory then the OOM was probably from the non moving space being
1854 // full. Attempt to disable compaction and turn the main space into a non moving space.
1855 DisableMovingGc();
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001856 // Thread suspension could have occurred.
1857 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1858 (!instrumented && EntrypointsInstrumented())) {
1859 return nullptr;
1860 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001861 // If we are still a moving GC then something must have caused the transition to fail.
1862 if (IsMovingGc(collector_type_)) {
1863 MutexLock mu(self, *gc_complete_lock_);
1864 // If we couldn't disable moving GC, just throw OOME and return null.
1865 LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1866 << disable_moving_gc_count_;
1867 } else {
1868 LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1869 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001870 usable_size, bytes_tl_bulk_allocated);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001871 }
1872 }
1873 break;
1874 }
1875 default: {
1876 // Do nothing for other allocators.
1877 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07001878 }
1879 }
1880 // If the allocation hasn't succeeded by this point, throw an OOM error.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001881 if (ptr == nullptr) {
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001882 ThrowOutOfMemoryError(self, alloc_size, allocator);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001883 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001884 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001885}
1886
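// Sets the target heap utilization (ratio of live bytes to heap size) used when sizing the heap
// after a GC. The DCHECKs below require a value in (0.1, 1.0).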
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001887void Heap::SetTargetHeapUtilization(float target) {
Hans Boehmc220f982018-10-12 16:15:45 -07001888 DCHECK_GT(target, 0.1f); // asserted in Java code
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001889 DCHECK_LT(target, 1.0f);
1890 target_utilization_ = target;
1891}
1892
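// Returns the number of objects currently allocated, summed across all alloc spaces. Suspends
// all threads while inspecting the spaces, so this is a comparatively expensive call.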
Ian Rogers1d54e732013-05-02 21:10:01 -07001893size_t Heap::GetObjectsAllocated() const {
Mathieu Chartier4f55e222015-09-04 13:26:21 -07001894 Thread* const self = Thread::Current();
Mathieu Chartierb43390c2015-05-12 10:47:11 -07001895 ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
Roland Levillainef012222017-06-21 16:28:06 +01001896 // Prevent GC running during GetObjectsAllocated since we may get a checkpoint request that tells
Mathieu Chartiere8649c72017-03-03 18:02:18 -08001897 // us to suspend while we are doing SuspendAll. b/35232978
1898 gc::ScopedGCCriticalSection gcs(Thread::Current(),
1899 gc::kGcCauseGetObjectsAllocated,
1900 gc::kCollectorTypeGetObjectsAllocated);
Mathieu Chartierb43390c2015-05-12 10:47:11 -07001901 // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
Mathieu Chartier4f55e222015-09-04 13:26:21 -07001902 ScopedSuspendAll ssa(__FUNCTION__);
1903 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001904 size_t total = 0;
Mathieu Chartier4f55e222015-09-04 13:26:21 -07001905 for (space::AllocSpace* space : alloc_spaces_) {
1906 total += space->GetObjectsAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001907 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001908 return total;
1909}
1910
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001911uint64_t Heap::GetObjectsAllocatedEver() const {
Mathieu Chartier4edd8472015-06-01 10:47:36 -07001912 uint64_t total = GetObjectsFreedEver();
1913 // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1914 if (Thread::Current() != nullptr) {
1915 total += GetObjectsAllocated();
1916 }
1917 return total;
Ian Rogers1d54e732013-05-02 21:10:01 -07001918}
1919
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001920uint64_t Heap::GetBytesAllocatedEver() const {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001921 return GetBytesFreedEver() + GetBytesAllocated();
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001922}
1923
Richard Uhler660be6f2017-11-22 16:12:29 +00001924// Check whether the given object is an instance of the given class.
1925static bool MatchesClass(mirror::Object* obj,
1926 Handle<mirror::Class> h_class,
1927 bool use_is_assignable_from) REQUIRES_SHARED(Locks::mutator_lock_) {
1928 mirror::Class* instance_class = obj->GetClass();
1929 CHECK(instance_class != nullptr);
1930 ObjPtr<mirror::Class> klass = h_class.Get();
1931 if (use_is_assignable_from) {
1932 return klass != nullptr && klass->IsAssignableFrom(instance_class);
1933 }
1934 return instance_class == klass;
1935}
1936
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001937void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
1938 bool use_is_assignable_from,
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001939 uint64_t* counts) {
Andreas Gampe1c158a02017-07-13 17:26:19 -07001940 auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
Andreas Gampe1c158a02017-07-13 17:26:19 -07001941 for (size_t i = 0; i < classes.size(); ++i) {
Richard Uhler660be6f2017-11-22 16:12:29 +00001942 if (MatchesClass(obj, classes[i], use_is_assignable_from)) {
Andreas Gampe1c158a02017-07-13 17:26:19 -07001943 ++counts[i];
Elliott Hughes3b78c942013-01-15 17:35:41 -08001944 }
1945 }
Andreas Gampe1c158a02017-07-13 17:26:19 -07001946 };
1947 VisitObjects(instance_counter);
Elliott Hughes3b78c942013-01-15 17:35:41 -08001948}
1949
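// Collects handles for instances of h_class into |instances|, up to max_count (0 means no limit).
// When use_is_assignable_from is true, instances of subclasses are matched as well.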
Andreas Gampe1c158a02017-07-13 17:26:19 -07001950void Heap::GetInstances(VariableSizedHandleScope& scope,
1951 Handle<mirror::Class> h_class,
Richard Uhler660be6f2017-11-22 16:12:29 +00001952 bool use_is_assignable_from,
Andreas Gampe1c158a02017-07-13 17:26:19 -07001953 int32_t max_count,
1954 std::vector<Handle<mirror::Object>>& instances) {
1955 DCHECK_GE(max_count, 0);
1956 auto instance_collector = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
Richard Uhler660be6f2017-11-22 16:12:29 +00001957 if (MatchesClass(obj, h_class, use_is_assignable_from)) {
Andreas Gampe1c158a02017-07-13 17:26:19 -07001958 if (max_count == 0 || instances.size() < static_cast<size_t>(max_count)) {
1959 instances.push_back(scope.NewHandle(obj));
1960 }
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001961 }
Andreas Gampe1c158a02017-07-13 17:26:19 -07001962 };
1963 VisitObjects(instance_collector);
1964}
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001965
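// Collects handles for objects holding a reference to |o| into |referring_objects|, up to
// max_count (0 means no limit).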
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -07001966void Heap::GetReferringObjects(VariableSizedHandleScope& scope,
1967 Handle<mirror::Object> o,
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001968 int32_t max_count,
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -07001969 std::vector<Handle<mirror::Object>>& referring_objects) {
Andreas Gampe1c158a02017-07-13 17:26:19 -07001970 class ReferringObjectsFinder {
1971 public:
1972 ReferringObjectsFinder(VariableSizedHandleScope& scope_in,
1973 Handle<mirror::Object> object_in,
1974 int32_t max_count_in,
1975 std::vector<Handle<mirror::Object>>& referring_objects_in)
1976 REQUIRES_SHARED(Locks::mutator_lock_)
1977 : scope_(scope_in),
1978 object_(object_in),
1979 max_count_(max_count_in),
1980 referring_objects_(referring_objects_in) {}
1981
1982 // For Object::VisitReferences.
1983 void operator()(ObjPtr<mirror::Object> obj,
1984 MemberOffset offset,
1985 bool is_static ATTRIBUTE_UNUSED) const
1986 REQUIRES_SHARED(Locks::mutator_lock_) {
1987 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
1988 if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1989 referring_objects_.push_back(scope_.NewHandle(obj));
1990 }
1991 }
1992
1993 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
1994 const {}
1995 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
1996
1997 private:
1998 VariableSizedHandleScope& scope_;
1999 Handle<mirror::Object> const object_;
2000 const uint32_t max_count_;
2001 std::vector<Handle<mirror::Object>>& referring_objects_;
2002 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
2003 };
Mathieu Chartieraea9bfb2016-10-12 19:19:56 -07002004 ReferringObjectsFinder finder(scope, o, max_count, referring_objects);
Andreas Gampe1c158a02017-07-13 17:26:19 -07002005 auto referring_objects_finder = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2006 obj->VisitReferences(finder, VoidFunctor());
2007 };
2008 VisitObjects(referring_objects_finder);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08002009}
2010
Andreas Gampe94c589d2017-12-27 12:43:01 -08002011void Heap::CollectGarbage(bool clear_soft_references, GcCause cause) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07002012 // Even if we waited for a GC we still need to do another GC since weaks allocated during the
2013 // last GC will not have necessarily been cleared.
Andreas Gampe94c589d2017-12-27 12:43:01 -08002014 CollectGarbageInternal(gc_plan_.back(), cause, clear_soft_references);
Carl Shapiro69759ea2011-07-21 18:13:35 -07002015}
2016
Mathieu Chartierdb00eaf2015-08-31 17:10:05 -07002017bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
2018 return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
2019 foreground_collector_type_ == kCollectorTypeCMS;
2020}
2021
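// Defragments the main malloc space by copying it into the backup mem map and swapping the two
// spaces. Returns a result code; the request is rejected if moving GC is disabled or the current
// configuration does not support the compaction.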
Zuo Wangf37a88b2014-07-10 04:26:41 -07002022HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
2023 Thread* self = Thread::Current();
2024 // Inc requested homogeneous space compaction.
2025 count_requested_homogeneous_space_compaction_++;
2026 // Store performed homogeneous space compaction at a new request arrival.
Zuo Wangf37a88b2014-07-10 04:26:41 -07002027 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Yi Konge11d50f2018-01-09 16:55:04 -08002028 // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
2029 // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
2030 // http://b/71769596
2031 // Locks::mutator_lock_->AssertNotHeld(self);
Zuo Wangf37a88b2014-07-10 04:26:41 -07002032 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08002033 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
Zuo Wangf37a88b2014-07-10 04:26:41 -07002034 MutexLock mu(self, *gc_complete_lock_);
2035 // Ensure there is only one GC at a time.
2036 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
Roland Levillain2ae376f2018-01-30 11:35:11 +00002037 // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC
2038 // disable count is non-zero.
2039 // If the collector type changed to something which doesn't benefit from homogeneous space
2040 // compaction, exit.
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07002041 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
2042 !main_space_->CanMoveObjects()) {
Mathieu Chartierdb00eaf2015-08-31 17:10:05 -07002043 return kErrorReject;
2044 }
2045 if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
2046 return kErrorUnsupported;
Zuo Wangf37a88b2014-07-10 04:26:41 -07002047 }
2048 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
2049 }
2050 if (Runtime::Current()->IsShuttingDown(self)) {
2051 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2052 // cause objects to get finalized.
2053 FinishGC(self, collector::kGcTypeNone);
2054 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
2055 }
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002056 collector::GarbageCollector* collector;
2057 {
2058 ScopedSuspendAll ssa(__FUNCTION__);
2059 uint64_t start_time = NanoTime();
2060 // Launch compaction.
2061 space::MallocSpace* to_space = main_space_backup_.release();
2062 space::MallocSpace* from_space = main_space_;
2063 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2064 const uint64_t space_size_before_compaction = from_space->Size();
2065 AddSpace(to_space);
2066 // Make sure that we will have enough room to copy.
2067 CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
2068 collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
2069 const uint64_t space_size_after_compaction = to_space->Size();
2070 main_space_ = to_space;
2071 main_space_backup_.reset(from_space);
2072 RemoveSpace(from_space);
2073 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
2074 // Update performed homogeneous space compaction count.
2075 count_performed_homogeneous_space_compaction_++;
2076 // Print statistics and resume all threads.
2077 uint64_t duration = NanoTime() - start_time;
2078 VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
2079 << PrettySize(space_size_before_compaction) << " -> "
2080 << PrettySize(space_size_after_compaction) << " compact-ratio: "
2081 << std::fixed << static_cast<double>(space_size_after_compaction) /
2082 static_cast<double>(space_size_before_compaction);
2083 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07002084 // Finish GC.
Mathieu Chartier3cf22532015-07-09 15:15:09 -07002085 reference_processor_->EnqueueClearedReferences(self);
Zuo Wangf37a88b2014-07-10 04:26:41 -07002086 GrowForUtilization(semi_space_collector_);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002087 LogGC(kGcCauseHomogeneousSpaceCompact, collector);
Zuo Wangf37a88b2014-07-10 04:26:41 -07002088 FinishGC(self, collector::kGcTypeFull);
Mathieu Chartier598302a2015-09-23 14:52:39 -07002089 {
2090 ScopedObjectAccess soa(self);
2091 soa.Vm()->UnloadNativeLibraries();
2092 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07002093 return HomogeneousSpaceCompactResult::kSuccess;
2094}
2095
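// Transitions the heap to a different collector type, copying the heap contents between the bump
// pointer space and the malloc-backed main space when switching between moving and non-moving
// collectors. Not supported with the concurrent copying collector.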
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002096void Heap::TransitionCollector(CollectorType collector_type) {
2097 if (collector_type == collector_type_) {
2098 return;
2099 }
Hiroshi Yamauchia01d0662016-08-30 17:44:41 -07002100 // Collector transitions must not happen with CC.
2101 CHECK(!kUseReadBarrier);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002102 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
2103 << " -> " << static_cast<int>(collector_type);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002104 uint64_t start_time = NanoTime();
Hans Boehmfb8b4e22018-09-05 16:45:42 -07002105 uint32_t before_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002106 Runtime* const runtime = Runtime::Current();
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002107 Thread* const self = Thread::Current();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002108 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Yi Konge11d50f2018-01-09 16:55:04 -08002109 // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
2110 // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
2111 // http://b/71769596
2112 // Locks::mutator_lock_->AssertNotHeld(self);
Mathieu Chartier1d27b342014-01-28 12:51:09 -08002113 // Busy wait until we can GC (StartGC can fail if we have a non-zero
2114 // compacting_gc_disable_count_; this should rarely occur).
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002115 for (;;) {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002116 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08002117 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002118 MutexLock mu(self, *gc_complete_lock_);
2119 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002120 WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
Mathieu Chartiere4927f62014-08-23 13:56:03 -07002121 // Currently we only need a heap transition if we switch from a moving collector to a
2122 // non-moving one, or visa versa.
2123 const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
Mathieu Chartierb38d4832014-04-10 10:56:55 -07002124 // If someone else beat us to it and changed the collector before we could, exit.
2125 // This is safe to do before the suspend all since we set the collector_type_running_ before
2126 // we exit the loop. If another thread attempts to do the heap transition before we exit,
2127 // then it would get blocked on WaitForGcToCompleteLocked.
2128 if (collector_type == collector_type_) {
2129 return;
2130 }
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002131 // GC can be disabled if someone has a used GetPrimitiveArrayCritical but not yet released.
2132 if (!copying_transition || disable_moving_gc_count_ == 0) {
2133 // TODO: Not hard code in semi-space collector?
2134 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
2135 break;
2136 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002137 }
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002138 usleep(1000);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002139 }
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002140 if (runtime->IsShuttingDown(self)) {
Hiroshi Yamauchia6a8d142014-05-12 16:57:33 -07002141 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2142 // cause objects to get finalized.
2143 FinishGC(self, collector::kGcTypeNone);
2144 return;
2145 }
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002146 collector::GarbageCollector* collector = nullptr;
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002147 {
2148 ScopedSuspendAll ssa(__FUNCTION__);
2149 switch (collector_type) {
2150 case kCollectorTypeSS: {
2151 if (!IsMovingGc(collector_type_)) {
2152 // Create the bump pointer space from the backup space.
2153 CHECK(main_space_backup_ != nullptr);
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002154 MemMap mem_map = main_space_backup_->ReleaseMemMap();
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002155 // We are transitioning from non moving GC -> moving GC, since we copied from the bump
2156 // pointer space last transition it will be protected.
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002157 CHECK(mem_map.IsValid());
2158 mem_map.Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002159 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002160 std::move(mem_map));
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002161 AddSpace(bump_pointer_space_);
2162 collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
2163 // Use the now empty main space mem map for the bump pointer temp space.
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002164 mem_map = main_space_->ReleaseMemMap();
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002165 // Unset the pointers just in case.
2166 if (dlmalloc_space_ == main_space_) {
2167 dlmalloc_space_ = nullptr;
2168 } else if (rosalloc_space_ == main_space_) {
2169 rosalloc_space_ = nullptr;
2170 }
2171 // Remove the main space so that we don't try to trim it; this doesn't work for debug
2172 // builds since RosAlloc attempts to read the magic number from a protected page.
2173 RemoveSpace(main_space_);
2174 RemoveRememberedSet(main_space_);
2175 delete main_space_; // Delete the space since it has been removed.
2176 main_space_ = nullptr;
2177 RemoveRememberedSet(main_space_backup_.get());
2178 main_space_backup_.reset(nullptr); // Deletes the space.
2179 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002180 std::move(mem_map));
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002181 AddSpace(temp_space_);
Hiroshi Yamauchic1276c82014-08-07 10:27:17 -07002182 }
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002183 break;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002184 }
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002185 case kCollectorTypeMS:
2186 // Fall through.
2187 case kCollectorTypeCMS: {
2188 if (IsMovingGc(collector_type_)) {
2189 CHECK(temp_space_ != nullptr);
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002190 MemMap mem_map = temp_space_->ReleaseMemMap();
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002191 RemoveSpace(temp_space_);
2192 temp_space_ = nullptr;
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002193 mem_map.Protect(PROT_READ | PROT_WRITE);
2194 CreateMainMallocSpace(std::move(mem_map),
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002195 kDefaultInitialSize,
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002196 std::min(mem_map.Size(), growth_limit_),
2197 mem_map.Size());
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002198 // Compact to the main space from the bump pointer space, don't need to swap semispaces.
2199 AddSpace(main_space_);
2200 collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002201 mem_map = bump_pointer_space_->ReleaseMemMap();
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002202 RemoveSpace(bump_pointer_space_);
2203 bump_pointer_space_ = nullptr;
2204 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
2205 // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
2206 if (kIsDebugBuild && kUseRosAlloc) {
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002207 mem_map.Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002208 }
2209 main_space_backup_.reset(CreateMallocSpaceFromMemMap(
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002210 std::move(mem_map),
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002211 kDefaultInitialSize,
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002212 std::min(mem_map.Size(), growth_limit_),
2213 mem_map.Size(),
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002214 name,
2215 true));
2216 if (kIsDebugBuild && kUseRosAlloc) {
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002217 main_space_backup_->GetMemMap()->Protect(PROT_NONE);
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002218 }
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002219 }
2220 break;
2221 }
2222 default: {
2223 LOG(FATAL) << "Attempted to transition to invalid collector type "
2224 << static_cast<size_t>(collector_type);
Elliott Hughesc1896c92018-11-29 11:33:18 -08002225 UNREACHABLE();
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002226 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002227 }
Mathieu Chartier4f55e222015-09-04 13:26:21 -07002228 ChangeCollector(collector_type);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002229 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002230 // Can't call into java code with all threads suspended.
Mathieu Chartier3cf22532015-07-09 15:15:09 -07002231 reference_processor_->EnqueueClearedReferences(self);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002232 uint64_t duration = NanoTime() - start_time;
Mathieu Chartierafe49982014-03-27 10:55:04 -07002233 GrowForUtilization(semi_space_collector_);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002234 DCHECK(collector != nullptr);
2235 LogGC(kGcCauseCollectorTransition, collector);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002236 FinishGC(self, collector::kGcTypeFull);
Mathieu Chartier598302a2015-09-23 14:52:39 -07002237 {
2238 ScopedObjectAccess soa(self);
2239 soa.Vm()->UnloadNativeLibraries();
2240 }
Hans Boehmfb8b4e22018-09-05 16:45:42 -07002241 int32_t after_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002242 int32_t delta_allocated = before_allocated - after_allocated;
Mathieu Chartier19d46b42014-06-17 15:04:40 -07002243 std::string saved_str;
2244 if (delta_allocated >= 0) {
2245 saved_str = " saved at least " + PrettySize(delta_allocated);
2246 } else {
2247 saved_str = " expanded " + PrettySize(-delta_allocated);
2248 }
Mathieu Chartierf8cb1782016-03-18 18:45:41 -07002249 VLOG(heap) << "Collector transition to " << collector_type << " took "
2250 << PrettyDuration(duration) << saved_str;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002251}
2252
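// Installs the new collector type: rebuilds gc_plan_, switches to the matching allocator, and
// recomputes concurrent_start_bytes_.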
Mathieu Chartier0de9f732013-11-22 17:58:48 -08002253void Heap::ChangeCollector(CollectorType collector_type) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002254 // TODO: Only do this with all mutators suspended to avoid races.
2255 if (collector_type != collector_type_) {
2256 collector_type_ = collector_type;
2257 gc_plan_.clear();
2258 switch (collector_type_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002259 case kCollectorTypeCC: {
Mathieu Chartier8d1a9962016-08-17 16:39:45 -07002260 if (kEnableGenerationalConcurrentCopyingCollection) {
2261 gc_plan_.push_back(collector::kGcTypeSticky);
2262 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002263 gc_plan_.push_back(collector::kGcTypeFull);
2264 if (use_tlab_) {
2265 ChangeAllocator(kAllocatorTypeRegionTLAB);
2266 } else {
2267 ChangeAllocator(kAllocatorTypeRegion);
2268 }
2269 break;
2270 }
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002271 case kCollectorTypeSS: // Fall-through.
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08002272 case kCollectorTypeGSS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002273 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08002274 if (use_tlab_) {
2275 ChangeAllocator(kAllocatorTypeTLAB);
2276 } else {
2277 ChangeAllocator(kAllocatorTypeBumpPointer);
2278 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002279 break;
2280 }
2281 case kCollectorTypeMS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002282 gc_plan_.push_back(collector::kGcTypeSticky);
2283 gc_plan_.push_back(collector::kGcTypePartial);
2284 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002285 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002286 break;
2287 }
2288 case kCollectorTypeCMS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002289 gc_plan_.push_back(collector::kGcTypeSticky);
2290 gc_plan_.push_back(collector::kGcTypePartial);
2291 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002292 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002293 break;
2294 }
2295 default: {
Ian Rogers2c4257b2014-10-24 14:20:06 -07002296 UNIMPLEMENTED(FATAL);
2297 UNREACHABLE();
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002298 }
Mathieu Chartier0de9f732013-11-22 17:58:48 -08002299 }
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002300 if (IsGcConcurrent()) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002301 concurrent_start_bytes_ =
Hans Boehmc220f982018-10-12 16:15:45 -07002302 UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
2303 kMinConcurrentRemainingBytes);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002304 } else {
2305 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
Mathieu Chartier0de9f732013-11-22 17:58:48 -08002306 }
2307 }
2308}
2309
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002310// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
Roland Levillainbbc6e7e2018-08-24 16:58:47 +01002311class ZygoteCompactingCollector final : public collector::SemiSpace {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002312 public:
Roland Levillain3887c462015-08-12 18:15:42 +01002313 ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
Evgenii Stepanov1e133742015-05-20 12:30:59 -07002314 : SemiSpace(heap, false, "zygote collector"),
2315 bin_live_bitmap_(nullptr),
2316 bin_mark_bitmap_(nullptr),
2317 is_running_on_memory_tool_(is_running_on_memory_tool) {}
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002318
Andreas Gampe0c183382017-07-13 22:26:24 -07002319 void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002320 bin_live_bitmap_ = space->GetLiveBitmap();
2321 bin_mark_bitmap_ = space->GetMarkBitmap();
Andreas Gampe0c183382017-07-13 22:26:24 -07002322 uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002323 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2324 // Note: This requires traversing the space in increasing order of object addresses.
Andreas Gampe0c183382017-07-13 22:26:24 -07002325 auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2326 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2327 size_t bin_size = object_addr - prev;
2328 // Add the bin consisting of the end of the previous object to the start of the current object.
2329 AddBin(bin_size, prev);
2330 prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
2331 };
2332 bin_live_bitmap_->Walk(visitor);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002333 // Add the last bin which spans after the last object to the end of the space.
Andreas Gampe0c183382017-07-13 22:26:24 -07002334 AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002335 }
2336
2337 private:
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002338 // Maps from bin sizes to locations.
2339 std::multimap<size_t, uintptr_t> bins_;
2340 // Live bitmap of the space which contains the bins.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002341 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002342 // Mark bitmap of the space which contains the bins.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002343 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
Evgenii Stepanov1e133742015-05-20 12:30:59 -07002344 const bool is_running_on_memory_tool_;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002345
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002346 void AddBin(size_t size, uintptr_t position) {
Evgenii Stepanov1e133742015-05-20 12:30:59 -07002347 if (is_running_on_memory_tool_) {
2348 MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2349 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002350 if (size != 0) {
2351 bins_.insert(std::make_pair(size, position));
2352 }
2353 }
2354
Andreas Gampefa6a1b02018-09-07 08:11:55 -07002355 bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const override {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002356 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2357 // allocator.
2358 return false;
2359 }
2360
Andreas Gampefa6a1b02018-09-07 08:11:55 -07002361 mirror::Object* MarkNonForwardedObject(mirror::Object* obj) override
Mathieu Chartier90443472015-07-16 20:32:27 -07002362 REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07002363 size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002364 size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
Mathieu Chartier5dc08a62014-01-10 10:10:23 -08002365 mirror::Object* forward_address;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002366 // Find the smallest bin which we can move obj in.
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002367 auto it = bins_.lower_bound(alloc_size);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002368 if (it == bins_.end()) {
2369 // No available space in the bins, place it in the target space instead (grows the zygote
2370 // space).
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07002371 size_t bytes_allocated, dummy;
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002372 forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002373 if (to_space_live_bitmap_ != nullptr) {
2374 to_space_live_bitmap_->Set(forward_address);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002375 } else {
2376 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2377 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002378 }
2379 } else {
2380 size_t size = it->first;
2381 uintptr_t pos = it->second;
2382 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
2383 forward_address = reinterpret_cast<mirror::Object*>(pos);
2384 // Set the live and mark bits so that sweeping system weaks works properly.
2385 bin_live_bitmap_->Set(forward_address);
2386 bin_mark_bitmap_->Set(forward_address);
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002387 DCHECK_GE(size, alloc_size);
2388 // Add a new bin with the remaining space.
2389 AddBin(size - alloc_size, pos + alloc_size);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002390 }
Roland Levillain05e34f42018-05-24 13:19:05 +00002391 // Copy the object over to its new location.
2392 // Historical note: We did not use `alloc_size` to avoid a Valgrind error.
Hiroshi Yamauchi8711d1f2015-03-13 16:48:55 -07002393 memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07002394 if (kUseBakerReadBarrier) {
2395 obj->AssertReadBarrierState();
2396 forward_address->AssertReadBarrierState();
Hiroshi Yamauchi9d04a202014-01-31 13:35:49 -08002397 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002398 return forward_address;
2399 }
2400};
2401
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002402void Heap::UnBindBitmaps() {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002403 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002404 for (const auto& space : GetContinuousSpaces()) {
2405 if (space->IsContinuousMemMapAllocSpace()) {
2406 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2407 if (alloc_space->HasBoundBitmaps()) {
2408 alloc_space->UnBindBitmaps();
2409 }
2410 }
2411 }
2412}
2413
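// Called before the zygote process forks. Compacts live objects into a densely packed zygote
// space, then rebuilds the allocation spaces, mod-union table, and remembered set used for
// post-fork allocation.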
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002414void Heap::PreZygoteFork() {
Mathieu Chartierfaed9952015-03-31 16:28:53 -07002415 if (!HasZygoteSpace()) {
2416 // We still want to GC in case there are some unreachable non-moving objects that could cause a
2417 // suboptimal bin packing when we compact the zygote space.
2418 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
Mathieu Chartier76ce9172016-01-27 10:44:20 -08002419 // Trim the pages at the end of the non moving space. Trim while not holding zygote lock since
2420 // the trim process may require locking the mutator lock.
2421 non_moving_space_->Trim();
Mathieu Chartierfaed9952015-03-31 16:28:53 -07002422 }
Ian Rogers81d425b2012-09-27 16:03:43 -07002423 Thread* self = Thread::Current();
2424 MutexLock mu(self, zygote_creation_lock_);
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002425 // Try to see if we have any Zygote spaces.
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002426 if (HasZygoteSpace()) {
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002427 return;
2428 }
Mathieu Chartierea0831f2015-12-29 13:17:37 -08002429 Runtime::Current()->GetInternTable()->AddNewTable();
Mathieu Chartierc2e20622014-11-03 11:41:47 -08002430 Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002431 VLOG(heap) << "Starting PreZygoteFork";
Mathieu Chartier31f44142014-04-08 14:40:03 -07002432 // The end of the non-moving space may be protected; unprotect it so that we can copy the zygote
2433 // there.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002434 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07002435 const bool same_space = non_moving_space_ == main_space_;
Mathieu Chartier31f44142014-04-08 14:40:03 -07002436 if (kCompactZygote) {
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002437 // Temporarily disable rosalloc verification because the zygote
2438 // compaction will mess up the rosalloc internal metadata.
2439 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
Evgenii Stepanov1e133742015-05-20 12:30:59 -07002440 ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002441 zygote_collector.BuildBins(non_moving_space_);
Mathieu Chartier50482232013-11-21 11:48:14 -08002442 // Create a new bump pointer space which we will compact into.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002443 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2444 non_moving_space_->Limit());
2445 // Compact the bump pointer space to a new zygote bump pointer space.
Mathieu Chartier31f44142014-04-08 14:40:03 -07002446 bool reset_main_space = false;
2447 if (IsMovingGc(collector_type_)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002448 if (collector_type_ == kCollectorTypeCC) {
2449 zygote_collector.SetFromSpace(region_space_);
2450 } else {
2451 zygote_collector.SetFromSpace(bump_pointer_space_);
2452 }
Mathieu Chartier31f44142014-04-08 14:40:03 -07002453 } else {
2454 CHECK(main_space_ != nullptr);
Hiroshi Yamauchid04495e2015-03-11 19:09:07 -07002455 CHECK_NE(main_space_, non_moving_space_)
2456 << "Does not make sense to compact within the same space";
Mathieu Chartier31f44142014-04-08 14:40:03 -07002457 // Copy from the main space.
2458 zygote_collector.SetFromSpace(main_space_);
2459 reset_main_space = true;
2460 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08002461 zygote_collector.SetToSpace(&target_space);
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -07002462 zygote_collector.SetSwapSemiSpaces(false);
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08002463 zygote_collector.Run(kGcCauseCollectorTransition, false);
Mathieu Chartier31f44142014-04-08 14:40:03 -07002464 if (reset_main_space) {
2465 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2466 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002467 MemMap mem_map = main_space_->ReleaseMemMap();
Mathieu Chartier31f44142014-04-08 14:40:03 -07002468 RemoveSpace(main_space_);
Mathieu Chartier96bcd452014-06-17 09:50:02 -07002469 space::Space* old_main_space = main_space_;
Vladimir Markoc34bebf2018-08-16 16:12:49 +01002470 CreateMainMallocSpace(std::move(mem_map),
2471 kDefaultInitialSize,
2472 std::min(mem_map.Size(), growth_limit_),
2473 mem_map.Size());
Mathieu Chartier96bcd452014-06-17 09:50:02 -07002474 delete old_main_space;
Mathieu Chartier31f44142014-04-08 14:40:03 -07002475 AddSpace(main_space_);
2476 } else {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002477 if (collector_type_ == kCollectorTypeCC) {
2478 region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier7ec38dc2016-10-07 15:24:46 -07002479 // Evacuated everything out of the region space, clear the mark bitmap.
2480 region_space_->GetMarkBitmap()->Clear();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002481 } else {
2482 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2483 }
Mathieu Chartier31f44142014-04-08 14:40:03 -07002484 }
2485 if (temp_space_ != nullptr) {
2486 CHECK(temp_space_->IsEmpty());
2487 }
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002488 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2489 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002490 // Update the end and write out image.
2491 non_moving_space_->SetEnd(target_space.End());
2492 non_moving_space_->SetLimit(target_space.Limit());
Mathieu Chartierfaed9952015-03-31 16:28:53 -07002493 VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002494 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07002495 // Change the collector to the post zygote one.
Mathieu Chartier31f44142014-04-08 14:40:03 -07002496 ChangeCollector(foreground_collector_type_);
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002497 // Save the old space so that we can remove it after we complete creating the zygote space.
2498 space::MallocSpace* old_alloc_space = non_moving_space_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002499 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002500 // the remaining available space.
2501 // Remove the old space before creating the zygote space since creating the zygote space sets
Mathieu Chartier2cebb242015-04-21 16:50:40 -07002502 // the old alloc space's bitmaps to null.
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002503 RemoveSpace(old_alloc_space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002504 if (collector::SemiSpace::kUseRememberedSet) {
2505 // Sanity bound check.
2506 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2507 // Remove the remembered set for the now zygote space (the old
2508 // non-moving space). Note now that we have compacted objects into
2509 // the zygote space, the data in the remembered set is no longer
2510 // needed. The zygote space will instead have a mod-union table
2511 // from this point on.
2512 RemoveRememberedSet(old_alloc_space);
2513 }
Mathieu Chartier7247af52014-11-19 10:51:42 -08002514 // Remaining space becomes the new non moving space.
2515 zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002516 &non_moving_space_);
Mathieu Chartierb363f662014-07-16 13:28:58 -07002517 CHECK(!non_moving_space_->CanMoveObjects());
2518 if (same_space) {
2519 main_space_ = non_moving_space_;
2520 SetSpaceAsDefault(main_space_);
2521 }
Mathieu Chartiera1602f22014-01-13 17:19:19 -08002522 delete old_alloc_space;
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002523 CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2524 AddSpace(zygote_space_);
Mathieu Chartier31f44142014-04-08 14:40:03 -07002525 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2526 AddSpace(non_moving_space_);
Mathieu Chartier36a270a2016-07-28 18:08:51 -07002527 if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) {
2528 // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
2529 // safe since we mark all of the objects that may reference non immune objects as gray.
2530 zygote_space_->GetLiveBitmap()->VisitMarkedRange(
2531 reinterpret_cast<uintptr_t>(zygote_space_->Begin()),
2532 reinterpret_cast<uintptr_t>(zygote_space_->Limit()),
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002533 [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier36a270a2016-07-28 18:08:51 -07002534 CHECK(obj->AtomicSetMarkBit(0, 1));
2535 });
2536 }
2537
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002538 // Create the zygote space mod union table.
2539 accounting::ModUnionTable* mod_union_table =
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07002540 new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002541 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07002542
2543 if (collector_type_ != kCollectorTypeCC) {
2544 // Set all the cards in the mod-union table since we don't know which objects contain references
2545 // to large objects.
2546 mod_union_table->SetCards();
2547 } else {
Mathieu Chartier55c05f52017-04-11 11:12:28 -07002548 // Make sure to clear the zygote space cards so that we don't dirty pages in the next GC. There
2549 // may be dirty cards from the zygote compaction or reference processing. These cards are not
2550 // necessary to have marked since the zygote space may not refer to any objects not in the
2551 // zygote or image spaces at this point.
2552 mod_union_table->ProcessCards();
2553 mod_union_table->ClearTable();
2554
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07002555 // For CC we never collect zygote large objects. This means we do not need to set the cards for
2556 // the zygote mod-union table and we can also clear all of the existing image mod-union tables.
2557 // The existing mod-union tables are only for image spaces and may only reference zygote and
2558 // image objects.
2559 for (auto& pair : mod_union_tables_) {
2560 CHECK(pair.first->IsImageSpace());
2561 CHECK(!pair.first->AsImageSpace()->GetImageHeader().IsAppImage());
2562 accounting::ModUnionTable* table = pair.second;
2563 table->ClearTable();
2564 }
2565 }
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002566 AddModUnionTable(mod_union_table);
Mathieu Chartierf6c2a272015-06-03 17:32:42 -07002567 large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002568 if (collector::SemiSpace::kUseRememberedSet) {
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002569 // Add a new remembered set for the post-zygote non-moving space.
2570 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2571 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2572 non_moving_space_);
2573 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2574 << "Failed to create post-zygote non-moving space remembered set";
2575 AddRememberedSet(post_zygote_non_moving_space_rem_set);
2576 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002577}
2578
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002579void Heap::FlushAllocStack() {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002580 MarkAllocStackAsLive(allocation_stack_.get());
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002581 allocation_stack_->Reset();
2582}
2583
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002584void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2585 accounting::ContinuousSpaceBitmap* bitmap2,
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07002586 accounting::LargeObjectBitmap* large_objects,
Ian Rogers1d54e732013-05-02 21:10:01 -07002587 accounting::ObjectStack* stack) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002588 DCHECK(bitmap1 != nullptr);
2589 DCHECK(bitmap2 != nullptr);
Mathieu Chartiercb535da2015-01-23 13:50:03 -08002590 const auto* limit = stack->End();
2591 for (auto* it = stack->Begin(); it != limit; ++it) {
2592 const mirror::Object* obj = it->AsMirrorPtr();
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002593 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2594 if (bitmap1->HasAddress(obj)) {
2595 bitmap1->Set(obj);
2596 } else if (bitmap2->HasAddress(obj)) {
2597 bitmap2->Set(obj);
2598 } else {
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07002599 DCHECK(large_objects != nullptr);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002600 large_objects->Set(obj);
2601 }
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07002602 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002603 }
2604}
2605
Mathieu Chartier590fee92013-09-13 13:46:47 -07002606void Heap::SwapSemiSpaces() {
Mathieu Chartier31f44142014-04-08 14:40:03 -07002607 CHECK(bump_pointer_space_ != nullptr);
2608 CHECK(temp_space_ != nullptr);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002609 std::swap(bump_pointer_space_, temp_space_);
2610}
2611
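// Copies source_space into target_space using the semi-space collector without swapping the
// semispaces afterwards; the two spaces must differ.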
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002612collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2613 space::ContinuousMemMapAllocSpace* source_space,
2614 GcCause gc_cause) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002615 CHECK(kMovingCollector);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002616 if (target_space != source_space) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002617 // Don't swap spaces since this isn't a typical semi space collection.
2618 semi_space_collector_->SetSwapSemiSpaces(false);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002619 semi_space_collector_->SetFromSpace(source_space);
2620 semi_space_collector_->SetToSpace(target_space);
Zuo Wangf37a88b2014-07-10 04:26:41 -07002621 semi_space_collector_->Run(gc_cause, false);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002622 return semi_space_collector_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002623 }
Mathieu Chartierf8e5d8c2018-04-06 13:35:37 -07002624 LOG(FATAL) << "Unsupported";
2625 UNREACHABLE();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002626}
Anwar Ghuloum67f99412013-08-12 14:19:48 -07002627
Mathieu Chartier34afcde2017-06-30 15:31:11 -07002628void Heap::TraceHeapSize(size_t heap_size) {
2629 ATRACE_INT("Heap size (KB)", heap_size / KB);
2630}
2631
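// Estimates the native (non-Java) bytes currently allocated, for use in GC triggering: malloc and
// mmap usage reported by mallinfo() where available, plus bytes explicitly registered as native
// allocations.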
Hans Boehmc220f982018-10-12 16:15:45 -07002632size_t Heap::GetNativeBytes() {
2633 size_t malloc_bytes;
Hans Boehmc220f982018-10-12 16:15:45 -07002634#if defined(__BIONIC__) || defined(__GLIBC__)
Hans Boehmf91867e2018-12-13 22:27:51 -08002635 size_t mmapped_bytes;
Hans Boehmc220f982018-10-12 16:15:45 -07002636 struct mallinfo mi = mallinfo();
2637 // In spite of the documentation, the jemalloc version of this call seems to do what we want,
2638 // and it is thread-safe.
2639 if (sizeof(size_t) > sizeof(mi.uordblks) && sizeof(size_t) > sizeof(mi.hblkhd)) {
2640 // Shouldn't happen, but glibc declares uordblks as int.
2641 // Avoiding sign extension gets us correct behavior for another 2 GB.
2642 malloc_bytes = (unsigned int)mi.uordblks;
2643 mmapped_bytes = (unsigned int)mi.hblkhd;
2644 } else {
2645 malloc_bytes = mi.uordblks;
2646 mmapped_bytes = mi.hblkhd;
2647 }
2648 // From the spec, we clearly have mmapped_bytes <= malloc_bytes. Reality is sometimes
2649 // dramatically different. (b/119580449) If so, fudge it.
2650 if (mmapped_bytes > malloc_bytes) {
2651 malloc_bytes = mmapped_bytes;
2652 }
2653#else
2654 // We should hit this case only in contexts in which GC triggering is not critical. Effectively
2655 // disable GC triggering based on malloc().
2656 malloc_bytes = 1000;
2657#endif
2658 return malloc_bytes + native_bytes_registered_.load(std::memory_order_relaxed);
2659 // An alternative would be to get RSS from /proc/self/statm. Empirically, that's no
2660 // more expensive, and it would allow us to count memory allocated by means other than malloc.
2661 // However it would change as pages are unmapped and remapped due to memory pressure, among
2662 // other things. It seems risky to trigger GCs as a result of such changes.
2663}
2664
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07002665collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
2666 GcCause gc_cause,
Ian Rogers1d54e732013-05-02 21:10:01 -07002667 bool clear_soft_references) {
Ian Rogers81d425b2012-09-27 16:03:43 -07002668 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002669 Runtime* runtime = Runtime::Current();
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002670 // If the heap can't run the GC, silently fail and return that no GC was run.
2671 switch (gc_type) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002672 case collector::kGcTypePartial: {
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002673 if (!HasZygoteSpace()) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002674 return collector::kGcTypeNone;
2675 }
2676 break;
2677 }
2678 default: {
2679 // Other GC types don't have any special cases which makes them not runnable. The main case
2680 // here is full GC.
2681 }
2682 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08002683 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Yi Konge11d50f2018-01-09 16:55:04 -08002684 // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
2685 // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
2686 // http://b/71769596
2687 // Locks::mutator_lock_->AssertNotHeld(self);
Ian Rogers120f1c72012-09-28 17:17:10 -07002688 if (self->IsHandlingStackOverflow()) {
Mathieu Chartier50c138f2015-01-07 16:00:03 -08002689 // If we are throwing a stack overflow error we probably don't have enough remaining stack
2690 // space to run the GC.
2691 return collector::kGcTypeNone;
Ian Rogers120f1c72012-09-28 17:17:10 -07002692 }
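  // Whether the collector we are about to run moves objects; decided below under gc_complete_lock_.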
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002693 bool compacting_gc;
2694 {
2695 gc_complete_lock_->AssertNotHeld(self);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08002696 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002697 MutexLock mu(self, *gc_complete_lock_);
2698 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002699 WaitForGcToCompleteLocked(gc_cause, self);
Mathieu Chartier31f44142014-04-08 14:40:03 -07002700 compacting_gc = IsMovingGc(collector_type_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002701 // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2702 if (compacting_gc && disable_moving_gc_count_ != 0) {
2703 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2704 return collector::kGcTypeNone;
2705 }
Mathieu Chartier51168372015-08-12 16:40:32 -07002706 if (gc_disabled_for_shutdown_) {
2707 return collector::kGcTypeNone;
2708 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002709 collector_type_running_ = collector_type_;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002710 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07002711 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2712 ++runtime->GetStats()->gc_for_alloc_count;
2713 ++self->GetStats()->gc_for_alloc_count;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002714 }
Hans Boehmc220f982018-10-12 16:15:45 -07002715 const size_t bytes_allocated_before_gc = GetBytesAllocated();
Richard Uhlercaaa2b02017-02-01 09:54:17 +00002716
Ian Rogers1d54e732013-05-02 21:10:01 -07002717 DCHECK_LT(gc_type, collector::kGcTypeMax);
2718 DCHECK_NE(gc_type, collector::kGcTypeNone);
Anwar Ghuloum67f99412013-08-12 14:19:48 -07002719
Mathieu Chartier590fee92013-09-13 13:46:47 -07002720 collector::GarbageCollector* collector = nullptr;
Mathieu Chartier50482232013-11-21 11:48:14 -08002721 // TODO: Clean this up.
Mathieu Chartier1d27b342014-01-28 12:51:09 -08002722 if (compacting_gc) {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08002723 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002724 current_allocator_ == kAllocatorTypeTLAB ||
2725 current_allocator_ == kAllocatorTypeRegion ||
2726 current_allocator_ == kAllocatorTypeRegionTLAB);
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002727 switch (collector_type_) {
2728 case kCollectorTypeSS:
2729 // Fall-through.
2730 case kCollectorTypeGSS:
2731 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2732 semi_space_collector_->SetToSpace(temp_space_);
2733 semi_space_collector_->SetSwapSemiSpaces(true);
2734 collector = semi_space_collector_;
2735 break;
2736 case kCollectorTypeCC:
Mathieu Chartier8d1a9962016-08-17 16:39:45 -07002737 if (kEnableGenerationalConcurrentCopyingCollection) {
2738 // TODO: Other threads must do the flip checkpoint before they start poking at
2739 // active_concurrent_copying_collector_. So there should be no concurrency here.
2740 active_concurrent_copying_collector_ = (gc_type == collector::kGcTypeSticky) ?
2741 young_concurrent_copying_collector_ : concurrent_copying_collector_;
Lokesh Gidra1c34b712018-12-18 13:41:58 -08002742 DCHECK(active_concurrent_copying_collector_->RegionSpace() == region_space_);
Mathieu Chartier8d1a9962016-08-17 16:39:45 -07002743 }
2744 collector = active_concurrent_copying_collector_;
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002745 break;
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002746 default:
2747 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07002748 }
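    // The semi-space collectors copy into temp_space_; it may be mprotected while unused, so make
    // it writable and check that it is empty before running.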
Mathieu Chartier8d1a9962016-08-17 16:39:45 -07002749 if (collector != active_concurrent_copying_collector_) {
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002750 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Hiroshi Yamauchi6edb9ae2016-02-08 14:18:21 -08002751 if (kIsDebugBuild) {
2752 // Try to read each page of the memory map in case mprotect didn't work properly (b/19894268).
2753 temp_space_->GetMemMap()->TryReadable();
2754 }
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002755 CHECK(temp_space_->IsEmpty());
2756 }
2757 gc_type = collector::kGcTypeFull; // TODO: Don't hard-code this.
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002758 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2759 current_allocator_ == kAllocatorTypeDlMalloc) {
Mathieu Chartierafe49982014-03-27 10:55:04 -07002760 collector = FindCollectorByGcType(gc_type);
Mathieu Chartier50482232013-11-21 11:48:14 -08002761 } else {
2762 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002763 }
Mathieu Chartier08cef222014-10-22 17:18:34 -07002764 if (IsGcConcurrent()) {
2765 // Disable concurrent GC check so that we don't have spammy JNI requests.
2766 // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2767 // calculated in the same thread so that there aren't any races that can cause it to become
2768 // permanently disabled. b/17942071
2769 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2770 }
Nicolas Geoffrayb6e20ae2016-03-07 14:29:04 +00002771
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002772 CHECK(collector != nullptr)
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002773 << "Could not find garbage collector with collector_type="
2774 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002775 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002776 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2777 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
Mathieu Chartiera5eae692014-12-17 17:56:03 -08002778 RequestTrim(self);
Mathieu Chartier39e32612013-11-12 16:28:05 -08002779 // Enqueue cleared references.
Mathieu Chartier3cf22532015-07-09 15:15:09 -07002780 reference_processor_->EnqueueClearedReferences(self);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002781 // Grow the heap so that we know when to perform the next GC.
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08002782 GrowForUtilization(collector, bytes_allocated_before_gc);
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002783 LogGC(gc_cause, collector);
2784 FinishGC(self, gc_type);
2785 // Inform DDMS that a GC completed.
2786 Dbg::GcDidFinish();
Hans Boehmc220f982018-10-12 16:15:45 -07002787
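  // Record the native heap size at the end of this GC so that subsequent native allocations can be
  // measured against this baseline.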
2788 old_native_bytes_allocated_.store(GetNativeBytes());
2789
Mathieu Chartier598302a2015-09-23 14:52:39 -07002790 // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2791 // deadlocks in case the JNI_OnUnload function does allocations.
2792 {
2793 ScopedObjectAccess soa(self);
2794 soa.Vm()->UnloadNativeLibraries();
2795 }
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002796 return gc_type;
2797}
2798
2799void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002800 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2801 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002802 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002803 // (mutator time blocked >= long_pause_log_threshold_).
Mathieu Chartier6bc77742017-04-18 17:46:23 -07002804 bool log_gc = kLogAllGCs || gc_cause == kGcCauseExplicit;
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002805 if (!log_gc && CareAboutPauseTimes()) {
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002806 // GC for alloc pauses the allocating thread, so consider it as a pause.
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002807 log_gc = duration > long_gc_log_threshold_ ||
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002808 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002809 for (uint64_t pause : pause_times) {
2810 log_gc = log_gc || pause >= long_pause_log_threshold_;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002811 }
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002812 }
2813 if (log_gc) {
2814 const size_t percent_free = GetPercentFree();
2815 const size_t current_heap_size = GetBytesAllocated();
2816 const size_t total_memory = GetTotalMemory();
2817 std::ostringstream pause_string;
2818 for (size_t i = 0; i < pause_times.size(); ++i) {
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -08002819 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2820 << ((i != pause_times.size() - 1) ? "," : "");
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002821 }
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002822 LOG(INFO) << gc_cause << " " << collector->GetName()
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002823 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2824 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2825 << current_gc_iteration_.GetFreedLargeObjects() << "("
2826 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002827 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2828 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2829 << " total " << PrettyDuration((duration / 1000) * 1000);
Ian Rogersc7dd2952014-10-21 23:31:19 -07002830 VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002831 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002832}
Mathieu Chartiera6399032012-06-11 18:49:50 -07002833
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002834void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2835 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002836 collector_type_running_ = kCollectorTypeNone;
2837 if (gc_type != collector::kGcTypeNone) {
2838 last_gc_type_ = gc_type;
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07002839
2840 // Update stats.
2841 ++gc_count_last_window_;
2842 if (running_collection_is_blocking_) {
2843 // If the currently running collection was a blocking one,
2844 // increment the counters and reset the flag.
2845 ++blocking_gc_count_;
2846 blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2847 ++blocking_gc_count_last_window_;
2848 }
2849 // Update the gc count rate histograms if due.
2850 UpdateGcCountRateHistograms();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002851 }
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07002852 // Reset.
2853 running_collection_is_blocking_ = false;
Mathieu Chartier183009a2017-02-16 21:19:28 -08002854 thread_running_gc_ = nullptr;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002855 // Wake anyone who may have been waiting for the GC to complete.
2856 gc_complete_cond_->Broadcast(self);
2857}
2858
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07002859void Heap::UpdateGcCountRateHistograms() {
2860 // Invariant: if the time since the last update includes more than
2861 // one window, all the GC runs (if > 0) must have happened in the first
2862 // window because otherwise the update must have already taken place
2863 // at an earlier GC run. So, we report the non-first windows with
2864 // zero counts to the histograms.
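  // Example, assuming a 10s window: if the last update was ~35s ago, two GCs completed in that
  // first 10s window, and the current run just finished, gc_count_last_window_ is 3; the first
  // window records 2, the next two windows record 0, and the counter restarts at 1.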
2865 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2866 uint64_t now = NanoTime();
2867 DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2868 uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2869 uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
2870 if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2871 // Record the first window.
2872 gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run.
2873 blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2874 blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2875 // Record the other windows (with zero counts).
2876 for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2877 gc_count_rate_histogram_.AddValue(0);
2878 blocking_gc_count_rate_histogram_.AddValue(0);
2879 }
2880 // Update the last update time and reset the counters.
2881 last_update_time_gc_count_rate_histograms_ =
2882 (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2883 gc_count_last_window_ = 1; // Include the current run.
2884 blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2885 }
2886 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2887}
2888
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002889class RootMatchesObjectVisitor : public SingleRootVisitor {
2890 public:
2891 explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2892
2893 void VisitRoot(mirror::Object* root, const RootInfo& info)
Roland Levillainbbc6e7e2018-08-24 16:58:47 +01002894 override REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002895 if (root == obj_) {
2896 LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2897 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002898 }
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002899
2900 private:
2901 const mirror::Object* const obj_;
2902};
2903
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002904
2905class ScanVisitor {
2906 public:
Brian Carlstromdf629502013-07-17 22:39:56 -07002907 void operator()(const mirror::Object* obj) const {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002908 LOG(ERROR) << "Would have rescanned object " << obj;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002909 }
2910};
2911
Ian Rogers1d54e732013-05-02 21:10:01 -07002912// Verify a reference from an object.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002913class VerifyReferenceVisitor : public SingleRootVisitor {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002914 public:
Orion Hodson4a01cc32018-03-26 15:46:18 +01002915 VerifyReferenceVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent)
Andreas Gampe351c4472017-07-12 19:32:55 -07002916 REQUIRES_SHARED(Locks::mutator_lock_)
Orion Hodson4a01cc32018-03-26 15:46:18 +01002917 : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
2918 CHECK_EQ(self_, Thread::Current());
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002919 }
2920
Mathieu Chartier31e88222016-10-14 18:43:19 -07002921 void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002922 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002923 if (verify_referent_) {
Mathieu Chartier31e88222016-10-14 18:43:19 -07002924 VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002925 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08002926 }
2927
Mathieu Chartier31e88222016-10-14 18:43:19 -07002928 void operator()(ObjPtr<mirror::Object> obj,
2929 MemberOffset offset,
2930 bool is_static ATTRIBUTE_UNUSED) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002931 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier31e88222016-10-14 18:43:19 -07002932 VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
Mathieu Chartier407f7022014-02-18 14:37:05 -08002933 }
2934
Mathieu Chartier31e88222016-10-14 18:43:19 -07002935 bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002936 return heap_->IsLiveObjectLocked(obj, true, false, true);
2937 }
2938
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002939 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002940 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002941 if (!root->IsNull()) {
2942 VisitRoot(root);
2943 }
2944 }
2945 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002946 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002947 const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
2948 root->AsMirrorPtr(), RootInfo(kRootVMInternal));
2949 }
2950
Roland Levillainf73caca2018-08-24 17:19:07 +01002951 void VisitRoot(mirror::Object* root, const RootInfo& root_info) override
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07002952 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002953 if (root == nullptr) {
2954 LOG(ERROR) << "Root is null with info " << root_info.GetType();
2955 } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
David Sehr709b0702016-10-13 09:12:37 -07002956 LOG(ERROR) << "Root " << root << " is dead with type " << mirror::Object::PrettyTypeOf(root)
Mathieu Chartiere34fa1d2015-01-14 14:55:47 -08002957 << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002958 }
2959 }
2960
2961 private:
Mathieu Chartier407f7022014-02-18 14:37:05 -08002962 // TODO: Fix the no thread safety analysis.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002963 // Returns false on failure.
2964 bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002965 NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002966 if (ref == nullptr || IsLive(ref)) {
2967 // Verify that the reference is live.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002968 return true;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002969 }
Orion Hodson4a01cc32018-03-26 15:46:18 +01002970 CHECK_EQ(self_, Thread::Current()); // fail_count_ is private to the calling thread.
2971 *fail_count_ += 1;
2972 if (*fail_count_ == 1) {
2973 // Only print message for the first failure to prevent spam.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002974 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002975 }
2976 if (obj != nullptr) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002977 // Only do this part for non roots.
Ian Rogers1d54e732013-05-02 21:10:01 -07002978 accounting::CardTable* card_table = heap_->GetCardTable();
2979 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2980 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Ian Rogers13735952014-10-08 12:43:28 -07002981 uint8_t* card_addr = card_table->CardFromAddr(obj);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002982 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2983 << offset << "\n card value = " << static_cast<int>(*card_addr);
2984 if (heap_->IsValidObjectAddress(obj->GetClass())) {
David Sehr709b0702016-10-13 09:12:37 -07002985 LOG(ERROR) << "Obj type " << obj->PrettyTypeOf();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002986 } else {
2987 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002988 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002989
Mathieu Chartierb363f662014-07-16 13:28:58 -07002990 // Attempt to find the class inside of the recently freed objects.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002991 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2992 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2993 space::MallocSpace* space = ref_space->AsMallocSpace();
2994 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2995 if (ref_class != nullptr) {
2996 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
David Sehr709b0702016-10-13 09:12:37 -07002997 << ref_class->PrettyClass();
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002998 } else {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002999 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003000 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003001 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003002
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003003 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
3004 ref->GetClass()->IsClass()) {
David Sehr709b0702016-10-13 09:12:37 -07003005 LOG(ERROR) << "Ref type " << ref->PrettyTypeOf();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003006 } else {
3007 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
3008 << ") is not a valid heap address";
3009 }
3010
Ian Rogers13735952014-10-08 12:43:28 -07003011 card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003012 void* cover_begin = card_table->AddrFromCard(card_addr);
3013 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
3014 accounting::CardTable::kCardSize);
3015 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
3016 << "-" << cover_end;
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07003017 accounting::ContinuousSpaceBitmap* bitmap =
3018 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003019
3020 if (bitmap == nullptr) {
3021 LOG(ERROR) << "Object " << obj << " has no bitmap";
Mathieu Chartier4e305412014-02-19 10:54:44 -08003022 if (!VerifyClassClass(obj->GetClass())) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003023 LOG(ERROR) << "Object " << obj << " failed class verification!";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003024 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003025 } else {
Ian Rogers1d54e732013-05-02 21:10:01 -07003026 // Print out how the object is live.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003027 if (bitmap->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003028 LOG(ERROR) << "Object " << obj << " found in live bitmap";
3029 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003030 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003031 LOG(ERROR) << "Object " << obj << " found in allocation stack";
3032 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003033 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003034 LOG(ERROR) << "Object " << obj << " found in live stack";
3035 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003036 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
3037 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
3038 }
3039 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
3040 LOG(ERROR) << "Ref " << ref << " found in live stack";
3041 }
Ian Rogers1d54e732013-05-02 21:10:01 -07003042 // Attempt to see if the card table missed the reference.
3043 ScanVisitor scan_visitor;
Ian Rogers13735952014-10-08 12:43:28 -07003044 uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
Lei Li727b2942015-01-15 11:26:34 +08003045 card_table->Scan<false>(bitmap, byte_cover_begin,
3046 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003047 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003048
3049 // Search to see if any of the roots reference our object.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07003050 RootMatchesObjectVisitor visitor1(obj);
3051 Runtime::Current()->VisitRoots(&visitor1);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003052 // Search to see if any of the roots reference our reference.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07003053 RootMatchesObjectVisitor visitor2(ref);
3054 Runtime::Current()->VisitRoots(&visitor2);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003055 }
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003056 return false;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003057 }
3058
Orion Hodson4a01cc32018-03-26 15:46:18 +01003059 Thread* const self_;
Ian Rogers1d54e732013-05-02 21:10:01 -07003060 Heap* const heap_;
Orion Hodson4a01cc32018-03-26 15:46:18 +01003061 size_t* const fail_count_;
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003062 const bool verify_referent_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003063};
3064
Ian Rogers1d54e732013-05-02 21:10:01 -07003065// Verify all references within an object, for use with HeapBitmap::Visit.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003066class VerifyObjectVisitor {
3067 public:
Orion Hodson4a01cc32018-03-26 15:46:18 +01003068 VerifyObjectVisitor(Thread* self, Heap* heap, size_t* fail_count, bool verify_referent)
3069 : self_(self), heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003070
Andreas Gampe351c4472017-07-12 19:32:55 -07003071 void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003072 // Note: we are verifying the references in obj but not obj itself, because obj must
3073 // be live or else how did we find it in the live bitmap?
Orion Hodson4a01cc32018-03-26 15:46:18 +01003074 VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003075 // The class doesn't count as a reference but we should verify it anyways.
Mathieu Chartier059ef3d2015-08-18 13:54:21 -07003076 obj->VisitReferences(visitor, visitor);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003077 }
3078
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07003079 void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07003080 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
Orion Hodson4a01cc32018-03-26 15:46:18 +01003081 VerifyReferenceVisitor visitor(self_, heap_, fail_count_, verify_referent_);
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07003082 Runtime::Current()->VisitRoots(&visitor);
3083 }
3084
Orion Hodson4a01cc32018-03-26 15:46:18 +01003085 uint32_t GetFailureCount() const REQUIRES(Locks::mutator_lock_) {
3086 CHECK_EQ(self_, Thread::Current());
3087 return *fail_count_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003088 }
3089
3090 private:
Orion Hodson4a01cc32018-03-26 15:46:18 +01003091 Thread* const self_;
Ian Rogers1d54e732013-05-02 21:10:01 -07003092 Heap* const heap_;
Orion Hodson4a01cc32018-03-26 15:46:18 +01003093 size_t* const fail_count_;
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07003094 const bool verify_referent_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003095};
3096
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003097void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
Mathieu Chartierc1790162014-05-23 10:54:50 -07003098 // Slow path, the allocation stack push back must have already failed.
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003099 DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
Mathieu Chartierc1790162014-05-23 10:54:50 -07003100 do {
3101 // TODO: Add handle VerifyObject.
3102 StackHandleScope<1> hs(self);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003103 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
Hans Boehmd972b422017-09-11 12:57:00 -07003104 // Push our object into the reserve region of the allocation stack. This is only required due
Mathieu Chartierc1790162014-05-23 10:54:50 -07003105 // to heap verification requiring that roots are live (either in the live bitmap or in the
3106 // allocation stack).
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003107 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
Mathieu Chartierc1790162014-05-23 10:54:50 -07003108 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003109 } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
Mathieu Chartierc1790162014-05-23 10:54:50 -07003110}
3111
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003112void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
3113 ObjPtr<mirror::Object>* obj) {
Mathieu Chartierc1790162014-05-23 10:54:50 -07003114 // Slow path, the allocation stack push back must have already failed.
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003115 DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
Mathieu Chartiercb535da2015-01-23 13:50:03 -08003116 StackReference<mirror::Object>* start_address;
3117 StackReference<mirror::Object>* end_address;
Mathieu Chartierc1790162014-05-23 10:54:50 -07003118 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
3119 &end_address)) {
3120 // TODO: Add handle VerifyObject.
3121 StackHandleScope<1> hs(self);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003122 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
Mathieu Chartierc1790162014-05-23 10:54:50 -07003123 // Push our object into the reserve region of the allocation stack. This is only required due
3124 // to heap verification requiring that roots are live (either in the live bitmap or in the
3125 // allocation stack).
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003126 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
Mathieu Chartierc1790162014-05-23 10:54:50 -07003127 // Push into the reserve allocation stack.
3128 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3129 }
3130 self->SetThreadLocalAllocationStack(start_address, end_address);
3131 // Retry on the new thread-local allocation stack.
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003132 CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr())); // Must succeed.
Mathieu Chartierc1790162014-05-23 10:54:50 -07003133}
3134
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003135// Must do this with mutators suspended since we are directly accessing the allocation stacks.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003136size_t Heap::VerifyHeapReferences(bool verify_referents) {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08003137 Thread* self = Thread::Current();
3138 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003139 // Lets sort our allocation stacks so that we can efficiently binary search them.
Ian Rogers1d54e732013-05-02 21:10:01 -07003140 allocation_stack_->Sort();
3141 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08003142 // Since we sorted the allocation stack content, need to revoke all
3143 // thread-local allocation stacks.
3144 RevokeAllThreadLocalAllocationStacks(self);
Orion Hodson4a01cc32018-03-26 15:46:18 +01003145 size_t fail_count = 0;
3146 VerifyObjectVisitor visitor(self, this, &fail_count, verify_referents);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003147 // Verify objects in the allocation stack since these will be objects which were:
3148 // 1. Allocated prior to the GC (pre GC verification).
3149 // 2. Allocated during the GC (pre sweep GC verification).
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003150 // We don't want to verify the objects in the live stack since they themselves may be
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003151 // pointing to dead objects if they are not reachable.
Andreas Gampe351c4472017-07-12 19:32:55 -07003152 VisitObjectsPaused(visitor);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003153 // Verify the roots:
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07003154 visitor.VerifyRoots();
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003155 if (visitor.GetFailureCount() > 0) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07003156 // Dump mod-union tables.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003157 for (const auto& table_pair : mod_union_tables_) {
3158 accounting::ModUnionTable* mod_union_table = table_pair.second;
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07003159 mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": ");
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003160 }
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003161 // Dump remembered sets.
3162 for (const auto& table_pair : remembered_sets_) {
3163 accounting::RememberedSet* remembered_set = table_pair.second;
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07003164 remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": ");
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003165 }
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07003166 DumpSpaces(LOG_STREAM(ERROR));
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003167 }
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003168 return visitor.GetFailureCount();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003169}
3170
3171class VerifyReferenceCardVisitor {
3172 public:
3173 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07003174 REQUIRES_SHARED(Locks::mutator_lock_,
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003175 Locks::heap_bitmap_lock_)
Ian Rogers1d54e732013-05-02 21:10:01 -07003176 : heap_(heap), failed_(failed) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003177 }
3178
Mathieu Chartierda7c6502015-07-23 16:01:26 -07003179 // There are no card marks for native roots on a class.
3180 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
3181 const {}
3182 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
3183
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08003184 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
3185 // annotalysis on visitors.
Mathieu Chartier407f7022014-02-18 14:37:05 -08003186 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3187 NO_THREAD_SAFETY_ANALYSIS {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07003188 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08003189 // Filter out class references since changing an object's class does not mark the card as dirty.
3190 // Also handles large objects, since the only reference they hold is a class reference.
Mathieu Chartier407f7022014-02-18 14:37:05 -08003191 if (ref != nullptr && !ref->IsClass()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003192 accounting::CardTable* card_table = heap_->GetCardTable();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003193 // If the object is not dirty and it is referencing something in the live stack other than
3194 // its class, then it must be on a dirty card.
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07003195 if (!card_table->AddrIsInCardTable(obj)) {
3196 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3197 *failed_ = true;
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003198 } else if (!card_table->IsDirty(obj)) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08003199 // TODO: Check mod-union tables.
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08003200 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3201 // kCardDirty - 1 if it didn't get touched since we aged it.
Ian Rogers1d54e732013-05-02 21:10:01 -07003202 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier407f7022014-02-18 14:37:05 -08003203 if (live_stack->ContainsSorted(ref)) {
3204 if (live_stack->ContainsSorted(obj)) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003205 LOG(ERROR) << "Object " << obj << " found in live stack";
3206 }
3207 if (heap_->GetLiveBitmap()->Test(obj)) {
3208 LOG(ERROR) << "Object " << obj << " found in live bitmap";
3209 }
David Sehr709b0702016-10-13 09:12:37 -07003210 LOG(ERROR) << "Object " << obj << " " << mirror::Object::PrettyTypeOf(obj)
3211 << " references " << ref << " " << mirror::Object::PrettyTypeOf(ref)
3212 << " in live stack";
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003213
3214 // Print which field of the object is dead.
3215 if (!obj->IsObjectArray()) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08003216 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
Mathieu Chartierc7853442015-03-27 14:35:38 -07003217 CHECK(klass != nullptr);
Mathieu Chartierc0fe56a2015-08-11 13:01:23 -07003218 for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
Mathieu Chartier54d220e2015-07-30 16:20:06 -07003219 if (field.GetOffset().Int32Value() == offset.Int32Value()) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003220 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
David Sehr709b0702016-10-13 09:12:37 -07003221 << field.PrettyField();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003222 break;
3223 }
3224 }
3225 } else {
Ian Rogersef7d42f2014-01-06 12:55:46 -08003226 mirror::ObjectArray<mirror::Object>* object_array =
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08003227 obj->AsObjectArray<mirror::Object>();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003228 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3229 if (object_array->Get(i) == ref) {
3230 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3231 }
3232 }
3233 }
3234
3235 *failed_ = true;
3236 }
3237 }
3238 }
3239 }
3240
3241 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07003242 Heap* const heap_;
3243 bool* const failed_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003244};
3245
3246class VerifyLiveStackReferences {
3247 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07003248 explicit VerifyLiveStackReferences(Heap* heap)
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003249 : heap_(heap),
Brian Carlstrom93ba8932013-07-17 21:31:49 -07003250 failed_(false) {}
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003251
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003252 void operator()(mirror::Object* obj) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07003253 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003254 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
Mathieu Chartier059ef3d2015-08-18 13:54:21 -07003255 obj->VisitReferences(visitor, VoidFunctor());
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003256 }
3257
3258 bool Failed() const {
3259 return failed_;
3260 }
3261
3262 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07003263 Heap* const heap_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003264 bool failed_;
3265};
3266
3267bool Heap::VerifyMissingCardMarks() {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08003268 Thread* self = Thread::Current();
3269 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003270 // We need to sort the live stack since we binary search it.
Ian Rogers1d54e732013-05-02 21:10:01 -07003271 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08003272 // Since we sorted the allocation stack content, need to revoke all
3273 // thread-local allocation stacks.
3274 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003275 VerifyLiveStackReferences visitor(this);
3276 GetLiveBitmap()->Visit(visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003277 // We can verify objects in the live stack since none of these should reference dead objects.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08003278 for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3279 if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3280 visitor(it->AsMirrorPtr());
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08003281 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003282 }
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07003283 return !visitor.Failed();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07003284}
3285
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003286void Heap::SwapStacks() {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08003287 if (kUseThreadLocalAllocationStack) {
3288 live_stack_->AssertAllZero();
3289 }
Mathieu Chartierd22d5482012-11-06 17:14:12 -08003290 allocation_stack_.swap(live_stack_);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07003291}
3292
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08003293void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08003294 // This must be called only during the pause.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003295 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08003296 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3297 MutexLock mu2(self, *Locks::thread_list_lock_);
3298 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3299 for (Thread* t : thread_list) {
3300 t->RevokeThreadLocalAllocationStack();
3301 }
3302}
3303
Ian Rogers68d8b422014-07-17 11:09:10 -07003304void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3305 if (kIsDebugBuild) {
3306 if (rosalloc_space_ != nullptr) {
3307 rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3308 }
3309 if (bump_pointer_space_ != nullptr) {
3310 bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3311 }
3312 }
3313}
3314
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07003315void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3316 if (kIsDebugBuild) {
3317 if (bump_pointer_space_ != nullptr) {
3318 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3319 }
3320 }
3321}
3322
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003323accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3324 auto it = mod_union_tables_.find(space);
3325 if (it == mod_union_tables_.end()) {
3326 return nullptr;
3327 }
3328 return it->second;
3329}
3330
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003331accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3332 auto it = remembered_sets_.find(space);
3333 if (it == remembered_sets_.end()) {
3334 return nullptr;
3335 }
3336 return it->second;
3337}
3338
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003339void Heap::ProcessCards(TimingLogger* timings,
3340 bool use_rem_sets,
3341 bool process_alloc_space_cards,
Lei Li4add3b42015-01-15 11:55:26 +08003342 bool clear_alloc_space_cards) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003343 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Ian Rogers1d54e732013-05-02 21:10:01 -07003344 // Clear cards and keep track of cards cleared in the mod-union table.
Mathieu Chartier02e25112013-08-14 16:14:24 -07003345 for (const auto& space : continuous_spaces_) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003346 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003347 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003348 if (table != nullptr) {
3349 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3350 "ImageModUnionClearCards";
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003351 TimingLogger::ScopedTiming t2(name, timings);
Mathieu Chartier6e6078a2016-10-24 15:45:41 -07003352 table->ProcessCards();
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003353 } else if (use_rem_sets && rem_set != nullptr) {
3354 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
3355 << static_cast<int>(collector_type_);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003356 TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003357 rem_set->ClearCards();
Lei Li4add3b42015-01-15 11:55:26 +08003358 } else if (process_alloc_space_cards) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003359 TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
Lei Li4add3b42015-01-15 11:55:26 +08003360 if (clear_alloc_space_cards) {
Mathieu Chartierfbc31082016-01-24 11:59:56 -08003361 uint8_t* end = space->End();
3362 if (space->IsImageSpace()) {
3363 // Image space end is the end of the mirror objects; it is not necessarily page or card
3364 // aligned. Align up so that the check in ClearCardRange does not fail.
3365 end = AlignUp(end, accounting::CardTable::kCardSize);
3366 }
3367 card_table_->ClearCardRange(space->Begin(), end);
Lei Li4add3b42015-01-15 11:55:26 +08003368 } else {
3369 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3370 // cards were dirty before the GC started.
3371 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3372 // -> clean(cleaning thread).
3373 // The races mean we either end up with an aged card or an unaged card. Since we have the
3374 // checkpoint roots and then scan / update mod union tables afterwards, we will always
3375 // scan one of the two cards. If we end up with the non-aged card, we scan it in the pause.
3376 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3377 VoidFunctor());
3378 }
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07003379 }
3380 }
3381}
3382
Mathieu Chartier97509952015-07-13 14:35:43 -07003383struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
Roland Levillainf73caca2018-08-24 17:19:07 +01003384 mirror::Object* MarkObject(mirror::Object* obj) override {
Mathieu Chartier97509952015-07-13 14:35:43 -07003385 return obj;
3386 }
Roland Levillainf73caca2018-08-24 17:19:07 +01003387 void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) override {
Mathieu Chartier97509952015-07-13 14:35:43 -07003388 }
3389};
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003390
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003391void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3392 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003393 TimingLogger* const timings = current_gc_iteration_.GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003394 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003395 if (verify_pre_gc_heap_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003396 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003397 size_t failures = VerifyHeapReferences();
3398 if (failures > 0) {
3399 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3400 << " failures";
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003401 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08003402 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003403 // Check that all objects which reference things in the live stack are on dirty cards.
3404 if (verify_missing_card_marks_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003405 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003406 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003407 SwapStacks();
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003408 // Sort the live stack so that we can quickly binary search it later.
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07003409 CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3410 << " missing card mark verification failed\n" << DumpSpaces();
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003411 SwapStacks();
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003412 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003413 if (verify_mod_union_table_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003414 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003415 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003416 for (const auto& table_pair : mod_union_tables_) {
3417 accounting::ModUnionTable* mod_union_table = table_pair.second;
Mathieu Chartier97509952015-07-13 14:35:43 -07003418 IdentityMarkHeapReferenceVisitor visitor;
3419 mod_union_table->UpdateAndMarkReferences(&visitor);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003420 mod_union_table->Verify();
3421 }
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003422 }
3423}
3424
3425void Heap::PreGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07003426 if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
Andreas Gampe4934eb12017-01-30 13:15:26 -08003427 collector::GarbageCollector::ScopedPause pause(gc, false);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003428 PreGcVerificationPaused(gc);
3429 }
3430}
3431
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003432void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003433 // TODO: Add a new runtime option for this?
3434 if (verify_pre_gc_rosalloc_) {
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003435 RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003436 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08003437}
3438
Ian Rogers1d54e732013-05-02 21:10:01 -07003439void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003440 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003441 TimingLogger* const timings = current_gc_iteration_.GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003442 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003443 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3444 // reachable objects.
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003445 if (verify_pre_sweeping_heap_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003446 TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
Ian Rogers1d54e732013-05-02 21:10:01 -07003447 CHECK_NE(self->GetState(), kRunnable);
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -08003448 {
3449 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3450 // Swapping bound bitmaps does nothing.
3451 gc->SwapBitmaps();
3452 }
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07003453 // Pass in false since concurrent reference processing can mean that the reference referents
3454 // may point to dead objects at the point which PreSweepingGcVerification is called.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003455 size_t failures = VerifyHeapReferences(false);
3456 if (failures > 0) {
3457 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3458 << " failures";
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003459 }
Hiroshi Yamauchi0c8c3032015-01-16 16:54:35 -08003460 {
3461 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3462 gc->SwapBitmaps();
3463 }
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003464 }
3465 if (verify_pre_sweeping_rosalloc_) {
3466 RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3467 }
3468}
3469
3470void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3471 // Only pause if we have to do some verification.
3472 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003473 TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003474 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003475 if (verify_system_weaks_) {
3476 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3477 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3478 mark_sweep->VerifySystemWeaks();
3479 }
3480 if (verify_post_gc_rosalloc_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003481 RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003482 }
3483 if (verify_post_gc_heap_) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08003484 TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07003485 size_t failures = VerifyHeapReferences();
3486 if (failures > 0) {
3487 LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
3488 << " failures";
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003489 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003490 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08003491}
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003492
Ian Rogers1d54e732013-05-02 21:10:01 -07003493void Heap::PostGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003494 if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
Andreas Gampe4934eb12017-01-30 13:15:26 -08003495 collector::GarbageCollector::ScopedPause pause(gc, false);
Mathieu Chartierd35326f2014-08-18 15:02:59 -07003496 PostGcVerificationPaused(gc);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003497 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07003498}
3499
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003500void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07003501 TimingLogger::ScopedTiming t(name, timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07003502 for (const auto& space : continuous_spaces_) {
3503 if (space->IsRosAllocSpace()) {
3504 VLOG(heap) << name << " : " << space->GetName();
3505 space->AsRosAllocSpace()->Verify();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08003506 }
3507 }
3508}
3509
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003510collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08003511 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003512 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003513 return WaitForGcToCompleteLocked(cause, self);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003514}
3515
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003516collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003517 collector::GcType last_gc_type = collector::kGcTypeNone;
Mathieu Chartier40112dd2017-06-26 17:49:09 -07003518 GcCause last_gc_cause = kGcCauseNone;
Mathieu Chartier590fee92013-09-13 13:46:47 -07003519 uint64_t wait_start = NanoTime();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08003520 while (collector_type_running_ != kCollectorTypeNone) {
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07003521 if (self != task_processor_->GetRunningThread()) {
3522 // The current thread is about to wait for a currently running
3523 // collection to finish. If the waiting thread is not the heap
      // task daemon thread, the currently running collection is
      // considered a blocking GC.
3526 running_collection_is_blocking_ = true;
3527 VLOG(gc) << "Waiting for a blocking GC " << cause;
3528 }
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08003529 ScopedTrace trace("GC: Wait For Completion");
    // We must wait, change thread state, then sleep on gc_complete_cond_.
3531 gc_complete_cond_->Wait(self);
3532 last_gc_type = last_gc_type_;
Mathieu Chartier40112dd2017-06-26 17:49:09 -07003533 last_gc_cause = last_gc_cause_;
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07003534 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07003535 uint64_t wait_time = NanoTime() - wait_start;
3536 total_wait_time_ += wait_time;
3537 if (wait_time > long_pause_log_threshold_) {
Mathieu Chartier40112dd2017-06-26 17:49:09 -07003538 LOG(INFO) << "WaitForGcToComplete blocked " << cause << " on " << last_gc_cause << " for "
3539 << PrettyDuration(wait_time);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003540 }
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07003541 if (self != task_processor_->GetRunningThread()) {
3542 // The current thread is about to run a collection. If the thread
    // is not the heap task daemon thread, it's considered a
    // blocking GC (i.e., it blocks itself).
3545 running_collection_is_blocking_ = true;
Mathieu Chartierb166f412017-04-25 16:31:20 -07003546 // Don't log fake "GC" types that are only used for debugger or hidden APIs. If we log these,
3547 // it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
3548 if (cause == kGcCauseForAlloc ||
3549 cause == kGcCauseForNativeAlloc ||
3550 cause == kGcCauseDisableMovingGc) {
3551 VLOG(gc) << "Starting a blocking GC " << cause;
3552 }
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07003553 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07003554 return last_gc_type;
Carl Shapiro69759ea2011-07-21 18:13:35 -07003555}
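
// Illustrative sketch (not compiled into the runtime): the wait pattern above reduced to
// standard-library primitives. std::mutex and std::condition_variable stand in for
// gc_complete_lock_ and gc_complete_cond_, and the bool stands in for collector_type_running_;
// the real code additionally records the last GC type/cause and blocking-GC accounting.
#if 0
#include <condition_variable>
#include <mutex>

struct GcCompletion {
  std::mutex lock;
  std::condition_variable cond;
  bool collection_running = false;

  // Blocks the caller until any in-progress collection has finished.
  void WaitForGcToComplete() {
    std::unique_lock<std::mutex> mu(lock);
    cond.wait(mu, [this] { return !collection_running; });
  }

  // Called by the collector once a collection has finished.
  void NotifyGcComplete() {
    {
      std::lock_guard<std::mutex> mu(lock);
      collection_running = false;
    }
    cond.notify_all();
  }
};
#endif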
3556
Elliott Hughesc967f782012-04-16 10:23:15 -07003557void Heap::DumpForSigQuit(std::ostream& os) {
Ian Rogers1d54e732013-05-02 21:10:01 -07003558 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
Mathieu Chartier2fde5332012-09-14 14:51:54 -07003559 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -07003560 DumpGcPerformanceInfo(os);
Elliott Hughesc967f782012-04-16 10:23:15 -07003561}
3562
3563size_t Heap::GetPercentFree() {
Hans Boehmc220f982018-10-12 16:15:45 -07003564 return static_cast<size_t>(100.0f * static_cast<float>(
3565 GetFreeMemory()) / target_footprint_.load(std::memory_order_relaxed));
Elliott Hughesc967f782012-04-16 10:23:15 -07003566}
3567
Hans Boehmc220f982018-10-12 16:15:45 -07003568void Heap::SetIdealFootprint(size_t target_footprint) {
3569 if (target_footprint > GetMaxMemory()) {
3570 VLOG(gc) << "Clamp target GC heap from " << PrettySize(target_footprint) << " to "
Mathieu Chartier2fde5332012-09-14 14:51:54 -07003571 << PrettySize(GetMaxMemory());
Hans Boehmc220f982018-10-12 16:15:45 -07003572 target_footprint = GetMaxMemory();
Mathieu Chartier2fde5332012-09-14 14:51:54 -07003573 }
Hans Boehmc220f982018-10-12 16:15:45 -07003574 target_footprint_.store(target_footprint, std::memory_order_relaxed);
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07003575}
3576
Mathieu Chartier0795f232016-09-27 18:43:30 -07003577bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
Mathieu Chartier590fee92013-09-13 13:46:47 -07003578 if (kMovingCollector) {
Mathieu Chartier1cc62e42016-10-03 18:01:28 -07003579 space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
Mathieu Chartier31f44142014-04-08 14:40:03 -07003580 if (space != nullptr) {
3581 // TODO: Check large object?
3582 return space->CanMoveObjects();
Mathieu Chartier590fee92013-09-13 13:46:47 -07003583 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07003584 }
3585 return false;
3586}
3587
Mathieu Chartierafe49982014-03-27 10:55:04 -07003588collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
Albert Mingkun Yang1c42e752018-11-19 16:10:24 +00003589 for (auto* collector : garbage_collectors_) {
Mathieu Chartierafe49982014-03-27 10:55:04 -07003590 if (collector->GetCollectorType() == collector_type_ &&
3591 collector->GetGcType() == gc_type) {
3592 return collector;
3593 }
3594 }
3595 return nullptr;
3596}
3597
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003598double Heap::HeapGrowthMultiplier() const {
3599 // If we don't care about pause times we are background, so return 1.0.
Mathieu Chartier11c273d2017-10-15 20:54:45 -07003600 if (!CareAboutPauseTimes()) {
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003601 return 1.0;
3602 }
3603 return foreground_heap_growth_multiplier_;
3604}
3605
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003606void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
Hans Boehmc220f982018-10-12 16:15:45 -07003607 size_t bytes_allocated_before_gc) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07003608 // We know what our utilization is at this moment.
3609 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
Hans Boehmc220f982018-10-12 16:15:45 -07003610 const size_t bytes_allocated = GetBytesAllocated();
Mathieu Chartier34afcde2017-06-30 15:31:11 -07003611 // Trace the new heap size after the GC is finished.
3612 TraceHeapSize(bytes_allocated);
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003613 uint64_t target_size;
Mathieu Chartierafe49982014-03-27 10:55:04 -07003614 collector::GcType gc_type = collector_ran->GetGcType();
  // Use the multiplier to grow more for foreground.
  const double multiplier = HeapGrowthMultiplier();
Hans Boehmc220f982018-10-12 16:15:45 -07003618 const size_t adjusted_min_free = static_cast<size_t>(min_free_ * multiplier);
3619 const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier);
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003620 if (gc_type != collector::kGcTypeSticky) {
    // Grow the heap for a non-sticky GC.
Hans Boehmc220f982018-10-12 16:15:45 -07003622 uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0);
3623 DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated
3624 << " target_utilization_=" << target_utilization_;
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07003625 target_size = bytes_allocated + delta * multiplier;
Hans Boehmc220f982018-10-12 16:15:45 -07003626 target_size = std::min(target_size,
3627 static_cast<uint64_t>(bytes_allocated + adjusted_max_free));
3628 target_size = std::max(target_size,
3629 static_cast<uint64_t>(bytes_allocated + adjusted_min_free));
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003630 next_gc_type_ = collector::kGcTypeSticky;
3631 } else {
Richard Uhlercaaa2b02017-02-01 09:54:17 +00003632 collector::GcType non_sticky_gc_type = NonStickyGcType();
Mathieu Chartierafe49982014-03-27 10:55:04 -07003633 // Find what the next non sticky collector will be.
3634 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
Mathieu Chartier8d1a9962016-08-17 16:39:45 -07003635 if (kEnableGenerationalConcurrentCopyingCollection) {
3636 if (non_sticky_collector == nullptr) {
3637 non_sticky_collector = FindCollectorByGcType(collector::kGcTypePartial);
3638 }
3639 CHECK(non_sticky_collector != nullptr);
3640 }
Mathieu Chartierafe49982014-03-27 10:55:04 -07003641 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3642 // do another sticky collection next.
3643 // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
3644 // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
3645 // if the sticky GC throughput always remained >= the full/partial throughput.
Hans Boehmc220f982018-10-12 16:15:45 -07003646 size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003647 if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
Mathieu Chartierafe49982014-03-27 10:55:04 -07003648 non_sticky_collector->GetEstimatedMeanThroughput() &&
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07003649 non_sticky_collector->NumberOfIterations() > 0 &&
Hans Boehmc220f982018-10-12 16:15:45 -07003650 bytes_allocated <= target_footprint) {
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003651 next_gc_type_ = collector::kGcTypeSticky;
3652 } else {
Mathieu Chartierafe49982014-03-27 10:55:04 -07003653 next_gc_type_ = non_sticky_gc_type;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003654 }
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003655 // If we have freed enough memory, shrink the heap back down.
Hans Boehmc220f982018-10-12 16:15:45 -07003656 if (bytes_allocated + adjusted_max_free < target_footprint) {
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003657 target_size = bytes_allocated + adjusted_max_free;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003658 } else {
Hans Boehmc220f982018-10-12 16:15:45 -07003659 target_size = std::max(bytes_allocated, target_footprint);
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07003660 }
3661 }
Hans Boehmc220f982018-10-12 16:15:45 -07003662 CHECK_LE(target_size, std::numeric_limits<size_t>::max());
3663 if (!ignore_target_footprint_) {
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003664 SetIdealFootprint(target_size);
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07003665 if (IsGcConcurrent()) {
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003666 const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003667 current_gc_iteration_.GetFreedLargeObjectBytes() +
3668 current_gc_iteration_.GetFreedRevokeBytes();
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003669 // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3670 // how many bytes were allocated during the GC we need to add freed_bytes back on.
3671 CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
Hans Boehmc220f982018-10-12 16:15:45 -07003672 const size_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08003673 bytes_allocated_before_gc;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003674 // Calculate when to perform the next ConcurrentGC.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003675 // Estimate how many remaining bytes we will have when we need to start the next GC.
Lokesh Gidra1144b632018-01-18 10:12:38 -08003676 size_t remaining_bytes = bytes_allocated_during_gc;
Mathieu Chartier74762802014-01-24 10:21:35 -08003677 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003678 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
Hans Boehmc220f982018-10-12 16:15:45 -07003679 size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
3680 if (UNLIKELY(remaining_bytes > target_footprint)) {
        // A situation that should never happen: the estimated allocation rate implies we would
        // exceed the application's entire footprint. Schedule another GC nearly straight away.
Hans Boehmc220f982018-10-12 16:15:45 -07003684 remaining_bytes = std::min(kMinConcurrentRemainingBytes, target_footprint);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07003685 }
Hans Boehmc220f982018-10-12 16:15:45 -07003686 DCHECK_LE(target_footprint_.load(std::memory_order_relaxed), GetMaxMemory());
Mathieu Chartier74762802014-01-24 10:21:35 -08003687 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3688 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3689 // right away.
Hans Boehmc220f982018-10-12 16:15:45 -07003690 concurrent_start_bytes_ = std::max(target_footprint - remaining_bytes, bytes_allocated);
Mathieu Chartier65db8802012-11-20 12:36:46 -08003691 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08003692 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07003693}
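
// Illustrative sketch (not compiled into the runtime): the non-sticky growth computation
// above, pulled out with stand-in parameters. NonStickyGrowthTarget is a hypothetical helper;
// the numbers in the example comment (0.75 utilization, 2.0 multiplier, 512KB/8MB free
// bounds, 64MB live) are placeholders, not the runtime's configured defaults.
#if 0
#include <algorithm>
#include <cstddef>
#include <cstdint>

size_t NonStickyGrowthTarget(size_t bytes_allocated,
                             double target_utilization,
                             double multiplier,
                             size_t min_free,
                             size_t max_free) {
  // Grow so that the bytes surviving the GC are roughly target_utilization of the footprint.
  uint64_t delta = static_cast<uint64_t>(bytes_allocated * (1.0 / target_utilization - 1.0));
  uint64_t target = bytes_allocated + static_cast<uint64_t>(delta * multiplier);
  // Clamp the growth between the multiplier-adjusted min_free and max_free bounds.
  const size_t adjusted_min_free = static_cast<size_t>(min_free * multiplier);
  const size_t adjusted_max_free = static_cast<size_t>(max_free * multiplier);
  target = std::min<uint64_t>(target, bytes_allocated + adjusted_max_free);
  target = std::max<uint64_t>(target, bytes_allocated + adjusted_min_free);
  return static_cast<size_t>(target);
}

// Example: 64MB live, 0.75 utilization, 2.0 multiplier, 512KB/8MB free bounds:
// delta ~= 21.3MB, unclamped target ~= 64MB + 42.7MB, clamped to 64MB + 16MB = 80MB.
#endif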
3694
Mathieu Chartier379d09f2015-01-08 11:28:13 -08003695void Heap::ClampGrowthLimit() {
Mathieu Chartierddac4232015-04-02 10:08:03 -07003696 // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08003697 ScopedObjectAccess soa(Thread::Current());
3698 WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
Mathieu Chartier379d09f2015-01-08 11:28:13 -08003699 capacity_ = growth_limit_;
3700 for (const auto& space : continuous_spaces_) {
3701 if (space->IsMallocSpace()) {
3702 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3703 malloc_space->ClampGrowthLimit();
3704 }
3705 }
Lokesh Gidra5f0b71a2018-02-06 18:01:35 -08003706 if (collector_type_ == kCollectorTypeCC) {
3707 DCHECK(region_space_ != nullptr);
3708 // Twice the capacity as CC needs extra space for evacuating objects.
3709 region_space_->ClampGrowthLimit(2 * capacity_);
3710 }
Mathieu Chartier379d09f2015-01-08 11:28:13 -08003711 // This space isn't added for performance reasons.
3712 if (main_space_backup_.get() != nullptr) {
3713 main_space_backup_->ClampGrowthLimit();
3714 }
3715}
3716
jeffhaoc1160702011-10-27 15:48:45 -07003717void Heap::ClearGrowthLimit() {
Hans Boehmc220f982018-10-12 16:15:45 -07003718 if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_
3719 && growth_limit_ < capacity_) {
3720 target_footprint_.store(capacity_, std::memory_order_relaxed);
Mathieu Chartiera98a2822017-05-24 16:14:10 -07003721 concurrent_start_bytes_ =
Hans Boehmc220f982018-10-12 16:15:45 -07003722 UnsignedDifference(capacity_, kMinConcurrentRemainingBytes);
Mathieu Chartiera98a2822017-05-24 16:14:10 -07003723 }
Mathieu Chartier80de7a62012-11-27 17:21:50 -08003724 growth_limit_ = capacity_;
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08003725 ScopedObjectAccess soa(Thread::Current());
Mathieu Chartier0310da52014-12-01 13:40:48 -08003726 for (const auto& space : continuous_spaces_) {
3727 if (space->IsMallocSpace()) {
3728 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3729 malloc_space->ClearGrowthLimit();
3730 malloc_space->SetFootprintLimit(malloc_space->Capacity());
3731 }
3732 }
3733 // This space isn't added for performance reasons.
3734 if (main_space_backup_.get() != nullptr) {
3735 main_space_backup_->ClearGrowthLimit();
3736 main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3737 }
jeffhaoc1160702011-10-27 15:48:45 -07003738}
3739
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003740void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003741 ScopedObjectAccess soa(self);
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07003742 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
Ian Rogers53b8b092014-03-13 23:45:53 -07003743 jvalue args[1];
3744 args[0].l = arg.get();
3745 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07003746 // Restore object in case it gets moved.
Mathieu Chartier28bd2e42016-10-04 13:54:57 -07003747 *object = soa.Decode<mirror::Object>(arg.get());
Ian Rogers00f7d0e2012-07-19 15:28:27 -07003748}
3749
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003750void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
3751 bool force_full,
3752 ObjPtr<mirror::Object>* obj) {
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003753 StackHandleScope<1> hs(self);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07003754 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
Mathieu Chartier35b59a22017-04-17 15:24:43 -07003755 RequestConcurrentGC(self, kGcCauseBackground, force_full);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07003756}
3757
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003758class Heap::ConcurrentGCTask : public HeapTask {
3759 public:
Mathieu Chartier35b59a22017-04-17 15:24:43 -07003760 ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
3761 : HeapTask(target_time), cause_(cause), force_full_(force_full) {}
Roland Levillainf73caca2018-08-24 17:19:07 +01003762 void Run(Thread* self) override {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003763 gc::Heap* heap = Runtime::Current()->GetHeap();
Mathieu Chartier35b59a22017-04-17 15:24:43 -07003764 heap->ConcurrentGC(self, cause_, force_full_);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003765 heap->ClearConcurrentGCRequest();
Ian Rogers120f1c72012-09-28 17:17:10 -07003766 }
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003767
3768 private:
Mathieu Chartier35b59a22017-04-17 15:24:43 -07003769 const GcCause cause_;
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003770 const bool force_full_; // If true, force full (or partial) collection.
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003771};
3772
Mathieu Chartier90443472015-07-16 20:32:27 -07003773static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003774 Runtime* runtime = Runtime::Current();
3775 return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3776 !self->IsHandlingStackOverflow();
3777}
3778
3779void Heap::ClearConcurrentGCRequest() {
Orion Hodson88591fe2018-03-06 13:35:43 +00003780 concurrent_gc_pending_.store(false, std::memory_order_relaxed);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003781}
3782
Mathieu Chartier35b59a22017-04-17 15:24:43 -07003783void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) {
Mathieu Chartierac195162015-02-20 18:44:28 +00003784 if (CanAddHeapTask(self) &&
Orion Hodson4557b382018-01-03 11:47:54 +00003785 concurrent_gc_pending_.CompareAndSetStrongSequentiallyConsistent(false, true)) {
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003786 task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
Mathieu Chartier35b59a22017-04-17 15:24:43 -07003787 cause,
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003788 force_full));
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003789 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07003790}
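
// Illustrative sketch (not compiled into the runtime): coalescing concurrent-GC requests with
// a single atomic flag, as RequestConcurrentGC/ClearConcurrentGCRequest do above.
// std::atomic<bool> stands in for concurrent_gc_pending_ and the callback stands in for
// posting a ConcurrentGCTask to the task processor.
#if 0
#include <atomic>
#include <functional>

class GcRequestCoalescer {
 public:
  // Returns true if this call enqueued work; concurrent requests collapse into one task.
  bool Request(const std::function<void()>& enqueue_task) {
    bool expected = false;
    if (pending_.compare_exchange_strong(expected, true)) {
      enqueue_task();
      return true;
    }
    return false;
  }

  // Called once the queued GC task has run.
  void Clear() { pending_.store(false, std::memory_order_relaxed); }

 private:
  std::atomic<bool> pending_{false};
};
#endif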
3791
Mathieu Chartier35b59a22017-04-17 15:24:43 -07003792void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full) {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003793 if (!Runtime::Current()->IsShuttingDown(self)) {
3794 // Wait for any GCs currently running to finish.
Mathieu Chartier35b59a22017-04-17 15:24:43 -07003795 if (WaitForGcToComplete(cause, self) == collector::kGcTypeNone) {
      // If we can't run the GC type we wanted to run, find the next appropriate one and try
      // that instead. E.g. can't do partial, so do full instead.
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003798 collector::GcType next_gc_type = next_gc_type_;
3799 // If forcing full and next gc type is sticky, override with a non-sticky type.
3800 if (force_full && next_gc_type == collector::kGcTypeSticky) {
Richard Uhlercaaa2b02017-02-01 09:54:17 +00003801 next_gc_type = NonStickyGcType();
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003802 }
Mathieu Chartier35b59a22017-04-17 15:24:43 -07003803 if (CollectGarbageInternal(next_gc_type, cause, false) == collector::kGcTypeNone) {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003804 for (collector::GcType gc_type : gc_plan_) {
3805 // Attempt to run the collector, if we succeed, we are done.
Hiroshi Yamauchi0ae98992015-05-01 14:33:19 -07003806 if (gc_type > next_gc_type &&
Mathieu Chartier35b59a22017-04-17 15:24:43 -07003807 CollectGarbageInternal(gc_type, cause, false) != collector::kGcTypeNone) {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003808 break;
3809 }
Mathieu Chartierf9ed0d32013-11-21 16:42:47 -08003810 }
3811 }
3812 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07003813 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07003814}
3815
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003816class Heap::CollectorTransitionTask : public HeapTask {
3817 public:
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07003818 explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
3819
Roland Levillainf73caca2018-08-24 17:19:07 +01003820 void Run(Thread* self) override {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003821 gc::Heap* heap = Runtime::Current()->GetHeap();
3822 heap->DoPendingCollectorTransition();
3823 heap->ClearPendingCollectorTransition(self);
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003824 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003825};
3826
3827void Heap::ClearPendingCollectorTransition(Thread* self) {
3828 MutexLock mu(self, *pending_task_lock_);
3829 pending_collector_transition_ = nullptr;
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003830}
3831
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003832void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3833 Thread* self = Thread::Current();
3834 desired_collector_type_ = desired_collector_type;
3835 if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3836 return;
3837 }
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -07003838 if (collector_type_ == kCollectorTypeCC) {
3839 // For CC, we invoke a full compaction when going to the background, but the collector type
3840 // doesn't change.
3841 DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
3842 }
3843 DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003844 CollectorTransitionTask* added_task = nullptr;
3845 const uint64_t target_time = NanoTime() + delta_time;
3846 {
3847 MutexLock mu(self, *pending_task_lock_);
    // If we have an existing collector transition, update the target time to be the new target.
3849 if (pending_collector_transition_ != nullptr) {
3850 task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3851 return;
3852 }
3853 added_task = new CollectorTransitionTask(target_time);
3854 pending_collector_transition_ = added_task;
3855 }
3856 task_processor_->AddTask(self, added_task);
3857}
3858
3859class Heap::HeapTrimTask : public HeapTask {
3860 public:
3861 explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
Roland Levillainf73caca2018-08-24 17:19:07 +01003862 void Run(Thread* self) override {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003863 gc::Heap* heap = Runtime::Current()->GetHeap();
3864 heap->Trim(self);
3865 heap->ClearPendingTrim(self);
3866 }
3867};
3868
3869void Heap::ClearPendingTrim(Thread* self) {
3870 MutexLock mu(self, *pending_task_lock_);
3871 pending_heap_trim_ = nullptr;
3872}
3873
3874void Heap::RequestTrim(Thread* self) {
3875 if (!CanAddHeapTask(self)) {
3876 return;
3877 }
Ian Rogers48931882013-01-22 14:35:16 -08003878 // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3879 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3880 // a space it will hold its lock and can become a cause of jank.
3881 // Note, the large object space self trims and the Zygote space was trimmed and unchanging since
3882 // forking.
3883
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08003884 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3885 // because that only marks object heads, so a large array looks like lots of empty space. We
3886 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3887 // to utilization (which is probably inversely proportional to how much benefit we can expect).
3888 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3889 // not how much use we're making of those pages.
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003890 HeapTrimTask* added_task = nullptr;
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07003891 {
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003892 MutexLock mu(self, *pending_task_lock_);
3893 if (pending_heap_trim_ != nullptr) {
3894 // Already have a heap trim request in task processor, ignore this request.
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07003895 return;
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003896 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003897 added_task = new HeapTrimTask(kHeapTrimWait);
3898 pending_heap_trim_ = added_task;
Mathieu Chartierc39e3422013-08-07 16:41:36 -07003899 }
Mathieu Chartiera5eae692014-12-17 17:56:03 -08003900 task_processor_->AddTask(self, added_task);
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003901}
3902
Orion Hodson82cf9a22018-03-27 16:36:32 +01003903void Heap::IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke) {
3904 size_t previous_num_bytes_freed_revoke =
Hans Boehmfb8b4e22018-09-05 16:45:42 -07003905 num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_relaxed);
Orion Hodson82cf9a22018-03-27 16:36:32 +01003906 // Check the updated value is less than the number of bytes allocated. There is a risk of
3907 // execution being suspended between the increment above and the CHECK below, leading to
3908 // the use of previous_num_bytes_freed_revoke in the comparison.
3909 CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed),
3910 previous_num_bytes_freed_revoke + freed_bytes_revoke);
3911}
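
// Illustrative sketch (not compiled into the runtime): why the check above uses the value
// returned by fetch_add. Re-reading the counter after the increment could observe later
// updates from other threads, so the invariant is verified against the pre-increment value
// plus the delta. The atomics stand in for num_bytes_freed_revoke_ and num_bytes_allocated_.
#if 0
#include <atomic>
#include <cassert>
#include <cstddef>

void AddFreedRevokeBytes(std::atomic<size_t>& freed_revoke,
                         const std::atomic<size_t>& allocated,
                         size_t delta) {
  const size_t previous = freed_revoke.fetch_add(delta, std::memory_order_relaxed);
  // Bytes freed by revoking buffers must never exceed the bytes currently counted as allocated.
  assert(allocated.load(std::memory_order_relaxed) >= previous + delta);
}
#endif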
3912
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003913void Heap::RevokeThreadLocalBuffers(Thread* thread) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003914 if (rosalloc_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003915 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3916 if (freed_bytes_revoke > 0U) {
Orion Hodson82cf9a22018-03-27 16:36:32 +01003917 IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003918 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003919 }
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003920 if (bump_pointer_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003921 CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003922 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003923 if (region_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003924 CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003925 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003926}
3927
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07003928void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3929 if (rosalloc_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003930 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3931 if (freed_bytes_revoke > 0U) {
Orion Hodson82cf9a22018-03-27 16:36:32 +01003932 IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003933 }
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07003934 }
3935}
3936
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003937void Heap::RevokeAllThreadLocalBuffers() {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003938 if (rosalloc_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003939 size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3940 if (freed_bytes_revoke > 0U) {
Orion Hodson82cf9a22018-03-27 16:36:32 +01003941 IncrementNumberOfBytesFreedRevoke(freed_bytes_revoke);
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003942 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003943 }
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003944 if (bump_pointer_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003945 CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003946 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003947 if (region_space_ != nullptr) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07003948 CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08003949 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003950}
3951
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003952bool Heap::IsGCRequestPending() const {
Orion Hodson88591fe2018-03-06 13:35:43 +00003953 return concurrent_gc_pending_.load(std::memory_order_relaxed);
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003954}
3955
Mathieu Chartierb5de3bb2015-06-05 13:21:05 -07003956void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3957 env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3958 WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3959 static_cast<jlong>(timeout));
Mathieu Chartier590fee92013-09-13 13:46:47 -07003960}
3961
Hans Boehmc220f982018-10-12 16:15:45 -07003962// For GC triggering purposes, we count old (pre-last-GC) and new native allocations as
3963// different fractions of Java allocations.
3964// For now, we essentially do not count old native allocations at all, so that we can preserve the
3965// existing behavior of not limiting native heap size. If we seriously considered it, we would
3966// have to adjust collection thresholds when we encounter large amounts of old native memory,
3967// and handle native out-of-memory situations.
Richard Uhler36bdbd22017-01-24 14:17:05 +00003968
Hans Boehmc220f982018-10-12 16:15:45 -07003969static constexpr size_t kOldNativeDiscountFactor = 65536; // Approximately infinite for now.
3970static constexpr size_t kNewNativeDiscountFactor = 2;
3971
// If weighted java + native memory use exceeds our target by a factor of kStopForNativeFactor,
// and newly allocated memory exceeds kHugeNativeAllocs, we wait for GC to complete to avoid
// running out of memory.
3975static constexpr float kStopForNativeFactor = 2.0;
3976static constexpr size_t kHugeNativeAllocs = 200*1024*1024;
3977
3978// Return the ratio of the weighted native + java allocated bytes to its target value.
3979// A return value > 1.0 means we should collect. Significantly larger values mean we're falling
3980// behind.
3981inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes) {
3982 // Collection check for native allocation. Does not enforce Java heap bounds.
3983 // With adj_start_bytes defined below, effectively checks
  // <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes,
  // where currently c1 and c2 are 1 divided by kOldNativeDiscountFactor and
  // kNewNativeDiscountFactor respectively.
3986 size_t old_native_bytes = old_native_bytes_allocated_.load(std::memory_order_relaxed);
3987 if (old_native_bytes > current_native_bytes) {
3988 // Net decrease; skip the check, but update old value.
3989 // It's OK to lose an update if two stores race.
3990 old_native_bytes_allocated_.store(current_native_bytes, std::memory_order_relaxed);
3991 return 0.0;
3992 } else {
3993 size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes);
3994 size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
3995 + old_native_bytes / kOldNativeDiscountFactor;
3996 size_t adj_start_bytes = concurrent_start_bytes_
3997 + NativeAllocationGcWatermark() / kNewNativeDiscountFactor;
3998 return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
3999 / static_cast<float>(adj_start_bytes);
4000 }
4001}
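
// Illustrative sketch (not compiled into the runtime): the weighted native-memory check above
// with stand-in parameters. NativeGcUrgency is a hypothetical helper; the discount constants
// mirror kOldNativeDiscountFactor/kNewNativeDiscountFactor and the byte counts in the example
// are placeholders.
#if 0
#include <cstddef>

constexpr size_t kOldDiscount = 65536;  // Mirrors kOldNativeDiscountFactor.
constexpr size_t kNewDiscount = 2;      // Mirrors kNewNativeDiscountFactor.

// Ratio of weighted (java + native) bytes to the adjusted concurrent-GC start threshold.
// A result >= 1.0 means a collection should be requested.
float NativeGcUrgency(size_t java_bytes,
                      size_t current_native,
                      size_t old_native,
                      size_t concurrent_start_bytes,
                      size_t native_watermark) {
  const size_t new_native = current_native > old_native ? current_native - old_native : 0;
  const size_t weighted_native = new_native / kNewDiscount + old_native / kOldDiscount;
  const size_t adj_start = concurrent_start_bytes + native_watermark / kNewDiscount;
  return static_cast<float>(java_bytes + weighted_native) / static_cast<float>(adj_start);
}

// Example: 40MB java, 96MB native now vs 32MB at the last GC, 60MB start threshold, 8MB
// watermark: weighted_native ~= 32MB, adj_start = 64MB, urgency = 72MB / 64MB ~= 1.13.
#endif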
4002
4003inline void Heap::CheckConcurrentGCForNative(Thread* self) {
4004 size_t current_native_bytes = GetNativeBytes();
4005 float gc_urgency = NativeMemoryOverTarget(current_native_bytes);
4006 if (UNLIKELY(gc_urgency >= 1.0)) {
Richard Uhlercaaa2b02017-02-01 09:54:17 +00004007 if (IsGcConcurrent()) {
Hans Boehmc220f982018-10-12 16:15:45 -07004008 RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true);
4009 if (gc_urgency > kStopForNativeFactor
4010 && current_native_bytes > kHugeNativeAllocs) {
4011 // We're in danger of running out of memory due to rampant native allocation.
4012 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
4013 LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency;
4014 }
4015 WaitForGcToComplete(kGcCauseForAlloc, self);
4016 }
Richard Uhlercaaa2b02017-02-01 09:54:17 +00004017 } else {
4018 CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
4019 }
Mathieu Chartier987ccff2013-07-08 11:05:21 -07004020 }
4021}
4022
Hans Boehmc220f982018-10-12 16:15:45 -07004023// About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect.
4024void Heap::NotifyNativeAllocations(JNIEnv* env) {
4025 native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed);
4026 CheckConcurrentGCForNative(ThreadForEnv(env));
4027}
4028
4029// Register a native allocation with an explicit size.
4030// This should only be done for large allocations of non-malloc memory, which we wouldn't
4031// otherwise see.
4032void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
4033 native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed);
4034 uint32_t objects_notified =
4035 native_objects_notified_.fetch_add(1, std::memory_order_relaxed);
4036 if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1
4037 || bytes > kCheckImmediatelyThreshold) {
4038 CheckConcurrentGCForNative(ThreadForEnv(env));
Richard Uhlercaaa2b02017-02-01 09:54:17 +00004039 }
Mathieu Chartier987ccff2013-07-08 11:05:21 -07004040}
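
// Illustrative sketch (not compiled into the runtime): rate-limiting the native-allocation GC
// check, as NotifyNativeAllocations/RegisterNativeAllocation do above. The constants here are
// placeholders standing in for kNotifyNativeInterval and kCheckImmediatelyThreshold.
#if 0
#include <atomic>
#include <cstddef>
#include <cstdint>

constexpr uint32_t kNotifyInterval = 32;            // Placeholder.
constexpr size_t kImmediateThreshold = 300 * 1024;  // Placeholder.

std::atomic<uint32_t> objects_notified{0};

// Returns true when this registration should trigger a check of the GC-trigger condition.
bool ShouldCheckGcForNativeAllocation(size_t bytes) {
  const uint32_t notified = objects_notified.fetch_add(1, std::memory_order_relaxed);
  return (notified % kNotifyInterval) == kNotifyInterval - 1 || bytes > kImmediateThreshold;
}
#endif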
4041
Hans Boehmc220f982018-10-12 16:15:45 -07004042void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
4043 size_t allocated;
4044 size_t new_freed_bytes;
4045 do {
4046 allocated = native_bytes_registered_.load(std::memory_order_relaxed);
4047 new_freed_bytes = std::min(allocated, bytes);
4048 // We should not be registering more free than allocated bytes.
4049 // But correctly keep going in non-debug builds.
4050 DCHECK_EQ(new_freed_bytes, bytes);
4051 } while (!native_bytes_registered_.CompareAndSetWeakRelaxed(allocated,
4052 allocated - new_freed_bytes));
4053}
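
// Illustrative sketch (not compiled into the runtime): the compare-and-set loop above, which
// decrements a counter without ever letting it underflow even if callers over-report frees.
// std::atomic<size_t> stands in for native_bytes_registered_.
#if 0
#include <algorithm>
#include <atomic>
#include <cstddef>

void DecrementClamped(std::atomic<size_t>& counter, size_t bytes) {
  size_t current = counter.load(std::memory_order_relaxed);
  size_t to_subtract;
  do {
    // Never subtract more than is currently registered; 'current' is refreshed on CAS failure.
    to_subtract = std::min(current, bytes);
  } while (!counter.compare_exchange_weak(current,
                                          current - to_subtract,
                                          std::memory_order_relaxed));
}
#endif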
4054
Ian Rogersef7d42f2014-01-06 12:55:46 -08004055size_t Heap::GetTotalMemory() const {
Hans Boehmc220f982018-10-12 16:15:45 -07004056 return std::max(target_footprint_.load(std::memory_order_relaxed), GetBytesAllocated());
Hiroshi Yamauchi09b07a92013-07-15 13:17:06 -07004057}
4058
Mathieu Chartier11409ae2013-09-23 11:49:36 -07004059void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
4060 DCHECK(mod_union_table != nullptr);
4061 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
4062}
4063
Mathieu Chartier9d156d52016-10-06 17:44:26 -07004064void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
Mathieu Chartierdf7f7f02017-10-05 09:47:58 -07004065 // Compare rounded sizes since the allocation may have been retried after rounding the size.
4066 // See b/37885600
Mathieu Chartiera5eae692014-12-17 17:56:03 -08004067 CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
Mathieu Chartieraac90122017-10-04 14:58:34 -07004068 (c->IsVariableSize() ||
4069 RoundUp(c->GetObjectSize(), kObjectAlignment) ==
4070 RoundUp(byte_count, kObjectAlignment)))
Mathieu Chartier8876fb72017-02-24 12:39:53 -08004071 << "ClassFlags=" << c->GetClassFlags()
4072 << " IsClassClass=" << c->IsClassClass()
4073 << " byte_count=" << byte_count
4074 << " IsVariableSize=" << c->IsVariableSize()
4075 << " ObjectSize=" << c->GetObjectSize()
4076 << " sizeof(Class)=" << sizeof(mirror::Class)
Andreas Gampe98ea9d92018-10-19 14:06:15 -07004077 << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass");
Mathieu Chartierc645f1d2014-03-06 18:11:53 -08004078 CHECK_GE(byte_count, sizeof(mirror::Object));
4079}
4080
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08004081void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
4082 CHECK(remembered_set != nullptr);
4083 space::Space* space = remembered_set->GetSpace();
4084 CHECK(space != nullptr);
Mathieu Chartier8e4a96d2014-05-21 10:44:32 -07004085 CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08004086 remembered_sets_.Put(space, remembered_set);
Mathieu Chartier8e4a96d2014-05-21 10:44:32 -07004087 CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08004088}
4089
4090void Heap::RemoveRememberedSet(space::Space* space) {
4091 CHECK(space != nullptr);
4092 auto it = remembered_sets_.find(space);
4093 CHECK(it != remembered_sets_.end());
Mathieu Chartier5189e242014-07-24 11:11:05 -07004094 delete it->second;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08004095 remembered_sets_.erase(it);
4096 CHECK(remembered_sets_.find(space) == remembered_sets_.end());
4097}
4098
Mathieu Chartier4aeec172014-03-27 16:09:46 -07004099void Heap::ClearMarkedObjects() {
4100 // Clear all of the spaces' mark bitmaps.
4101 for (const auto& space : GetContinuousSpaces()) {
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07004102 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartier4aeec172014-03-27 16:09:46 -07004103 if (space->GetLiveBitmap() != mark_bitmap) {
4104 mark_bitmap->Clear();
4105 }
4106 }
  // Clear the marked objects in the discontinuous space object sets.
4108 for (const auto& space : GetDiscontinuousSpaces()) {
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07004109 space->GetMarkBitmap()->Clear();
Mathieu Chartier4aeec172014-03-27 16:09:46 -07004110 }
4111}
4112
Man Cao8c2ff642015-05-27 17:25:30 -07004113void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
4114 allocation_records_.reset(records);
4115}
4116
Man Cao1ed11b92015-06-11 22:47:35 -07004117void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
4118 if (IsAllocTrackingEnabled()) {
4119 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4120 if (IsAllocTrackingEnabled()) {
4121 GetAllocationRecords()->VisitRoots(visitor);
4122 }
4123 }
4124}
4125
Mathieu Chartier97509952015-07-13 14:35:43 -07004126void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
Man Cao8c2ff642015-05-27 17:25:30 -07004127 if (IsAllocTrackingEnabled()) {
4128 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4129 if (IsAllocTrackingEnabled()) {
Mathieu Chartier97509952015-07-13 14:35:43 -07004130 GetAllocationRecords()->SweepAllocationRecords(visitor);
Man Cao8c2ff642015-05-27 17:25:30 -07004131 }
4132 }
4133}
4134
Man Cao42c3c332015-06-23 16:38:25 -07004135void Heap::AllowNewAllocationRecords() const {
Hiroshi Yamauchifdbd13c2015-09-02 16:16:58 -07004136 CHECK(!kUseReadBarrier);
Hiroshi Yamauchi6f0c6cd2016-03-18 17:17:52 -07004137 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4138 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4139 if (allocation_records != nullptr) {
4140 allocation_records->AllowNewAllocationRecords();
Man Cao42c3c332015-06-23 16:38:25 -07004141 }
4142}
4143
4144void Heap::DisallowNewAllocationRecords() const {
Hiroshi Yamauchifdbd13c2015-09-02 16:16:58 -07004145 CHECK(!kUseReadBarrier);
Hiroshi Yamauchi6f0c6cd2016-03-18 17:17:52 -07004146 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4147 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4148 if (allocation_records != nullptr) {
4149 allocation_records->DisallowNewAllocationRecords();
Man Cao42c3c332015-06-23 16:38:25 -07004150 }
4151}
4152
Hiroshi Yamauchifdbd13c2015-09-02 16:16:58 -07004153void Heap::BroadcastForNewAllocationRecords() const {
Hiroshi Yamauchi6f0c6cd2016-03-18 17:17:52 -07004154 // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
4155 // be set to false while some threads are waiting for system weak access in
4156 // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
4157 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4158 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4159 if (allocation_records != nullptr) {
4160 allocation_records->BroadcastForNewAllocationRecords();
Hiroshi Yamauchifdbd13c2015-09-02 16:16:58 -07004161 }
4162}
4163
Mathieu Chartier9d156d52016-10-06 17:44:26 -07004164void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
Vladimir Marko317892b2018-05-31 11:11:32 +01004165 DCHECK(gc_stress_mode_);
Mathieu Chartier31000802015-06-14 14:14:37 -07004166 auto* const runtime = Runtime::Current();
Vladimir Marko317892b2018-05-31 11:11:32 +01004167 if (runtime->GetClassLinker()->IsInitialized() && !runtime->IsActiveTransaction()) {
Mathieu Chartier31000802015-06-14 14:14:37 -07004168 // Check if we should GC.
4169 bool new_backtrace = false;
4170 {
4171 static constexpr size_t kMaxFrames = 16u;
Mathieu Chartier34583592017-03-23 23:51:34 -07004172 FixedSizeBacktrace<kMaxFrames> backtrace;
Andreas Gampe98ea9d92018-10-19 14:06:15 -07004173 backtrace.Collect(/* skip_count= */ 2);
Mathieu Chartier34583592017-03-23 23:51:34 -07004174 uint64_t hash = backtrace.Hash();
Mathieu Chartier31000802015-06-14 14:14:37 -07004175 MutexLock mu(self, *backtrace_lock_);
4176 new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
4177 if (new_backtrace) {
4178 seen_backtraces_.insert(hash);
4179 }
4180 }
4181 if (new_backtrace) {
4182 StackHandleScope<1> hs(self);
4183 auto h = hs.NewHandleWrapper(obj);
Andreas Gampe98ea9d92018-10-19 14:06:15 -07004184 CollectGarbage(/* clear_soft_references= */ false);
Hans Boehmfb8b4e22018-09-05 16:45:42 -07004185 unique_backtrace_count_.fetch_add(1);
Mathieu Chartier31000802015-06-14 14:14:37 -07004186 } else {
Hans Boehmfb8b4e22018-09-05 16:45:42 -07004187 seen_backtrace_count_.fetch_add(1);
Mathieu Chartier31000802015-06-14 14:14:37 -07004188 }
4189 }
4190}
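
// Illustrative sketch (not compiled into the runtime): forcing work only for allocation sites
// not seen before, as the backtrace-hash check above does. The hash is assumed precomputed;
// std::mutex/std::unordered_set stand in for backtrace_lock_/seen_backtraces_.
#if 0
#include <cstdint>
#include <mutex>
#include <unordered_set>

class BacktraceDeduper {
 public:
  // Returns true exactly once per distinct backtrace hash.
  bool IsNewBacktrace(uint64_t hash) {
    std::lock_guard<std::mutex> mu(lock_);
    return seen_.insert(hash).second;
  }

 private:
  std::mutex lock_;
  std::unordered_set<uint64_t> seen_;
};
#endif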
4191
Mathieu Chartier51168372015-08-12 16:40:32 -07004192void Heap::DisableGCForShutdown() {
4193 Thread* const self = Thread::Current();
4194 CHECK(Runtime::Current()->IsShuttingDown(self));
4195 MutexLock mu(self, *gc_complete_lock_);
4196 gc_disabled_for_shutdown_ = true;
4197}
4198
Mathieu Chartier9d156d52016-10-06 17:44:26 -07004199bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
Mathieu Chartierfbc31082016-01-24 11:59:56 -08004200 for (gc::space::ImageSpace* space : boot_image_spaces_) {
Mathieu Chartier9d156d52016-10-06 17:44:26 -07004201 if (space->HasAddress(obj.Ptr())) {
Mathieu Chartierfbc31082016-01-24 11:59:56 -08004202 return true;
4203 }
4204 }
4205 return false;
4206}
4207
Mingyao Yang6ea1a0e2016-01-29 12:12:49 -08004208bool Heap::IsInBootImageOatFile(const void* p) const {
4209 for (gc::space::ImageSpace* space : boot_image_spaces_) {
4210 if (space->GetOatFile()->Contains(p)) {
4211 return true;
4212 }
4213 }
4214 return false;
4215}
4216
Mathieu Chartierfbc31082016-01-24 11:59:56 -08004217void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
4218 uint32_t* boot_image_end,
4219 uint32_t* boot_oat_begin,
4220 uint32_t* boot_oat_end) {
4221 DCHECK(boot_image_begin != nullptr);
4222 DCHECK(boot_image_end != nullptr);
4223 DCHECK(boot_oat_begin != nullptr);
4224 DCHECK(boot_oat_end != nullptr);
4225 *boot_image_begin = 0u;
4226 *boot_image_end = 0u;
4227 *boot_oat_begin = 0u;
4228 *boot_oat_end = 0u;
4229 for (gc::space::ImageSpace* space_ : GetBootImageSpaces()) {
4230 const uint32_t image_begin = PointerToLowMemUInt32(space_->Begin());
4231 const uint32_t image_size = space_->GetImageHeader().GetImageSize();
4232 if (*boot_image_begin == 0 || image_begin < *boot_image_begin) {
4233 *boot_image_begin = image_begin;
4234 }
4235 *boot_image_end = std::max(*boot_image_end, image_begin + image_size);
4236 const OatFile* boot_oat_file = space_->GetOatFile();
4237 const uint32_t oat_begin = PointerToLowMemUInt32(boot_oat_file->Begin());
4238 const uint32_t oat_size = boot_oat_file->Size();
4239 if (*boot_oat_begin == 0 || oat_begin < *boot_oat_begin) {
4240 *boot_oat_begin = oat_begin;
4241 }
4242 *boot_oat_end = std::max(*boot_oat_end, oat_begin + oat_size);
4243 }
4244}
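
// Illustrative sketch (not compiled into the runtime): computing one covering [begin, end)
// range over a list of (begin, size) regions, as GetBootImagesSize does for the boot image
// and oat mappings. A begin of 0 is treated as "not yet set", matching the code above.
#if 0
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

std::pair<uint32_t, uint32_t> CoveringRange(
    const std::vector<std::pair<uint32_t, uint32_t>>& regions) {
  uint32_t begin = 0u;
  uint32_t end = 0u;
  for (const auto& region : regions) {
    if (begin == 0u || region.first < begin) {
      begin = region.first;
    }
    end = std::max(end, region.first + region.second);
  }
  return {begin, end};
}
#endif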
4245
Andreas Gampe27fa96c2016-10-07 15:05:24 -07004246void Heap::SetAllocationListener(AllocationListener* l) {
4247 AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l);
4248
4249 if (old == nullptr) {
4250 Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4251 }
4252}
4253
4254void Heap::RemoveAllocationListener() {
4255 AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
4256
4257 if (old != nullptr) {
Andreas Gampe172ec8e2016-10-12 13:50:20 -07004258 Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
Andreas Gampe27fa96c2016-10-07 15:05:24 -07004259 }
4260}
4261
Andreas Gampe9b8c5882016-10-21 15:27:46 -07004262void Heap::SetGcPauseListener(GcPauseListener* l) {
Orion Hodson88591fe2018-03-06 13:35:43 +00004263 gc_pause_listener_.store(l, std::memory_order_relaxed);
Andreas Gampe9b8c5882016-10-21 15:27:46 -07004264}
4265
4266void Heap::RemoveGcPauseListener() {
Orion Hodson88591fe2018-03-06 13:35:43 +00004267 gc_pause_listener_.store(nullptr, std::memory_order_relaxed);
Andreas Gampe9b8c5882016-10-21 15:27:46 -07004268}
Andreas Gampe27fa96c2016-10-07 15:05:24 -07004269
Mathieu Chartier5ace2012016-11-30 10:15:41 -08004270mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
4271 size_t alloc_size,
4272 bool grow,
4273 size_t* bytes_allocated,
4274 size_t* usable_size,
4275 size_t* bytes_tl_bulk_allocated) {
4276 const AllocatorType allocator_type = GetCurrentAllocator();
Mathieu Chartier6bc77742017-04-18 17:46:23 -07004277 if (kUsePartialTlabs && alloc_size <= self->TlabRemainingCapacity()) {
4278 DCHECK_GT(alloc_size, self->TlabSize());
    // There is enough space if we grow the TLAB. Let's do that. This increases the
4280 // TLAB bytes.
4281 const size_t min_expand_size = alloc_size - self->TlabSize();
4282 const size_t expand_bytes = std::max(
4283 min_expand_size,
4284 std::min(self->TlabRemainingCapacity() - self->TlabSize(), kPartialTlabSize));
4285 if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, expand_bytes, grow))) {
4286 return nullptr;
4287 }
4288 *bytes_tl_bulk_allocated = expand_bytes;
4289 self->ExpandTlab(expand_bytes);
4290 DCHECK_LE(alloc_size, self->TlabSize());
4291 } else if (allocator_type == kAllocatorTypeTLAB) {
Mathieu Chartier5ace2012016-11-30 10:15:41 -08004292 DCHECK(bump_pointer_space_ != nullptr);
4293 const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
4294 if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
4295 return nullptr;
4296 }
4297 // Try allocating a new thread local buffer, if the allocation fails the space must be
4298 // full so return null.
4299 if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
4300 return nullptr;
4301 }
4302 *bytes_tl_bulk_allocated = new_tlab_size;
4303 } else {
4304 DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
4305 DCHECK(region_space_ != nullptr);
4306 if (space::RegionSpace::kRegionSize >= alloc_size) {
4307 // Non-large. Check OOME for a tlab.
4308 if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
4309 space::RegionSpace::kRegionSize,
4310 grow))) {
Mathieu Chartier6bc77742017-04-18 17:46:23 -07004311 const size_t new_tlab_size = kUsePartialTlabs
4312 ? std::max(alloc_size, kPartialTlabSize)
4313 : gc::space::RegionSpace::kRegionSize;
Mathieu Chartier5ace2012016-11-30 10:15:41 -08004314 // Try to allocate a tlab.
Mathieu Chartier6bc77742017-04-18 17:46:23 -07004315 if (!region_space_->AllocNewTlab(self, new_tlab_size)) {
Mathieu Chartier5ace2012016-11-30 10:15:41 -08004316 // Failed to allocate a tlab. Try non-tlab.
4317 return region_space_->AllocNonvirtual<false>(alloc_size,
4318 bytes_allocated,
4319 usable_size,
4320 bytes_tl_bulk_allocated);
4321 }
Mathieu Chartier6bc77742017-04-18 17:46:23 -07004322 *bytes_tl_bulk_allocated = new_tlab_size;
Mathieu Chartier5ace2012016-11-30 10:15:41 -08004323 // Fall-through to using the TLAB below.
4324 } else {
4325 // Check OOME for a non-tlab allocation.
4326 if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
4327 return region_space_->AllocNonvirtual<false>(alloc_size,
4328 bytes_allocated,
4329 usable_size,
4330 bytes_tl_bulk_allocated);
4331 }
        // Neither tlab nor non-tlab works. Give up.
4333 return nullptr;
4334 }
4335 } else {
4336 // Large. Check OOME.
4337 if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
4338 return region_space_->AllocNonvirtual<false>(alloc_size,
4339 bytes_allocated,
4340 usable_size,
4341 bytes_tl_bulk_allocated);
4342 }
4343 return nullptr;
4344 }
4345 }
4346 // Refilled TLAB, return.
4347 mirror::Object* ret = self->AllocTlab(alloc_size);
4348 DCHECK(ret != nullptr);
4349 *bytes_allocated = alloc_size;
4350 *usable_size = alloc_size;
4351 return ret;
4352}
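
// Illustrative sketch (not compiled into the runtime): the partial-TLAB expansion size chosen
// above. The TLAB grows by at least enough to satisfy the allocation, by at most its remaining
// capacity, and otherwise by a fixed step. kStepSize is a placeholder for kPartialTlabSize.
#if 0
#include <algorithm>
#include <cstddef>

constexpr size_t kStepSize = 16 * 1024;  // Placeholder for kPartialTlabSize.

// Precondition mirrored from the code above: alloc_size does not fit in the current TLAB but
// does fit within its remaining capacity.
size_t TlabExpandBytes(size_t alloc_size, size_t tlab_size, size_t tlab_remaining_capacity) {
  const size_t min_expand_size = alloc_size - tlab_size;
  return std::max(min_expand_size, std::min(tlab_remaining_capacity - tlab_size, kStepSize));
}
#endif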
4353
Mathieu Chartier1ca68902017-04-18 11:26:22 -07004354const Verification* Heap::GetVerification() const {
4355 return verification_.get();
4356}
4357
Hans Boehmc220f982018-10-12 16:15:45 -07004358void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t alloc_size) {
4359 VLOG(heap) << "Growing heap from " << PrettySize(old_footprint) << " to "
Andreas Gampe170331f2017-12-07 18:41:03 -08004360 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
4361}
4362
Mathieu Chartiera98a2822017-05-24 16:14:10 -07004363class Heap::TriggerPostForkCCGcTask : public HeapTask {
4364 public:
4365 explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
Roland Levillainbbc6e7e2018-08-24 16:58:47 +01004366 void Run(Thread* self) override {
Mathieu Chartiera98a2822017-05-24 16:14:10 -07004367 gc::Heap* heap = Runtime::Current()->GetHeap();
Hans Boehmfb8b4e22018-09-05 16:45:42 -07004368 // Trigger a GC, if not already done. The first GC after fork, whenever it
Mathieu Chartiera98a2822017-05-24 16:14:10 -07004369 // takes place, will adjust the thresholds to normal levels.
Hans Boehmc220f982018-10-12 16:15:45 -07004370 if (heap->target_footprint_.load(std::memory_order_relaxed) == heap->growth_limit_) {
Mathieu Chartiera98a2822017-05-24 16:14:10 -07004371 heap->RequestConcurrentGC(self, kGcCauseBackground, false);
4372 }
4373 }
4374};
4375
4376void Heap::PostForkChildAction(Thread* self) {
Hans Boehmc220f982018-10-12 16:15:45 -07004377 // Temporarily increase target_footprint_ and concurrent_start_bytes_ to
Mathieu Chartiera98a2822017-05-24 16:14:10 -07004378 // max values to avoid GC during app launch.
4379 if (collector_type_ == kCollectorTypeCC && !IsLowMemoryMode()) {
Hans Boehmc220f982018-10-12 16:15:45 -07004380 // Set target_footprint_ to the largest allowed value.
Mathieu Chartiera98a2822017-05-24 16:14:10 -07004381 SetIdealFootprint(growth_limit_);
4382 // Set concurrent_start_bytes_ to half of the heap size.
Hans Boehmc220f982018-10-12 16:15:45 -07004383 size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
4384 concurrent_start_bytes_ = std::max(target_footprint / 2, GetBytesAllocated());
Mathieu Chartiera98a2822017-05-24 16:14:10 -07004385
4386 GetTaskProcessor()->AddTask(
4387 self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS)));
4388 }
4389}
4390
Ian Rogers1d54e732013-05-02 21:10:01 -07004391} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -07004392} // namespace art