/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <memory>
#include <vector>

#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_compact.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "heap-inl.h"
#include "image.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "os.h"
#include "reflection.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {

namespace gc {

static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
static constexpr bool kGCALotMode = false;
static constexpr size_t kGcAlotInterval = KB;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static constexpr double kStickyGcThroughputAdjustment = 1.0;
// Whether or not we use the free list large object space.
static constexpr bool kUseFreeListSpaceForLOS = false;
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;
// Define space names.
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;

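// Heap constructor: creates the image space (if any), reserves the address ranges sketched in
// the diagram below, creates the non moving / main / bump pointer / large object spaces, the
// card table, mod-union tables and remembered sets, the allocation/live/mark stacks, and the
// garbage collector instances.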
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, double foreground_heap_growth_multiplier,
           size_t capacity, size_t non_moving_space_capacity, const std::string& image_file_name,
           const InstructionSet image_instruction_set, CollectorType foreground_collector_type,
           CollectorType background_collector_type, size_t parallel_gc_threads,
           size_t conc_gc_threads, bool low_memory_mode,
           size_t long_pause_log_threshold, size_t long_gc_log_threshold,
           bool ignore_max_footprint, bool use_tlab,
           bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction_for_oom,
           uint64_t min_interval_homogeneous_space_compaction_by_oom)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      heap_trim_request_lock_(nullptr),
      last_trim_time_(0),
      heap_transition_or_trim_target_time_(0),
      heap_trim_request_pending_(false),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      zygote_space_(nullptr),
      large_object_threshold_(kDefaultLargeObjectThreshold),  // Starts out disabled.
      collector_type_running_(kCollectorTypeNone),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      native_need_to_run_finalization_(false),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      last_gc_time_ns_(NanoTime()),
      allocation_rate_(0),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? KB : MB),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
      use_tlab_(use_tlab),
      main_space_backup_(nullptr),
      min_interval_homogeneous_space_compaction_by_oom_(
          min_interval_homogeneous_space_compaction_by_oom),
      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  const bool is_zygote = Runtime::Current()->IsZygote();
  if (!is_zygote) {
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files
  byte* requested_alloc_space_begin = nullptr;
  if (!image_file_name.empty()) {
    std::string error_msg;
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str(),
                                                               image_instruction_set,
                                                               &error_msg);
    if (image_space != nullptr) {
      AddSpace(image_space);
      // Oat files referenced by image files immediately follow them in memory, ensure alloc space
      // isn't going to get in the middle
      byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
      CHECK_GT(oat_file_end_addr, image_space->End());
      requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
    } else {
      LOG(WARNING) << "Could not create image space with image file '" << image_file_name << "'. "
                   << "Attempting to fall back to imageless running. Error was: " << error_msg;
    }
  }
  /*
  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +- nonmoving space (non_moving_space_capacity)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space / bump space 1 (capacity_) +-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-????????????????????????????????????????????+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                     +-main alloc space2 / bump space 2 (capacity_)+-
                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
  */
  bool support_homogeneous_space_compaction =
      background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
      use_homogeneous_space_compaction_for_oom;
  // We may use the same space as the main space for the non moving space if we don't need to
  // compact from the main space.
  // This is not the case if we support homogeneous compaction or have a moving background
  // collector type.
  bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);
  if (foreground_collector_type == kCollectorTypeGSS) {
    separate_non_moving_space = false;
  }
  std::unique_ptr<MemMap> main_mem_map_1;
  std::unique_ptr<MemMap> main_mem_map_2;
  byte* request_begin = requested_alloc_space_begin;
  if (request_begin != nullptr && separate_non_moving_space) {
    request_begin += non_moving_space_capacity;
  }
  std::string error_str;
  std::unique_ptr<MemMap> non_moving_space_mem_map;
  if (separate_non_moving_space) {
    // Reserve the non moving mem map before the other two since it needs to be at a specific
    // address.
    non_moving_space_mem_map.reset(
        MemMap::MapAnonymous("non moving space", requested_alloc_space_begin,
                             non_moving_space_capacity, PROT_READ | PROT_WRITE, true, &error_str));
    CHECK(non_moving_space_mem_map != nullptr) << error_str;
    // Try to reserve virtual memory at a lower address if we have a separate non moving space.
    request_begin = reinterpret_cast<byte*>(0x1000000);
  }
  // Attempt to create 2 mem maps at or after the requested begin.
  main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
                                                    PROT_READ | PROT_WRITE, &error_str));
  CHECK(main_mem_map_1.get() != nullptr) << error_str;
  if (support_homogeneous_space_compaction ||
      background_collector_type_ == kCollectorTypeSS ||
      foreground_collector_type_ == kCollectorTypeSS) {
    main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
                                                      capacity_, PROT_READ | PROT_WRITE,
                                                      &error_str));
    CHECK(main_mem_map_2.get() != nullptr) << error_str;
  }
  // Create the non moving space first so that bitmaps don't take up the address range.
  if (separate_non_moving_space) {
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // active rosalloc spaces.
    const size_t size = non_moving_space_mem_map->Size();
    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
        non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
        initial_size, size, size, false);
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
                                        << requested_alloc_space_begin;
    AddSpace(non_moving_space_);
  }
  // Create other spaces based on whether or not we have a moving GC.
  if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) {
    // Create bump pointer spaces.
    // We only create the bump pointer spaces if the foreground collector is a compacting GC.
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
                                                                    main_mem_map_1.release());
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                            main_mem_map_2.release());
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    CHECK(separate_non_moving_space);
  } else {
    CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
    CHECK(main_space_ != nullptr);
    AddSpace(main_space_);
    if (!separate_non_moving_space) {
      non_moving_space_ = main_space_;
      CHECK(!non_moving_space_->CanMoveObjects());
    }
    if (foreground_collector_type_ == kCollectorTypeGSS) {
      CHECK_EQ(foreground_collector_type_, background_collector_type_);
      // Create bump pointer spaces instead of a backup space.
      main_mem_map_2.release();
      bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
                                                            kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(bump_pointer_space_ != nullptr);
      AddSpace(bump_pointer_space_);
      temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
                                                    kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(temp_space_ != nullptr);
      AddSpace(temp_space_);
    } else if (main_mem_map_2.get() != nullptr) {
      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
      main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
                                                           growth_limit_, capacity_, name, true));
      CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in the heap_begin and heap_end.
      AddSpace(main_space_backup_.get());
    }
  }
  CHECK(non_moving_space_ != nullptr);
  CHECK(!non_moving_space_->CanMoveObjects());
  // Allocate the large object space.
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity_);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  AddSpace(large_object_space_);
  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  byte* heap_begin = continuous_spaces_.front()->Begin();
  byte* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;
  // Remove the main backup space since it slows down the GC to have unused extra spaces.
  if (main_space_backup_.get() != nullptr) {
    RemoveSpace(main_space_backup_.get());
  }
  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";
  // Card cache for now since it makes it easier for us to update the references to the copying
  // spaces.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
                                                      GetImageSpace());
  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
  AddModUnionTable(mod_union_table);
  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
  }
  // TODO: Count objects in the image space here?
  num_bytes_allocated_.StoreRelaxed(0);
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                    kDefaultMarkStackSize));
  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
  allocation_stack_.reset(accounting::ObjectStack::Create(
      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
  live_stack_.reset(accounting::ObjectStack::Create(
      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create it earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  heap_trim_request_lock_ = new Mutex("Heap trim request lock");
  last_gc_size_ = GetBytesAllocated();
  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);
  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }
  if (kMovingCollector) {
    // TODO: Clean this up.
    const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
    semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                     generational ? "generational" : "");
    garbage_collectors_.push_back(semi_space_collector_);
    concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
    garbage_collectors_.push_back(concurrent_copying_collector_);
    mark_compact_collector_ = new collector::MarkCompact(this);
    garbage_collectors_.push_back(mark_compact_collector_);
  }
  if (GetImageSpace() != nullptr && non_moving_space_ != nullptr &&
      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
    // Check that there's no gap between the image space and the non moving space so that the
    // immune region won't break (eg. due to a large object allocated in the gap). This is only
    // required when we're the zygote or using GSS.
    bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
                                      non_moving_space_->GetMemMap());
    if (!no_gap) {
      MemMap::DumpMaps(LOG(ERROR));
      LOG(FATAL) << "There's a gap between the image space and the main space";
    }
  }
  if (running_on_valgrind_) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

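// Maps an anonymous region, preferring the requested begin address but retrying with no address
// hint if that reservation fails.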
MemMap* Heap::MapAnonymousPreferredAddress(const char* name, byte* request_begin, size_t capacity,
                                           int prot_flags, std::string* out_error_str) {
  while (true) {
    MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity, prot_flags, true,
                                       out_error_str);
    if (map != nullptr || request_begin == nullptr) {
      return map;
    }
    // Retry a second time with no specified request begin.
    request_begin = nullptr;
  }
  return nullptr;
}

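// Creates a rosalloc or dlmalloc space on top of an existing mem map and, when the semi-space
// collector uses remembered sets, registers a remembered set for the new space.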
space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
                                                      size_t growth_limit, size_t capacity,
                                                      const char* name, bool can_move_objects) {
  space::MallocSpace* malloc_space = nullptr;
  if (kUseRosAlloc) {
    // Create rosalloc space.
    malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          low_memory_mode_, can_move_objects);
  } else {
    malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          can_move_objects);
  }
  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* rem_set =
        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
    AddRememberedSet(rem_set);
  }
  CHECK(malloc_space != nullptr) << "Failed to create " << name;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  return malloc_space;
}

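// Creates the main malloc space from the given mem map and installs it as the default
// allocation space.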
void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
    can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
  }
  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
    RemoveRememberedSet(main_space_);
  }
  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
  main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
                                            can_move_objects);
  SetSpaceAsDefault(main_space_);
  VLOG(heap) << "Created main space " << main_space_;
}

void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

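// Permanently disables moving garbage collection: switches the foreground/background collector
// types away from moving GCs, transitions to the new collector, and folds the non moving space
// into the main space so that no remaining space moves objects.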
void Heap::DisableMovingGc() {
  if (IsMovingGc(foreground_collector_type_)) {
    foreground_collector_type_ = kCollectorTypeCMS;
  }
  if (IsMovingGc(background_collector_type_)) {
    background_collector_type_ = foreground_collector_type_;
  }
  TransitionCollector(foreground_collector_type_);
  ThreadList* tl = Runtime::Current()->GetThreadList();
  Thread* self = Thread::Current();
  ScopedThreadStateChange tsc(self, kSuspended);
  tl->SuspendAll();
  // Something may have caused the transition to fail.
  if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
    CHECK(main_space_ != nullptr);
    // The allocation stack may have non movable objects in it. We need to flush it since the GC
    // can't only handle marking allocation stack objects of one non moving space and one main
    // space.
    {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      FlushAllocStack();
    }
    main_space_->DisableMovingObjects();
    non_moving_space_ = main_space_;
    CHECK(!non_moving_space_->CanMoveObjects());
  }
  tl->ResumeAll();
}

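// Best-effort class descriptor lookup that never dereferences addresses outside the continuous
// spaces; used when dumping possibly-corrupt objects.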
std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
  if (!IsValidContinuousSpaceObjectAddress(klass)) {
    return StringPrintf("<non heap address klass %p>", klass);
  }
  mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
  if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
    std::string result("[");
    result += SafeGetClassDescriptor(component_type);
    return result;
  } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
    return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
  } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
    return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
  } else {
    mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
    if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
      return StringPrintf("<non heap address dex_cache %p>", dex_cache);
    }
    const DexFile* dex_file = dex_cache->GetDexFile();
    uint16_t class_def_idx = klass->GetDexClassDefIndex();
    if (class_def_idx == DexFile::kDexNoIndex16) {
      return "<class def not found>";
    }
    const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
    const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
    return dex_file->GetTypeDescriptor(type_id);
  }
}

std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
  if (obj == nullptr) {
    return "null";
  }
  mirror::Class* klass = obj->GetClass<kVerifyNone>();
  if (klass == nullptr) {
    return "(class=null)";
  }
  std::string result(SafeGetClassDescriptor(klass));
  if (obj->IsClass()) {
    result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
  }
  return result;
}

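// Dumps what is known about an object, unprotecting the spaces first so the dump itself cannot
// fault; the page we faulted on is re-protected afterwards.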
void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
  if (obj == nullptr) {
    stream << "(obj=null)";
    return;
  }
  if (IsAligned<kObjectAlignment>(obj)) {
    space::Space* space = nullptr;
    // Don't use find space since it only finds spaces which actually contain objects instead of
    // spaces which may contain objects (e.g. cleared bump pointer spaces).
    for (const auto& cur_space : continuous_spaces_) {
      if (cur_space->HasAddress(obj)) {
        space = cur_space;
        break;
      }
    }
    // Unprotect all the spaces.
    for (const auto& space : continuous_spaces_) {
      mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
    }
    stream << "Object " << obj;
    if (space != nullptr) {
      stream << " in space " << *space;
    }
    mirror::Class* klass = obj->GetClass<kVerifyNone>();
    stream << "\nclass=" << klass;
    if (klass != nullptr) {
      stream << " type= " << SafePrettyTypeOf(obj);
    }
    // Re-protect the address we faulted on.
    mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
  }
}

bool Heap::IsCompilingBoot() const {
  if (!Runtime::Current()->IsCompiler()) {
    return false;
  }
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace() || space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

bool Heap::HasImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return true;
    }
  }
  return false;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsMovingGc(collector_type_running_)) {
    WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GE(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}

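// Called on foreground/background process state changes; requests a collector transition so the
// process switches to the collector type configured for its new state.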
void Heap::UpdateProcessState(ProcessState process_state) {
  if (process_state_ != process_state) {
    process_state_ = process_state;
    for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
      // Start at index 1 to avoid "is always false" warning.
      // Have iteration 1 always transition the collector.
      TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
                              ? foreground_collector_type_ : background_collector_type_);
      usleep(kCollectorTransitionStressWait);
    }
    if (process_state_ == kProcessStateJankPerceptible) {
      // Transition back to foreground right away to prevent jank.
      RequestCollectorTransition(foreground_collector_type_, 0);
    } else {
      // Don't delay for debug builds since we may want to stress test the GC.
      // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
      // special handling which does a homogeneous space compaction once but then doesn't
      // transition the collector.
      RequestCollectorTransition(background_collector_type_,
                                 kIsDebugBuild ? 0 : kCollectorTransitionWait);
    }
  }
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

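// Visits all live objects: those in the bump pointer space, those still on the allocation stack,
// and those tracked by the live bitmap. Thread suspension is disallowed during the walk since a
// moving GC could otherwise relocate the objects being visited.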
void Heap::VisitObjects(ObjectCallback callback, void* arg) {
  Thread* self = Thread::Current();
  // GCs can move objects, so don't allow this.
  const char* old_cause = self->StartAssertNoThreadSuspension("Visiting objects");
  if (bump_pointer_space_ != nullptr) {
    // Visit objects in bump pointer space.
    bump_pointer_space_->Walk(callback, arg);
  }
  // TODO: Switch to standard begin and end to use a range-based loop.
  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
      it < end; ++it) {
    mirror::Object* obj = *it;
    if (obj != nullptr && obj->GetClass() != nullptr) {
      // Avoid the race condition caused by the object not yet being written into the allocation
      // stack or the class not yet being written in the object. Or, if
      // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
      callback(obj, arg);
    }
  }
  GetLiveBitmap()->Walk(callback, arg);
  self->EndAssertNoThreadSuspension(old_cause);
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = non_moving_space_;
  // TODO: Generalize this to n bitmaps?
  CHECK(space1 != nullptr);
  CHECK(space2 != nullptr);
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 large_object_space_->GetLiveBitmap(), stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

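// Registers a space with the heap, wiring its live/mark bitmaps into the heap bitmaps and keeping
// the continuous space list sorted by start address.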
void Heap::AddSpace(space::Space* space) {
  CHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      CHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
                return a->Begin() < b->Begin();
              });
  } else {
    CHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (continuous_space->IsDlMallocSpace()) {
    dlmalloc_space_ = continuous_space->AsDlMallocSpace();
  } else if (continuous_space->IsRosAllocSpace()) {
    rosalloc_space_ = continuous_space->AsRosAllocSpace();
  }
}

void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
}

void Heap::RegisterGCAllocation(size_t bytes) {
  gc_memory_overhead_.FetchAndAddSequentiallyConsistent(bytes);
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  gc_memory_overhead_.FetchAndSubSequentiallyConsistent(bytes);
}

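// Dumps cumulative GC timing and throughput statistics and resets each collector's measurements.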
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700801void Heap::DumpGcPerformanceInfo(std::ostream& os) {
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700802 // Dump cumulative timings.
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700803 os << "Dumping cumulative Gc timings\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700804 uint64_t total_duration = 0;
Mathieu Chartier2b82db42012-11-14 17:29:05 -0800805 // Dump cumulative loggers for each GC type.
Mathieu Chartier2b82db42012-11-14 17:29:05 -0800806 uint64_t total_paused_time = 0;
Mathieu Chartier5a487192014-04-08 11:14:54 -0700807 for (auto& collector : garbage_collectors_) {
Mathieu Chartier104fa0c2014-08-07 14:26:27 -0700808 total_duration += collector->GetCumulativeTimings().GetTotalNs();
809 total_paused_time += collector->GetTotalPausedTimeNs();
810 collector->DumpPerformanceInfo(os);
Mathieu Chartier5a487192014-04-08 11:14:54 -0700811 collector->ResetMeasurements();
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700812 }
Ian Rogers3e5cf302014-05-20 16:40:37 -0700813 uint64_t allocation_time =
814 static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700815 if (total_duration != 0) {
Brian Carlstrom2d888622013-07-18 17:02:00 -0700816 const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700817 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
818 os << "Mean GC size throughput: "
Ian Rogers1d54e732013-05-02 21:10:01 -0700819 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700820 os << "Mean GC object throughput: "
Ian Rogers1d54e732013-05-02 21:10:01 -0700821 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700822 }
Mathieu Chartierdd162fb2014-08-06 17:06:33 -0700823 uint64_t total_objects_allocated = GetObjectsAllocatedEver();
Mathieu Chartierc30a7252014-08-12 10:13:48 -0700824 os << "Total number of allocations " << total_objects_allocated << "\n";
Mathieu Chartierdd162fb2014-08-06 17:06:33 -0700825 uint64_t total_bytes_allocated = GetBytesAllocatedEver();
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700826 os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
Mathieu Chartierc30a7252014-08-12 10:13:48 -0700827 os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
Mathieu Chartierdd162fb2014-08-06 17:06:33 -0700828 os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
829 os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
Mathieu Chartierc30a7252014-08-12 10:13:48 -0700830 os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
831 os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700832 if (kMeasureAllocationTime) {
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700833 os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
834 os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
835 << "\n";
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700836 }
Mathieu Chartiere4cab172014-08-19 18:24:04 -0700837 if (HasZygoteSpace()) {
838 os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
839 }
Elliott Hughes8b788fe2013-04-17 15:57:01 -0700840 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
841 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
Ian Rogers3e5cf302014-05-20 16:40:37 -0700842 os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_.LoadRelaxed();
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700843 BaseMutex::DumpAll(os);
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700844}
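// A minimal caller sketch (illustrative), assuming this routine is Heap::DumpGcPerformanceInfo as
// declared in heap.h and that the usual Runtime::Current()->GetHeap() accessor is available:
//
//   std::ostringstream oss;
//   Runtime::Current()->GetHeap()->DumpGcPerformanceInfo(oss);
//   LOG(INFO) << oss.str();
//
// The dump follows the formats written above, e.g. "Total time spent in GC: ...",
// "Mean GC object throughput: ... objects/s" and "Total mutator paused time: ...".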
845
Elliott Hughesb3bd5f02012-03-08 21:05:27 -0800846Heap::~Heap() {
Mathieu Chartier590fee92013-09-13 13:46:47 -0700847 VLOG(heap) << "Starting ~Heap()";
Mathieu Chartier590fee92013-09-13 13:46:47 -0700848 STLDeleteElements(&garbage_collectors_);
849 // If we don't reset then the mark stack complains in its destructor.
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700850 allocation_stack_->Reset();
851 live_stack_->Reset();
Mathieu Chartier11409ae2013-09-23 11:49:36 -0700852 STLDeleteValues(&mod_union_tables_);
Mathieu Chartier0767c9a2014-03-26 12:53:19 -0700853 STLDeleteValues(&remembered_sets_);
Ian Rogers1d54e732013-05-02 21:10:01 -0700854 STLDeleteElements(&continuous_spaces_);
855 STLDeleteElements(&discontinuous_spaces_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700856 delete gc_complete_lock_;
Mathieu Chartier0767c9a2014-03-26 12:53:19 -0700857 delete heap_trim_request_lock_;
Mathieu Chartier590fee92013-09-13 13:46:47 -0700858 VLOG(heap) << "Finished ~Heap()";
Carl Shapiro69759ea2011-07-21 18:13:35 -0700859}
860
Ian Rogers1d54e732013-05-02 21:10:01 -0700861space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
862 bool fail_ok) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -0700863 for (const auto& space : continuous_spaces_) {
864 if (space->Contains(obj)) {
865 return space;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -0700866 }
867 }
Ian Rogers1d54e732013-05-02 21:10:01 -0700868 if (!fail_ok) {
869 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
870 }
Mathieu Chartierb062fdd2012-07-03 09:51:48 -0700871 return NULL;
872}
873
Ian Rogers1d54e732013-05-02 21:10:01 -0700874space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
875 bool fail_ok) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -0700876 for (const auto& space : discontinuous_spaces_) {
877 if (space->Contains(obj)) {
878 return space;
Ian Rogers1d54e732013-05-02 21:10:01 -0700879 }
880 }
881 if (!fail_ok) {
882 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
883 }
884 return NULL;
885}
886
887space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
888 space::Space* result = FindContinuousSpaceFromObject(obj, true);
889 if (result != NULL) {
890 return result;
891 }
892 return FindDiscontinuousSpaceFromObject(obj, true);
893}
894
895space::ImageSpace* Heap::GetImageSpace() const {
Mathieu Chartier02e25112013-08-14 16:14:24 -0700896 for (const auto& space : continuous_spaces_) {
897 if (space->IsImageSpace()) {
898 return space->AsImageSpace();
Mathieu Chartierb062fdd2012-07-03 09:51:48 -0700899 }
900 }
901 return NULL;
902}
903
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -0700904void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700905 std::ostringstream oss;
Ian Rogersef7d42f2014-01-06 12:55:46 -0800906 size_t total_bytes_free = GetFreeMemory();
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700907 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
Mathieu Chartierdd162fb2014-08-06 17:06:33 -0700908 << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM";
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700909 // If the allocation failed due to fragmentation, print out the largest possible contiguous allocation.
Zuo Wangf37a88b2014-07-10 04:26:41 -0700910 if (total_bytes_free >= byte_count) {
Mathieu Chartierb363f662014-07-16 13:28:58 -0700911 space::AllocSpace* space = nullptr;
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -0700912 if (allocator_type == kAllocatorTypeNonMoving) {
913 space = non_moving_space_;
914 } else if (allocator_type == kAllocatorTypeRosAlloc ||
915 allocator_type == kAllocatorTypeDlMalloc) {
916 space = main_space_;
Mathieu Chartierb363f662014-07-16 13:28:58 -0700917 } else if (allocator_type == kAllocatorTypeBumpPointer ||
918 allocator_type == kAllocatorTypeTLAB) {
919 space = bump_pointer_space_;
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700920 }
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -0700921 if (space != nullptr) {
922 space->LogFragmentationAllocFailure(oss, byte_count);
923 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700924 }
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700925 self->ThrowOutOfMemoryError(oss.str().c_str());
926}
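// The resulting OutOfMemoryError message follows the stream writes above, i.e.
//   "Failed to allocate a <byte_count> byte allocation with <free bytes> free bytes and
//    <free-until-OOME> until OOM"
// optionally followed by per-space fragmentation details from LogFragmentationAllocFailure. As an
// invented example, a failed 8 MB allocation with 2 MB free could read:
//   "Failed to allocate a 8388608 byte allocation with 2097152 free bytes and 6MB until OOM"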
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -0700927
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800928void Heap::DoPendingTransitionOrTrim() {
929 Thread* self = Thread::Current();
930 CollectorType desired_collector_type;
931 // Wait until we reach the desired transition time.
932 while (true) {
933 uint64_t wait_time;
934 {
935 MutexLock mu(self, *heap_trim_request_lock_);
936 desired_collector_type = desired_collector_type_;
937 uint64_t current_time = NanoTime();
Mathieu Chartiera5b5c552014-06-24 14:48:59 -0700938 if (current_time >= heap_transition_or_trim_target_time_) {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800939 break;
940 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -0700941 wait_time = heap_transition_or_trim_target_time_ - current_time;
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800942 }
943 ScopedThreadStateChange tsc(self, kSleeping);
944 usleep(wait_time / 1000); // Usleep takes microseconds.
945 }
Zuo Wangf37a88b2014-07-10 04:26:41 -0700946 // Launch homogeneous space compaction if it is desired.
947 if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
948 if (!CareAboutPauseTimes()) {
949 PerformHomogeneousSpaceCompact();
950 }
951 // No need to Trim(). Homogeneous space compaction may free more virtual and physical memory.
952 desired_collector_type = collector_type_;
953 return;
954 }
Mathieu Chartier7bf52d22014-03-13 14:46:09 -0700955 // Transition the collector if the desired collector type is not the same as the current
956 // collector type.
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800957 TransitionCollector(desired_collector_type);
Mathieu Chartier440e4ce2014-03-31 16:36:35 -0700958 if (!CareAboutPauseTimes()) {
959 // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care
960 // about pauses.
961 Runtime* runtime = Runtime::Current();
962 runtime->GetThreadList()->SuspendAll();
Mathieu Chartier48ab6872014-06-24 11:21:59 -0700963 uint64_t start_time = NanoTime();
964 size_t count = runtime->GetMonitorList()->DeflateMonitors();
965 VLOG(heap) << "Deflating " << count << " monitors took "
966 << PrettyDuration(NanoTime() - start_time);
Mathieu Chartier440e4ce2014-03-31 16:36:35 -0700967 runtime->GetThreadList()->ResumeAll();
Mathieu Chartier440e4ce2014-03-31 16:36:35 -0700968 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -0700969 // Do a heap trim if it is needed.
970 Trim();
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800971}
972
Mathieu Chartier590fee92013-09-13 13:46:47 -0700973void Heap::Trim() {
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -0800974 Thread* self = Thread::Current();
975 {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800976 MutexLock mu(self, *heap_trim_request_lock_);
Mathieu Chartier7bf52d22014-03-13 14:46:09 -0700977 if (!heap_trim_request_pending_ || last_trim_time_ + kHeapTrimWait >= NanoTime()) {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800978 return;
979 }
Mathieu Chartier7bf52d22014-03-13 14:46:09 -0700980 last_trim_time_ = NanoTime();
Mathieu Chartiera5f9de02014-02-28 16:48:42 -0800981 heap_trim_request_pending_ = false;
982 }
983 {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -0800984 // Need to do this before acquiring the locks since we don't want to get suspended while
985 // holding any locks.
986 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -0800987 // Pretend we are doing a GC to prevent background compaction from deleting the space we are
988 // trimming.
989 MutexLock mu(self, *gc_complete_lock_);
990 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -0700991 WaitForGcToCompleteLocked(kGcCauseTrim, self);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -0800992 collector_type_running_ = kCollectorTypeHeapTrim;
993 }
Mathieu Chartier590fee92013-09-13 13:46:47 -0700994 uint64_t start_ns = NanoTime();
995 // Trim the managed spaces.
996 uint64_t total_alloc_space_allocated = 0;
997 uint64_t total_alloc_space_size = 0;
998 uint64_t managed_reclaimed = 0;
999 for (const auto& space : continuous_spaces_) {
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001000 if (space->IsMallocSpace()) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001001 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1002 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1003 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1004 // for a long period of time.
1005 managed_reclaimed += malloc_space->Trim();
1006 }
1007 total_alloc_space_size += malloc_space->Size();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001008 }
1009 }
Mathieu Chartier31f44142014-04-08 14:40:03 -07001010 total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated();
1011 if (bump_pointer_space_ != nullptr) {
1012 total_alloc_space_allocated -= bump_pointer_space_->Size();
1013 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001014 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1015 static_cast<float>(total_alloc_space_size);
1016 uint64_t gc_heap_end_ns = NanoTime();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001017 // We never move things in the native heap, so we can finish the GC at this point.
1018 FinishGC(self, collector::kGcTypeNone);
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001019 size_t native_reclaimed = 0;
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001020 // Only trim the native heap if we don't care about pauses.
1021 if (!CareAboutPauseTimes()) {
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001022#if defined(USE_DLMALLOC)
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001023 // Trim the native heap.
1024 dlmalloc_trim(0);
1025 dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001026#elif defined(USE_JEMALLOC)
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001027 // Jemalloc does its own internal trimming.
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001028#else
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001029 UNIMPLEMENTED(WARNING) << "Add trimming support";
Christopher Ferrisc4ddc042014-05-13 14:47:50 -07001030#endif
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001031 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001032 uint64_t end_ns = NanoTime();
1033 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1034 << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
1035 << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
1036 << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
1037 << "%.";
1038}
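// Worked example (numbers invented): managed_utilization above is allocated bytes divided by the
// total malloc space size, so 48 MB allocated across 64 MB of malloc space ends the VLOG message
// with "Managed heap utilization of 75%.".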
1039
1040bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
1041 // Note: we deliberately don't take the lock here, and mustn't test anything that would require
1042 // taking the lock.
1043 if (obj == nullptr) {
Elliott Hughes88c5c352012-03-15 18:49:48 -07001044 return true;
1045 }
Mathieu Chartier15d34022014-02-26 17:16:38 -08001046 return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001047}
1048
Mathieu Chartierd68ac702014-02-11 14:50:51 -08001049bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
1050 return FindContinuousSpaceFromObject(obj, true) != nullptr;
1051}
1052
Mathieu Chartier15d34022014-02-26 17:16:38 -08001053bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
1054 if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
1055 return false;
1056 }
1057 for (const auto& space : continuous_spaces_) {
1058 if (space->HasAddress(obj)) {
1059 return true;
1060 }
1061 }
1062 return false;
Elliott Hughesa2501992011-08-26 19:39:54 -07001063}
1064
Ian Rogersef7d42f2014-01-06 12:55:46 -08001065bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001066 bool search_live_stack, bool sorted) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001067 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
1068 return false;
1069 }
1070 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001071 mirror::Class* klass = obj->GetClass<kVerifyNone>();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001072 if (obj == klass) {
Mathieu Chartier9be9a7a2014-01-24 14:07:33 -08001073 // This case happens for java.lang.Class.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001074 return true;
1075 }
1076 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1077 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
Mathieu Chartier4e305412014-02-19 10:54:44 -08001078 // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1079 // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1080 return temp_space_->Contains(obj);
Ian Rogers1d54e732013-05-02 21:10:01 -07001081 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001082 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001083 space::DiscontinuousSpace* d_space = nullptr;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001084 if (c_space != nullptr) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001085 if (c_space->GetLiveBitmap()->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001086 return true;
1087 }
1088 } else {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001089 d_space = FindDiscontinuousSpaceFromObject(obj, true);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001090 if (d_space != nullptr) {
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001091 if (d_space->GetLiveBitmap()->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001092 return true;
1093 }
1094 }
1095 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001096 // This covers the allocation/live stack swapping that is done without suspending mutators.
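// When the caller could not supply sorted stacks, the loop below retries up to five times with a
// 10 ms sleep between passes to ride out that swap window; with sorted stacks a single pass using
// ContainsSorted suffices.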
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001097 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1098 if (i > 0) {
1099 NanoSleep(MsToNs(10));
Ian Rogers1d54e732013-05-02 21:10:01 -07001100 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001101 if (search_allocation_stack) {
1102 if (sorted) {
Mathieu Chartier407f7022014-02-18 14:37:05 -08001103 if (allocation_stack_->ContainsSorted(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001104 return true;
1105 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08001106 } else if (allocation_stack_->Contains(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001107 return true;
1108 }
1109 }
1110
1111 if (search_live_stack) {
1112 if (sorted) {
Mathieu Chartier407f7022014-02-18 14:37:05 -08001113 if (live_stack_->ContainsSorted(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001114 return true;
1115 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08001116 } else if (live_stack_->Contains(obj)) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001117 return true;
1118 }
1119 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001120 }
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001121 // We need to check the bitmaps again since there is a race where we mark something as live and
1122 // then clear the stack containing it.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001123 if (c_space != nullptr) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001124 if (c_space->GetLiveBitmap()->Test(obj)) {
1125 return true;
1126 }
1127 } else {
1128 d_space = FindDiscontinuousSpaceFromObject(obj, true);
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07001129 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001130 return true;
1131 }
1132 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001133 return false;
Elliott Hughes6a5bd492011-10-28 14:33:57 -07001134}
1135
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07001136std::string Heap::DumpSpaces() const {
1137 std::ostringstream oss;
1138 DumpSpaces(oss);
1139 return oss.str();
1140}
1141
1142void Heap::DumpSpaces(std::ostream& stream) const {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001143 for (const auto& space : continuous_spaces_) {
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001144 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1145 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001146 stream << space << " " << *space << "\n";
1147 if (live_bitmap != nullptr) {
1148 stream << live_bitmap << " " << *live_bitmap << "\n";
1149 }
1150 if (mark_bitmap != nullptr) {
1151 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1152 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001153 }
Mathieu Chartier02e25112013-08-14 16:14:24 -07001154 for (const auto& space : discontinuous_spaces_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001155 stream << space << " " << *space << "\n";
Mathieu Chartier128c52c2012-10-16 14:12:41 -07001156 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001157}
1158
Ian Rogersef7d42f2014-01-06 12:55:46 -08001159void Heap::VerifyObjectBody(mirror::Object* obj) {
Stephen Hines22c6a812014-07-16 11:03:43 -07001160 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1161 return;
1162 }
1163
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001164 // Ignore early dawn of the universe verifications.
Ian Rogers3e5cf302014-05-20 16:40:37 -07001165 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
Ian Rogers62d6c772013-02-27 08:32:07 -08001166 return;
1167 }
Mathieu Chartier4e305412014-02-19 10:54:44 -08001168 CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001169 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
Mathieu Chartier4e305412014-02-19 10:54:44 -08001170 CHECK(c != nullptr) << "Null class in object " << obj;
1171 CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001172 CHECK(VerifyClassClass(c));
Mathieu Chartier0325e622012-09-05 14:22:51 -07001173
Mathieu Chartier4e305412014-02-19 10:54:44 -08001174 if (verify_object_mode_ > kVerifyObjectModeFast) {
1175 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07001176 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
Mathieu Chartierdcf8d722012-08-02 14:55:54 -07001177 }
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001178}
1179
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001180void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001181 reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001182}
1183
1184void Heap::VerifyHeap() {
Ian Rogers50b35e22012-10-04 10:09:15 -07001185 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001186 GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001187}
1188
Mathieu Chartiere76e70f2014-05-02 16:35:37 -07001189void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
Mathieu Chartier601276a2014-03-20 15:12:30 -07001190 // Use a signed comparison since freed bytes can be negative when a background compaction to
1191 // foreground transition occurs. This is caused by moving objects from a bump pointer space to a
1192 // free list backed space, which typically increases the memory footprint due to padding and binning.
Ian Rogers3e5cf302014-05-20 16:40:37 -07001193 DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
Mathieu Chartiere76e70f2014-05-02 16:35:37 -07001194 // Note: This relies on two's complement for handling negative freed_bytes.
Ian Rogers3e5cf302014-05-20 16:40:37 -07001195 num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001196 if (Runtime::Current()->HasStatsEnabled()) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001197 RuntimeStats* thread_stats = Thread::Current()->GetStats();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001198 thread_stats->freed_objects += freed_objects;
Elliott Hughes307f75d2011-10-12 18:04:40 -07001199 thread_stats->freed_bytes += freed_bytes;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001200 // TODO: Do this concurrently.
1201 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1202 global_stats->freed_objects += freed_objects;
1203 global_stats->freed_bytes += freed_bytes;
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001204 }
Carl Shapiro58551df2011-07-24 03:09:51 -07001205}
1206
Zuo Wangf37a88b2014-07-10 04:26:41 -07001207space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1208 for (const auto& space : continuous_spaces_) {
1209 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1210 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1211 return space->AsContinuousSpace()->AsRosAllocSpace();
1212 }
1213 }
1214 }
1215 return nullptr;
1216}
1217
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001218mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001219 size_t alloc_size, size_t* bytes_allocated,
Ian Rogers6fac4472014-02-25 17:01:10 -08001220 size_t* usable_size,
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001221 mirror::Class** klass) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001222 bool was_default_allocator = allocator == GetCurrentAllocator();
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001223 DCHECK(klass != nullptr);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001224 StackHandleScope<1> hs(self);
1225 HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
1226 klass = nullptr; // Invalidate for safety.
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001227 // The allocation failed. If the GC is running, block until it completes, and then retry the
1228 // allocation.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07001229 collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
Ian Rogers1d54e732013-05-02 21:10:01 -07001230 if (last_gc != collector::kGcTypeNone) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001231 // If we were the default allocator but the allocator changed while we were suspended,
1232 // abort the allocation.
1233 if (was_default_allocator && allocator != GetCurrentAllocator()) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001234 return nullptr;
1235 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001236 // A GC was in progress and we blocked; retry the allocation now that memory has been freed.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001237 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1238 usable_size);
1239 if (ptr != nullptr) {
1240 return ptr;
1241 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001242 }
1243
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001244 collector::GcType tried_type = next_gc_type_;
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001245 const bool gc_ran =
1246 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1247 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1248 return nullptr;
1249 }
1250 if (gc_ran) {
1251 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1252 usable_size);
1253 if (ptr != nullptr) {
1254 return ptr;
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001255 }
1256 }
1257
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001258 // Loop through our different GC types and try to GC until we get enough free memory.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001259 for (collector::GcType gc_type : gc_plan_) {
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001260 if (gc_type == tried_type) {
1261 continue;
1262 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001263 // Attempt to run the collector; if we succeed, retry the allocation.
Mathieu Chartier5ae2c932014-03-28 16:22:20 -07001264 const bool gc_ran =
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001265 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1266 if (was_default_allocator && allocator != GetCurrentAllocator()) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001267 return nullptr;
1268 }
1269 if (gc_ran) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001270 // Did we free sufficient memory for the allocation to succeed?
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001271 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1272 usable_size);
1273 if (ptr != nullptr) {
1274 return ptr;
1275 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001276 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001277 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001278 // Allocations have failed after GCs; this is an exceptional state.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001279 // Try harder, growing the heap if necessary.
1280 mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1281 usable_size);
1282 if (ptr != nullptr) {
1283 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001284 }
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001285 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1286 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1287 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1288 // OOME.
1289 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1290 << " allocation";
1291 // TODO: Run finalization, but this may cause more allocations to occur.
1292 // We don't need a WaitForGcToComplete here either.
1293 DCHECK(!gc_plan_.empty());
1294 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1295 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1296 return nullptr;
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001297 }
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001298 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001299 if (ptr == nullptr) {
Zuo Wangf37a88b2014-07-10 04:26:41 -07001300 const uint64_t current_time = NanoTime();
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001301 switch (allocator) {
1302 case kAllocatorTypeRosAlloc:
1303 // Fall-through.
1304 case kAllocatorTypeDlMalloc: {
1305 if (use_homogeneous_space_compaction_for_oom_ &&
1306 current_time - last_time_homogeneous_space_compaction_by_oom_ >
1307 min_interval_homogeneous_space_compaction_by_oom_) {
1308 last_time_homogeneous_space_compaction_by_oom_ = current_time;
1309 HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1310 switch (result) {
1311 case HomogeneousSpaceCompactResult::kSuccess:
1312 // If the allocation succeeded, we delayed an OOM.
1313 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1314 usable_size);
1315 if (ptr != nullptr) {
1316 count_delayed_oom_++;
1317 }
1318 break;
1319 case HomogeneousSpaceCompactResult::kErrorReject:
1320 // Reject due to disabled moving GC.
1321 break;
1322 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1323 // Throw OOM by default.
1324 break;
1325 default: {
1326 LOG(FATAL) << "Unimplemented homogeneous space compaction result "
1327 << static_cast<size_t>(result);
1328 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07001329 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001330 // Always print that we ran homogeneous space compaction since this can cause jank.
1331 VLOG(heap) << "Ran heap homogeneous space compaction, "
1332 << " requested defragmentation "
1333 << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1334 << " performed defragmentation "
1335 << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1336 << " ignored homogeneous space compaction "
1337 << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1338 << " delayed count = "
1339 << count_delayed_oom_.LoadSequentiallyConsistent();
Zuo Wangf37a88b2014-07-10 04:26:41 -07001340 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001341 break;
Zuo Wangf37a88b2014-07-10 04:26:41 -07001342 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001343 case kAllocatorTypeNonMoving: {
1344 // Try to transition the heap if the allocation failure was due to the space being full.
1345 if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
1346 // If we aren't out of memory then the OOM was probably from the non moving space being
1347 // full. Attempt to disable compaction and turn the main space into a non moving space.
1348 DisableMovingGc();
1349 // If we are still a moving GC then something must have caused the transition to fail.
1350 if (IsMovingGc(collector_type_)) {
1351 MutexLock mu(self, *gc_complete_lock_);
1352 // If we couldn't disable moving GC, just throw OOME and return null.
1353 LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1354 << disable_moving_gc_count_;
1355 } else {
1356 LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1357 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1358 usable_size);
1359 }
1360 }
1361 break;
1362 }
1363 default: {
1364 // Do nothing for other allocators.
1365 }
Zuo Wangf37a88b2014-07-10 04:26:41 -07001366 }
1367 }
1368 // If the allocation hasn't succeeded by this point, throw an OOM error.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001369 if (ptr == nullptr) {
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001370 ThrowOutOfMemoryError(self, alloc_size, allocator);
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001371 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001372 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001373}
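// In summary, the slow path above escalates roughly as follows before throwing OOME:
//   1. Wait for any in-progress GC and retry the allocation.
//   2. Run the next planned GC type (next_gc_type_) and retry.
//   3. Walk the remaining gc_plan_ entries, retrying after each collection.
//   4. Retry once more with heap growth allowed.
//   5. Run the most thorough GC in gc_plan_ with clear_soft_references == true and retry with
//      growth, possibly attempting homogeneous space compaction or disabling the moving GC
//      depending on the allocator type.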
1374
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001375void Heap::SetTargetHeapUtilization(float target) {
1376 DCHECK_GT(target, 0.0f); // asserted in Java code
1377 DCHECK_LT(target, 1.0f);
1378 target_utilization_ = target;
1379}
1380
Ian Rogers1d54e732013-05-02 21:10:01 -07001381size_t Heap::GetObjectsAllocated() const {
1382 size_t total = 0;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001383 for (space::AllocSpace* space : alloc_spaces_) {
1384 total += space->GetObjectsAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001385 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001386 return total;
1387}
1388
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001389uint64_t Heap::GetObjectsAllocatedEver() const {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001390 return GetObjectsFreedEver() + GetObjectsAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001391}
1392
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001393uint64_t Heap::GetBytesAllocatedEver() const {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001394 return GetBytesFreedEver() + GetBytesAllocated();
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001395}
1396
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001397class InstanceCounter {
1398 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001399 InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001400 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001401 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001402 }
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001403 static void Callback(mirror::Object* obj, void* arg)
1404 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1405 InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1406 mirror::Class* instance_class = obj->GetClass();
1407 CHECK(instance_class != nullptr);
1408 for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
1409 if (instance_counter->use_is_assignable_from_) {
1410 if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) {
1411 ++instance_counter->counts_[i];
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001412 }
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001413 } else if (instance_class == instance_counter->classes_[i]) {
1414 ++instance_counter->counts_[i];
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001415 }
1416 }
1417 }
1418
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001419 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001420 const std::vector<mirror::Class*>& classes_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001421 bool use_is_assignable_from_;
1422 uint64_t* const counts_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001423 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001424};
1425
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001426void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001427 uint64_t* counts) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001428 // Can't do any GC in this function since this may move classes.
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001429 Thread* self = Thread::Current();
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001430 auto* old_cause = self->StartAssertNoThreadSuspension("CountInstances");
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001431 InstanceCounter counter(classes, use_is_assignable_from, counts);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001432 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1433 VisitObjects(InstanceCounter::Callback, &counter);
1434 self->EndAssertNoThreadSuspension(old_cause);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001435}
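// Illustrative usage sketch based on the signature above; "byte_array_class" stands for a
// hypothetical mirror::Class* the caller already holds:
//
//   std::vector<mirror::Class*> classes = { byte_array_class };
//   uint64_t counts[1] = { 0 };
//   heap->CountInstances(classes, /* use_is_assignable_from */ false, counts);
//   // counts[0] now holds the number of objects whose class is exactly byte_array_class.
//
// Passing use_is_assignable_from == true counts every instance assignable to that class instead.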
1436
Elliott Hughes3b78c942013-01-15 17:35:41 -08001437class InstanceCollector {
1438 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001439 InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
Elliott Hughes3b78c942013-01-15 17:35:41 -08001440 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1441 : class_(c), max_count_(max_count), instances_(instances) {
1442 }
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001443 static void Callback(mirror::Object* obj, void* arg)
1444 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1445 DCHECK(arg != nullptr);
1446 InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
1447 mirror::Class* instance_class = obj->GetClass();
1448 if (instance_class == instance_collector->class_) {
1449 if (instance_collector->max_count_ == 0 ||
1450 instance_collector->instances_.size() < instance_collector->max_count_) {
1451 instance_collector->instances_.push_back(obj);
Elliott Hughes3b78c942013-01-15 17:35:41 -08001452 }
1453 }
1454 }
1455
1456 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001457 mirror::Class* class_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001458 uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001459 std::vector<mirror::Object*>& instances_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001460 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1461};
1462
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001463void Heap::GetInstances(mirror::Class* c, int32_t max_count,
1464 std::vector<mirror::Object*>& instances) {
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001465 // Can't do any GC in this function since this may move classes.
Elliott Hughes3b78c942013-01-15 17:35:41 -08001466 Thread* self = Thread::Current();
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001467 auto* old_cause = self->StartAssertNoThreadSuspension("GetInstances");
Elliott Hughes3b78c942013-01-15 17:35:41 -08001468 InstanceCollector collector(c, max_count, instances);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001469 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1470 VisitObjects(&InstanceCollector::Callback, &collector);
1471 self->EndAssertNoThreadSuspension(old_cause);
Elliott Hughes3b78c942013-01-15 17:35:41 -08001472}
1473
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001474class ReferringObjectsFinder {
1475 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001476 ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
1477 std::vector<mirror::Object*>& referring_objects)
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001478 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1479 : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1480 }
1481
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001482 static void Callback(mirror::Object* obj, void* arg)
1483 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1484 reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1485 }
1486
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001487 // For bitmap Visit.
1488 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1489 // annotalysis on visitors.
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001490 void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001491 o->VisitReferences<true>(*this, VoidFunctor());
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001492 }
1493
Mathieu Chartier3b05e9b2014-03-25 09:29:43 -07001494 // For Object::VisitReferences.
Mathieu Chartier407f7022014-02-18 14:37:05 -08001495 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
1496 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001497 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
Mathieu Chartier407f7022014-02-18 14:37:05 -08001498 if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1499 referring_objects_.push_back(obj);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001500 }
1501 }
1502
1503 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001504 mirror::Object* object_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001505 uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001506 std::vector<mirror::Object*>& referring_objects_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001507 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1508};
1509
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001510void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1511 std::vector<mirror::Object*>& referring_objects) {
Mathieu Chartier83c8ee02014-01-28 14:50:23 -08001512 // Can't do any GC in this function since this may move the object o.
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001513 Thread* self = Thread::Current();
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001514 auto* old_cause = self->StartAssertNoThreadSuspension("GetReferringObjects");
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001515 ReferringObjectsFinder finder(o, max_count, referring_objects);
Mathieu Chartier412c7fc2014-02-07 12:18:39 -08001516 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1517 VisitObjects(&ReferringObjectsFinder::Callback, &finder);
1518 self->EndAssertNoThreadSuspension(old_cause);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001519}
1520
Ian Rogers30fab402012-01-23 15:43:46 -08001521void Heap::CollectGarbage(bool clear_soft_references) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001522 // Even if we waited for a GC we still need to do another GC since weak references allocated
1523 // during the last GC will not necessarily have been cleared.
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001524 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001525}
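// Note that explicit collections always use gc_plan_.back(); ChangeCollector() below builds each
// plan so that the final entry is kGcTypeFull, making an explicit GC a full collection regardless
// of the current collector type.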
1526
Zuo Wangf37a88b2014-07-10 04:26:41 -07001527HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
1528 Thread* self = Thread::Current();
1529 // Increment the count of requested homogeneous space compactions.
1530 count_requested_homogeneous_space_compaction_++;
1531 // Store performed homogeneous space compaction at a new request arrival.
1532 ThreadList* tl = Runtime::Current()->GetThreadList();
1533 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1534 Locks::mutator_lock_->AssertNotHeld(self);
1535 {
1536 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1537 MutexLock mu(self, *gc_complete_lock_);
1538 // Ensure there is only one GC at a time.
1539 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
1540 // Homogeneous space compaction is a copying transition; we can't run it if the moving GC
1541 // disable count is non-zero.
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001542 // If the collector type changed to something which doesn't benefit from homogeneous space compaction,
Zuo Wangf37a88b2014-07-10 04:26:41 -07001543 // exit.
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001544 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
1545 !main_space_->CanMoveObjects()) {
Zuo Wangf37a88b2014-07-10 04:26:41 -07001546 return HomogeneousSpaceCompactResult::kErrorReject;
1547 }
1548 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
1549 }
1550 if (Runtime::Current()->IsShuttingDown(self)) {
1551 // Don't allow heap transitions to happen if the runtime is shutting down since these can
1552 // cause objects to get finalized.
1553 FinishGC(self, collector::kGcTypeNone);
1554 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
1555 }
1556 // Suspend all threads.
1557 tl->SuspendAll();
1558 uint64_t start_time = NanoTime();
1559 // Launch compaction.
Mathieu Chartierb363f662014-07-16 13:28:58 -07001560 space::MallocSpace* to_space = main_space_backup_.release();
Zuo Wangf37a88b2014-07-10 04:26:41 -07001561 space::MallocSpace* from_space = main_space_;
1562 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1563 const uint64_t space_size_before_compaction = from_space->Size();
Mathieu Chartierb363f662014-07-16 13:28:58 -07001564 AddSpace(to_space);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001565 Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
1566 // Leave as prot read so that we can still run ROSAlloc verification on this space.
1567 from_space->GetMemMap()->Protect(PROT_READ);
1568 const uint64_t space_size_after_compaction = to_space->Size();
Mathieu Chartierb363f662014-07-16 13:28:58 -07001569 main_space_ = to_space;
1570 main_space_backup_.reset(from_space);
1571 RemoveSpace(from_space);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001572 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
1573 // Update performed homogeneous space compaction count.
1574 count_performed_homogeneous_space_compaction_++;
1575 // Print the statistics log and resume all threads.
1576 uint64_t duration = NanoTime() - start_time;
Mathieu Chartier91c2f712014-08-25 19:46:57 -07001577 VLOG(gc) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
Zuo Wangf37a88b2014-07-10 04:26:41 -07001578 << PrettySize(space_size_before_compaction) << " -> "
1579 << PrettySize(space_size_after_compaction) << " compact-ratio: "
1580 << std::fixed << static_cast<double>(space_size_after_compaction) /
1581 static_cast<double>(space_size_before_compaction);
1582 tl->ResumeAll();
1583 // Finish GC.
1584 reference_processor_.EnqueueClearedReferences(self);
1585 GrowForUtilization(semi_space_collector_);
1586 FinishGC(self, collector::kGcTypeFull);
1587 return HomogeneousSpaceCompactResult::kSuccess;
1588}
1589
1590
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001591void Heap::TransitionCollector(CollectorType collector_type) {
1592 if (collector_type == collector_type_) {
1593 return;
1594 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001595 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
1596 << " -> " << static_cast<int>(collector_type);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001597 uint64_t start_time = NanoTime();
Ian Rogers3e5cf302014-05-20 16:40:37 -07001598 uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001599 Runtime* const runtime = Runtime::Current();
1600 ThreadList* const tl = runtime->GetThreadList();
1601 Thread* const self = Thread::Current();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001602 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1603 Locks::mutator_lock_->AssertNotHeld(self);
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001604 // Busy wait until we can GC (StartGC can fail if we have a non-zero
1605 // compacting_gc_disable_count_; this should rarely occur).
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001606 for (;;) {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001607 {
1608 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1609 MutexLock mu(self, *gc_complete_lock_);
1610 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07001611 WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
Mathieu Chartiere4927f62014-08-23 13:56:03 -07001612 // Currently we only need a heap transition if we switch from a moving collector to a
1613 // non-moving one, or vice versa.
1614 const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
Mathieu Chartierb38d4832014-04-10 10:56:55 -07001615 // If someone else beat us to it and changed the collector before we could, exit.
1616 // This is safe to do before the suspend all since we set the collector_type_running_ before
1617 // we exit the loop. If another thread attempts to do the heap transition before we exit,
1618 // then it would get blocked on WaitForGcToCompleteLocked.
1619 if (collector_type == collector_type_) {
1620 return;
1621 }
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001622 // GC can be disabled if someone has used GetPrimitiveArrayCritical but has not yet released it.
1623 if (!copying_transition || disable_moving_gc_count_ == 0) {
1624 // TODO: Not hard code in semi-space collector?
1625 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
1626 break;
1627 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001628 }
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001629 usleep(1000);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001630 }
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001631 if (runtime->IsShuttingDown(self)) {
Hiroshi Yamauchia6a8d142014-05-12 16:57:33 -07001632 // Don't allow heap transitions to happen if the runtime is shutting down since these can
1633 // cause objects to get finalized.
1634 FinishGC(self, collector::kGcTypeNone);
1635 return;
1636 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001637 tl->SuspendAll();
1638 switch (collector_type) {
Mathieu Chartierb363f662014-07-16 13:28:58 -07001639 case kCollectorTypeSS: {
Mathieu Chartier31f44142014-04-08 14:40:03 -07001640 if (!IsMovingGc(collector_type_)) {
Mathieu Chartierb363f662014-07-16 13:28:58 -07001641 // Create the bump pointer space from the backup space.
1642 CHECK(main_space_backup_ != nullptr);
1643 std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
Mathieu Chartier31f44142014-04-08 14:40:03 -07001644 // We are transitioning from a non-moving GC to a moving GC. Since we copied from the bump
1645 // pointer space during the last transition, its memory will be protected.
Mathieu Chartierb363f662014-07-16 13:28:58 -07001646 CHECK(mem_map != nullptr);
1647 mem_map->Protect(PROT_READ | PROT_WRITE);
1648 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
1649 mem_map.release());
1650 AddSpace(bump_pointer_space_);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001651 Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
Mathieu Chartierb363f662014-07-16 13:28:58 -07001652 // Use the now empty main space mem map for the bump pointer temp space.
1653 mem_map.reset(main_space_->ReleaseMemMap());
Mathieu Chartier00b59152014-07-25 10:13:51 -07001654 // Unset the pointers just in case.
1655 if (dlmalloc_space_ == main_space_) {
1656 dlmalloc_space_ = nullptr;
1657 } else if (rosalloc_space_ == main_space_) {
1658 rosalloc_space_ = nullptr;
1659 }
Mathieu Chartier2796a162014-07-25 11:50:47 -07001660 // Remove the main space so that we don't try to trim it; trimming doesn't work for debug
1661 // builds since RosAlloc attempts to read the magic number from a protected page.
1662 RemoveSpace(main_space_);
Mathieu Chartierc5a83472014-07-23 18:45:17 -07001663 RemoveRememberedSet(main_space_);
Mathieu Chartier2796a162014-07-25 11:50:47 -07001664 delete main_space_; // Delete the space since it has been removed.
Mathieu Chartierc5a83472014-07-23 18:45:17 -07001665 main_space_ = nullptr;
Mathieu Chartier2796a162014-07-25 11:50:47 -07001666 RemoveRememberedSet(main_space_backup_.get());
1667 main_space_backup_.reset(nullptr); // Deletes the space.
Mathieu Chartierb363f662014-07-16 13:28:58 -07001668 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
1669 mem_map.release());
1670 AddSpace(temp_space_);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001671 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001672 break;
1673 }
1674 case kCollectorTypeMS:
1675 // Fall through.
1676 case kCollectorTypeCMS: {
Mathieu Chartier31f44142014-04-08 14:40:03 -07001677 if (IsMovingGc(collector_type_)) {
Mathieu Chartierb363f662014-07-16 13:28:58 -07001678 CHECK(temp_space_ != nullptr);
1679 std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
1680 RemoveSpace(temp_space_);
1681 temp_space_ = nullptr;
Mathieu Chartier36dab362014-07-30 14:59:56 -07001682 mem_map->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartierb363f662014-07-16 13:28:58 -07001683 CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize, mem_map->Size(),
1684 mem_map->Size());
1685 mem_map.release();
Mathieu Chartier31f44142014-04-08 14:40:03 -07001686 // Compact to the main space from the bump pointer space; we don't need to swap semispaces.
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -07001687 AddSpace(main_space_);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001688 Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
Mathieu Chartierb363f662014-07-16 13:28:58 -07001689 mem_map.reset(bump_pointer_space_->ReleaseMemMap());
1690 RemoveSpace(bump_pointer_space_);
1691 bump_pointer_space_ = nullptr;
1692 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
Hiroshi Yamauchic1276c82014-08-07 10:27:17 -07001693 // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
1694 if (kIsDebugBuild && kUseRosAlloc) {
1695 mem_map->Protect(PROT_READ | PROT_WRITE);
1696 }
Mathieu Chartierb363f662014-07-16 13:28:58 -07001697 main_space_backup_.reset(CreateMallocSpaceFromMemMap(mem_map.get(), kDefaultInitialSize,
1698 mem_map->Size(), mem_map->Size(),
1699 name, true));
Hiroshi Yamauchic1276c82014-08-07 10:27:17 -07001700 if (kIsDebugBuild && kUseRosAlloc) {
1701 mem_map->Protect(PROT_NONE);
1702 }
Mathieu Chartierb363f662014-07-16 13:28:58 -07001703 mem_map.release();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001704 }
1705 break;
1706 }
1707 default: {
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07001708 LOG(FATAL) << "Attempted to transition to invalid collector type "
1709 << static_cast<size_t>(collector_type);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001710 break;
1711 }
1712 }
1713 ChangeCollector(collector_type);
1714 tl->ResumeAll();
1715 // Can't call into java code with all threads suspended.
Mathieu Chartier308351a2014-06-15 12:39:02 -07001716 reference_processor_.EnqueueClearedReferences(self);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001717 uint64_t duration = NanoTime() - start_time;
Mathieu Chartierafe49982014-03-27 10:55:04 -07001718 GrowForUtilization(semi_space_collector_);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001719 FinishGC(self, collector::kGcTypeFull);
Ian Rogers3e5cf302014-05-20 16:40:37 -07001720 int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001721 int32_t delta_allocated = before_allocated - after_allocated;
Mathieu Chartier19d46b42014-06-17 15:04:40 -07001722 std::string saved_str;
1723 if (delta_allocated >= 0) {
1724 saved_str = " saved at least " + PrettySize(delta_allocated);
1725 } else {
1726 saved_str = " expanded " + PrettySize(-delta_allocated);
1727 }
Mathieu Chartier91c2f712014-08-25 19:46:57 -07001728 VLOG(gc) << "Heap transition to " << process_state_ << " took "
Mathieu Chartier19d46b42014-06-17 15:04:40 -07001729 << PrettyDuration(duration) << saved_str;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001730}
1731
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001732void Heap::ChangeCollector(CollectorType collector_type) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001733 // TODO: Only do this with all mutators suspended to avoid races.
1734 if (collector_type != collector_type_) {
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001735 if (collector_type == kCollectorTypeMC) {
1736 // Don't allow mark compact unless support is compiled in.
1737 CHECK(kMarkCompactSupport);
1738 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001739 collector_type_ = collector_type;
1740 gc_plan_.clear();
1741 switch (collector_type_) {
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07001742 case kCollectorTypeCC: // Fall-through.
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001743 case kCollectorTypeMC: // Fall-through.
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07001744 case kCollectorTypeSS: // Fall-through.
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001745 case kCollectorTypeGSS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001746 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001747 if (use_tlab_) {
1748 ChangeAllocator(kAllocatorTypeTLAB);
1749 } else {
1750 ChangeAllocator(kAllocatorTypeBumpPointer);
1751 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001752 break;
1753 }
1754 case kCollectorTypeMS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001755 gc_plan_.push_back(collector::kGcTypeSticky);
1756 gc_plan_.push_back(collector::kGcTypePartial);
1757 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001758 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001759 break;
1760 }
1761 case kCollectorTypeCMS: {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001762 gc_plan_.push_back(collector::kGcTypeSticky);
1763 gc_plan_.push_back(collector::kGcTypePartial);
1764 gc_plan_.push_back(collector::kGcTypeFull);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001765 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001766 break;
1767 }
1768 default: {
1769 LOG(FATAL) << "Unimplemented";
1770 }
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001771 }
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07001772 if (IsGcConcurrent()) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001773 concurrent_start_bytes_ =
1774 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
1775 } else {
1776 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001777 }
1778 }
1779}
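// Summary of the plans configured above:
//   kCollectorTypeCC / kCollectorTypeMC / kCollectorTypeSS / kCollectorTypeGSS:
//     gc_plan_ = { kGcTypeFull }; allocator is TLAB or bump pointer depending on use_tlab_.
//   kCollectorTypeMS / kCollectorTypeCMS:
//     gc_plan_ = { kGcTypeSticky, kGcTypePartial, kGcTypeFull }; allocator is RosAlloc or DlMalloc.
// For concurrent collectors, concurrent_start_bytes_ is also armed so a concurrent GC can start
// before the footprint limit is reached; otherwise it is effectively disabled (size_t max).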
1780
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001781// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
Ian Rogers6fac4472014-02-25 17:01:10 -08001782class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001783 public:
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001784 explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, false, "zygote collector"),
Ian Rogers6fac4472014-02-25 17:01:10 -08001785 bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001786 }
1787
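  // Record the gaps between consecutive live objects in the space (and the tail after the last
  // object) as bins; MarkNonForwardedObject later packs forwarded objects into these holes.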
1788 void BuildBins(space::ContinuousSpace* space) {
1789 bin_live_bitmap_ = space->GetLiveBitmap();
1790 bin_mark_bitmap_ = space->GetMarkBitmap();
1791 BinContext context;
1792 context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
1793 context.collector_ = this;
1794 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1795 // Note: This requires traversing the space in increasing order of object addresses.
1796 bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
1797 // Add the last bin which spans after the last object to the end of the space.
1798 AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
1799 }
1800
1801 private:
1802 struct BinContext {
1803 uintptr_t prev_; // The end of the previous object.
1804 ZygoteCompactingCollector* collector_;
1805 };
1806 // Maps from bin sizes to locations.
1807 std::multimap<size_t, uintptr_t> bins_;
1808 // Live bitmap of the space which contains the bins.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001809 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001810 // Mark bitmap of the space which contains the bins.
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001811 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001812
1813 static void Callback(mirror::Object* obj, void* arg)
1814 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1815 DCHECK(arg != nullptr);
1816 BinContext* context = reinterpret_cast<BinContext*>(arg);
1817 ZygoteCompactingCollector* collector = context->collector_;
1818 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
1819 size_t bin_size = object_addr - context->prev_;
1820 // Add the bin consisting of the end of the previous object to the start of the current object.
1821 collector->AddBin(bin_size, context->prev_);
1822 context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
1823 }
1824
1825 void AddBin(size_t size, uintptr_t position) {
1826 if (size != 0) {
1827 bins_.insert(std::make_pair(size, position));
1828 }
1829 }
1830
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001831 virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001832 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
1833 // allocator.
1834 return false;
1835 }
1836
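  // Pick a destination for a live object: reuse the smallest bin that still fits it, or, if no
  // bin is large enough, allocate at the end of the target space (growing the zygote space).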
1837 virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
1838 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
1839 size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
Mathieu Chartier5dc08a62014-01-10 10:10:23 -08001840 mirror::Object* forward_address;
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001841 // Find the smallest bin into which we can move obj.
1842 auto it = bins_.lower_bound(object_size);
1843 if (it == bins_.end()) {
 1844 // No available space in the bins; place it in the target space instead (grows the zygote
1845 // space).
Mathieu Chartier5dc08a62014-01-10 10:10:23 -08001846 size_t bytes_allocated;
Ian Rogers6fac4472014-02-25 17:01:10 -08001847 forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001848 if (to_space_live_bitmap_ != nullptr) {
1849 to_space_live_bitmap_->Set(forward_address);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001850 } else {
1851 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
1852 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001853 }
1854 } else {
1855 size_t size = it->first;
1856 uintptr_t pos = it->second;
1857 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
1858 forward_address = reinterpret_cast<mirror::Object*>(pos);
1859 // Set the live and mark bits so that sweeping system weaks works properly.
1860 bin_live_bitmap_->Set(forward_address);
1861 bin_mark_bitmap_->Set(forward_address);
1862 DCHECK_GE(size, object_size);
1863 AddBin(size - object_size, pos + object_size); // Add a new bin with the remaining space.
1864 }
1865 // Copy the object over to its new location.
1866 memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
Hiroshi Yamauchi624468c2014-03-31 15:14:47 -07001867 if (kUseBakerOrBrooksReadBarrier) {
1868 obj->AssertReadBarrierPointer();
1869 if (kUseBrooksReadBarrier) {
1870 DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
1871 forward_address->SetReadBarrierPointer(forward_address);
1872 }
1873 forward_address->AssertReadBarrierPointer();
Hiroshi Yamauchi9d04a202014-01-31 13:35:49 -08001874 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001875 return forward_address;
1876 }
1877};
1878
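// Undo any temporary binding of mark bitmaps to live bitmaps left in place by the last collection.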
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001879void Heap::UnBindBitmaps() {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001880 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001881 for (const auto& space : GetContinuousSpaces()) {
1882 if (space->IsContinuousMemMapAllocSpace()) {
1883 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1884 if (alloc_space->HasBoundBitmaps()) {
1885 alloc_space->UnBindBitmaps();
1886 }
1887 }
1888 }
1889}
1890
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001891void Heap::PreZygoteFork() {
Mathieu Chartier1f3b5352014-02-03 14:00:42 -08001892 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
Ian Rogers81d425b2012-09-27 16:03:43 -07001893 Thread* self = Thread::Current();
1894 MutexLock mu(self, zygote_creation_lock_);
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001895 // Try to see if we have any Zygote spaces.
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001896 if (HasZygoteSpace()) {
1897 LOG(WARNING) << __FUNCTION__ << " called when we already have a zygote space.";
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001898 return;
1899 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001900 VLOG(heap) << "Starting PreZygoteFork";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001901 // Trim the pages at the end of the non moving space.
1902 non_moving_space_->Trim();
Mathieu Chartier31f44142014-04-08 14:40:03 -07001903 // The end of the non-moving space may be protected; unprotect it so that we can copy the zygote
1904 // there.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001905 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001906 const bool same_space = non_moving_space_ == main_space_;
Mathieu Chartier31f44142014-04-08 14:40:03 -07001907 if (kCompactZygote) {
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001908 // Can't compact if the non moving space is the same as the main space.
Mathieu Chartier31f44142014-04-08 14:40:03 -07001909 DCHECK(semi_space_collector_ != nullptr);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001910 // Temporarily disable rosalloc verification because the zygote
1911 // compaction will mess up the rosalloc internal metadata.
1912 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001913 ZygoteCompactingCollector zygote_collector(this);
1914 zygote_collector.BuildBins(non_moving_space_);
Mathieu Chartier50482232013-11-21 11:48:14 -08001915 // Create a new bump pointer space which we will compact into.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001916 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
1917 non_moving_space_->Limit());
1918 // Compact the bump pointer space to a new zygote bump pointer space.
Mathieu Chartier31f44142014-04-08 14:40:03 -07001919 bool reset_main_space = false;
1920 if (IsMovingGc(collector_type_)) {
1921 zygote_collector.SetFromSpace(bump_pointer_space_);
1922 } else {
1923 CHECK(main_space_ != nullptr);
1924 // Copy from the main space.
1925 zygote_collector.SetFromSpace(main_space_);
1926 reset_main_space = true;
1927 }
Mathieu Chartier85a43c02014-01-07 17:59:00 -08001928 zygote_collector.SetToSpace(&target_space);
Mathieu Chartier1b54f9c2014-04-30 16:45:02 -07001929 zygote_collector.SetSwapSemiSpaces(false);
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08001930 zygote_collector.Run(kGcCauseCollectorTransition, false);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001931 if (reset_main_space) {
1932 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1933 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
1934 MemMap* mem_map = main_space_->ReleaseMemMap();
1935 RemoveSpace(main_space_);
Mathieu Chartier96bcd452014-06-17 09:50:02 -07001936 space::Space* old_main_space = main_space_;
Mathieu Chartier31f44142014-04-08 14:40:03 -07001937 CreateMainMallocSpace(mem_map, kDefaultInitialSize, mem_map->Size(), mem_map->Size());
Mathieu Chartier96bcd452014-06-17 09:50:02 -07001938 delete old_main_space;
Mathieu Chartier31f44142014-04-08 14:40:03 -07001939 AddSpace(main_space_);
1940 } else {
1941 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1942 }
1943 if (temp_space_ != nullptr) {
1944 CHECK(temp_space_->IsEmpty());
1945 }
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001946 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
1947 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001948 // Update the end and write out image.
1949 non_moving_space_->SetEnd(target_space.End());
1950 non_moving_space_->SetLimit(target_space.Limit());
Mathieu Chartier31f44142014-04-08 14:40:03 -07001951 VLOG(heap) << "Zygote space size " << non_moving_space_->Size() << " bytes";
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001952 }
Mathieu Chartier6a7824d2014-08-22 14:53:04 -07001953 // Change the collector to the post zygote one.
Mathieu Chartier31f44142014-04-08 14:40:03 -07001954 ChangeCollector(foreground_collector_type_);
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001955 // Save the old space so that we can remove it after we complete creating the zygote space.
1956 space::MallocSpace* old_alloc_space = non_moving_space_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001957 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001958 // the remaining available space.
1959 // Remove the old space before creating the zygote space since creating the zygote space sets
1960 // the old alloc space's bitmaps to nullptr.
1961 RemoveSpace(old_alloc_space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001962 if (collector::SemiSpace::kUseRememberedSet) {
1963 // Sanity bound check.
1964 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
 1965 // Remove the remembered set for the space that is now the zygote space (the old
 1966 // non-moving space). Now that we have compacted objects into
1967 // the zygote space, the data in the remembered set is no longer
1968 // needed. The zygote space will instead have a mod-union table
1969 // from this point on.
1970 RemoveRememberedSet(old_alloc_space);
1971 }
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001972 zygote_space_ = old_alloc_space->CreateZygoteSpace("alloc space", low_memory_mode_,
1973 &non_moving_space_);
Mathieu Chartierb363f662014-07-16 13:28:58 -07001974 CHECK(!non_moving_space_->CanMoveObjects());
1975 if (same_space) {
1976 main_space_ = non_moving_space_;
1977 SetSpaceAsDefault(main_space_);
1978 }
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001979 delete old_alloc_space;
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001980 CHECK(HasZygoteSpace()) << "Failed creating zygote space";
1981 AddSpace(zygote_space_);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001982 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
1983 AddSpace(non_moving_space_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001984 // Create the zygote space mod union table.
1985 accounting::ModUnionTable* mod_union_table =
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001986 new accounting::ModUnionTableCardCache("zygote space mod-union table", this,
1987 zygote_space_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001988 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001989 // Set all the cards in the mod-union table since we don't know which objects contain references
1990 // to large objects.
1991 mod_union_table->SetCards();
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001992 AddModUnionTable(mod_union_table);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001993 if (collector::SemiSpace::kUseRememberedSet) {
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001994 // Add a new remembered set for the post-zygote non-moving space.
1995 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
1996 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
1997 non_moving_space_);
1998 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
1999 << "Failed to create post-zygote non-moving space remembered set";
2000 AddRememberedSet(post_zygote_non_moving_space_rem_set);
2001 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002002}
2003
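// Mark everything currently on the allocation stack as live, then reset the stack.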
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002004void Heap::FlushAllocStack() {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002005 MarkAllocStackAsLive(allocation_stack_.get());
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002006 allocation_stack_->Reset();
2007}
2008
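// Mark each object on the allocation stack in whichever bitmap covers its address, falling back
// to the large object bitmap for addresses outside both continuous space bitmaps.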
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002009void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2010 accounting::ContinuousSpaceBitmap* bitmap2,
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07002011 accounting::LargeObjectBitmap* large_objects,
Ian Rogers1d54e732013-05-02 21:10:01 -07002012 accounting::ObjectStack* stack) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002013 DCHECK(bitmap1 != nullptr);
2014 DCHECK(bitmap2 != nullptr);
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002015 mirror::Object** limit = stack->End();
2016 for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
2017 const mirror::Object* obj = *it;
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002018 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2019 if (bitmap1->HasAddress(obj)) {
2020 bitmap1->Set(obj);
2021 } else if (bitmap2->HasAddress(obj)) {
2022 bitmap2->Set(obj);
2023 } else {
2024 large_objects->Set(obj);
2025 }
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07002026 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07002027 }
2028}
2029
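// Swap the bump pointer space and temp space pointers so that the space the collector just copied
// live objects into becomes the space that is allocated from.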
Mathieu Chartier590fee92013-09-13 13:46:47 -07002030void Heap::SwapSemiSpaces() {
Mathieu Chartier31f44142014-04-08 14:40:03 -07002031 CHECK(bump_pointer_space_ != nullptr);
2032 CHECK(temp_space_ != nullptr);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002033 std::swap(bump_pointer_space_, temp_space_);
2034}
2035
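// Copy live objects from source_space into target_space with the semi-space collector; if the two
// spaces are the same, fall back to in-place compaction with the mark-compact collector (only
// supported for bump pointer spaces).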
2036void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
Zuo Wangf37a88b2014-07-10 04:26:41 -07002037 space::ContinuousMemMapAllocSpace* source_space,
2038 GcCause gc_cause) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002039 CHECK(kMovingCollector);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002040 if (target_space != source_space) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002041 // Don't swap spaces since this isn't a typical semi space collection.
2042 semi_space_collector_->SetSwapSemiSpaces(false);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002043 semi_space_collector_->SetFromSpace(source_space);
2044 semi_space_collector_->SetToSpace(target_space);
Zuo Wangf37a88b2014-07-10 04:26:41 -07002045 semi_space_collector_->Run(gc_cause, false);
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002046 } else {
2047 CHECK(target_space->IsBumpPointerSpace())
2048 << "In-place compaction is only supported for bump pointer spaces";
2049 mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2050 mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002051 }
2052}
Anwar Ghuloum67f99412013-08-12 14:19:48 -07002053
Ian Rogers1d54e732013-05-02 21:10:01 -07002054collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
2055 bool clear_soft_references) {
Ian Rogers81d425b2012-09-27 16:03:43 -07002056 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002057 Runtime* runtime = Runtime::Current();
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002058 // If the heap can't run the GC, silently fail and return that no GC was run.
2059 switch (gc_type) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002060 case collector::kGcTypePartial: {
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002061 if (!HasZygoteSpace()) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002062 return collector::kGcTypeNone;
2063 }
2064 break;
2065 }
2066 default: {
 2067 // Other GC types don't have any special cases that make them unrunnable. The main case
2068 // here is full GC.
2069 }
2070 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08002071 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Ian Rogers81d425b2012-09-27 16:03:43 -07002072 Locks::mutator_lock_->AssertNotHeld(self);
Ian Rogers120f1c72012-09-28 17:17:10 -07002073 if (self->IsHandlingStackOverflow()) {
2074 LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
2075 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002076 bool compacting_gc;
2077 {
2078 gc_complete_lock_->AssertNotHeld(self);
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002079 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002080 MutexLock mu(self, *gc_complete_lock_);
2081 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002082 WaitForGcToCompleteLocked(gc_cause, self);
Mathieu Chartier31f44142014-04-08 14:40:03 -07002083 compacting_gc = IsMovingGc(collector_type_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002084 // Moving GC can be disabled if someone has an outstanding GetPrimitiveArrayCritical.
2085 if (compacting_gc && disable_moving_gc_count_ != 0) {
2086 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2087 return collector::kGcTypeNone;
2088 }
2089 collector_type_running_ = collector_type_;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002090 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002091
Mathieu Chartier590fee92013-09-13 13:46:47 -07002092 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2093 ++runtime->GetStats()->gc_for_alloc_count;
2094 ++self->GetStats()->gc_for_alloc_count;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002095 }
Ian Rogers1d54e732013-05-02 21:10:01 -07002096 uint64_t gc_start_time_ns = NanoTime();
Mathieu Chartier65db8802012-11-20 12:36:46 -08002097 uint64_t gc_start_size = GetBytesAllocated();
2098 // Approximate allocation rate in bytes / second.
Ian Rogers1d54e732013-05-02 21:10:01 -07002099 uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002100 // Back to back GCs can cause 0 ms of wait time in between GC invocations.
2101 if (LIKELY(ms_delta != 0)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002102 allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
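      // For example, 10 MiB allocated over a 500 ms interval gives an allocation rate of ~20 MiB/s.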
Mathieu Chartier65db8802012-11-20 12:36:46 -08002103 VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
2104 }
2105
Ian Rogers1d54e732013-05-02 21:10:01 -07002106 DCHECK_LT(gc_type, collector::kGcTypeMax);
2107 DCHECK_NE(gc_type, collector::kGcTypeNone);
Anwar Ghuloum67f99412013-08-12 14:19:48 -07002108
Mathieu Chartier590fee92013-09-13 13:46:47 -07002109 collector::GarbageCollector* collector = nullptr;
Mathieu Chartier50482232013-11-21 11:48:14 -08002110 // TODO: Clean this up.
Mathieu Chartier1d27b342014-01-28 12:51:09 -08002111 if (compacting_gc) {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08002112 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2113 current_allocator_ == kAllocatorTypeTLAB);
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002114 switch (collector_type_) {
2115 case kCollectorTypeSS:
2116 // Fall-through.
2117 case kCollectorTypeGSS:
2118 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2119 semi_space_collector_->SetToSpace(temp_space_);
2120 semi_space_collector_->SetSwapSemiSpaces(true);
2121 collector = semi_space_collector_;
2122 break;
2123 case kCollectorTypeCC:
2124 collector = concurrent_copying_collector_;
2125 break;
2126 case kCollectorTypeMC:
2127 mark_compact_collector_->SetSpace(bump_pointer_space_);
2128 collector = mark_compact_collector_;
2129 break;
2130 default:
2131 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07002132 }
Mathieu Chartier52e4b432014-06-10 11:22:31 -07002133 if (collector != mark_compact_collector_) {
2134 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2135 CHECK(temp_space_->IsEmpty());
2136 }
2137 gc_type = collector::kGcTypeFull; // TODO: Not hard code this in.
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002138 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2139 current_allocator_ == kAllocatorTypeDlMalloc) {
Mathieu Chartierafe49982014-03-27 10:55:04 -07002140 collector = FindCollectorByGcType(gc_type);
Mathieu Chartier50482232013-11-21 11:48:14 -08002141 } else {
2142 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002143 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08002144 CHECK(collector != nullptr)
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002145 << "Could not find garbage collector with collector_type="
2146 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002147 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002148 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2149 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
Mathieu Chartier7bf52d22014-03-13 14:46:09 -07002150 RequestHeapTrim();
Mathieu Chartier39e32612013-11-12 16:28:05 -08002151 // Enqueue cleared references.
Mathieu Chartier308351a2014-06-15 12:39:02 -07002152 reference_processor_.EnqueueClearedReferences(self);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002153 // Grow the heap so that we know when to perform the next GC.
Mathieu Chartierafe49982014-03-27 10:55:04 -07002154 GrowForUtilization(collector);
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002155 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2156 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002157 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002158 // (mutator time blocked >= long_pause_log_threshold_).
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002159 bool log_gc = gc_cause == kGcCauseExplicit;
2160 if (!log_gc && CareAboutPauseTimes()) {
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002161 // GC for alloc pauses the allocating thread, so consider it as a pause.
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002162 log_gc = duration > long_gc_log_threshold_ ||
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002163 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002164 for (uint64_t pause : pause_times) {
2165 log_gc = log_gc || pause >= long_pause_log_threshold_;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002166 }
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002167 }
2168 if (log_gc) {
2169 const size_t percent_free = GetPercentFree();
2170 const size_t current_heap_size = GetBytesAllocated();
2171 const size_t total_memory = GetTotalMemory();
2172 std::ostringstream pause_string;
2173 for (size_t i = 0; i < pause_times.size(); ++i) {
2174 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002175 << ((i != pause_times.size() - 1) ? "," : "");
Mathieu Chartiere53225c2013-08-19 10:59:11 -07002176 }
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002177 LOG(INFO) << gc_cause << " " << collector->GetName()
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002178 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2179 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2180 << current_gc_iteration_.GetFreedLargeObjects() << "("
2181 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07002182 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2183 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2184 << " total " << PrettyDuration((duration / 1000) * 1000);
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002185 VLOG(heap) << ConstDumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002186 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002187 FinishGC(self, gc_type);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07002188 // Inform DDMS that a GC completed.
Ian Rogers15bf2d32012-08-28 17:33:04 -07002189 Dbg::GcDidFinish();
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07002190 return gc_type;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002191}
Mathieu Chartiera6399032012-06-11 18:49:50 -07002192
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002193void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2194 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002195 collector_type_running_ = kCollectorTypeNone;
2196 if (gc_type != collector::kGcTypeNone) {
2197 last_gc_type_ = gc_type;
2198 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002199 // Wake anyone who may have been waiting for the GC to complete.
2200 gc_complete_cond_->Broadcast(self);
2201}
2202
Mathieu Chartier815873e2014-02-13 18:02:13 -08002203static void RootMatchesObjectVisitor(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
2204 RootType /*root_type*/) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002205 mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
Mathieu Chartier815873e2014-02-13 18:02:13 -08002206 if (*root == obj) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002207 LOG(INFO) << "Object " << obj << " is a root";
2208 }
2209}
2210
2211class ScanVisitor {
2212 public:
Brian Carlstromdf629502013-07-17 22:39:56 -07002213 void operator()(const mirror::Object* obj) const {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002214 LOG(ERROR) << "Would have rescanned object " << obj;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002215 }
2216};
2217
Ian Rogers1d54e732013-05-02 21:10:01 -07002218// Verify a reference from an object.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002219class VerifyReferenceVisitor {
2220 public:
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002221 explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
Ian Rogers1d54e732013-05-02 21:10:01 -07002222 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002223 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
Ian Rogers1d54e732013-05-02 21:10:01 -07002224
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002225 size_t GetFailureCount() const {
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07002226 return fail_count_->LoadSequentiallyConsistent();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002227 }
2228
Mathieu Chartier407f7022014-02-18 14:37:05 -08002229 void operator()(mirror::Class* klass, mirror::Reference* ref) const
2230 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002231 if (verify_referent_) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002232 VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002233 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08002234 }
2235
Mathieu Chartier3b05e9b2014-03-25 09:29:43 -07002236 void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
Mathieu Chartier407f7022014-02-18 14:37:05 -08002237 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002238 VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
Mathieu Chartier407f7022014-02-18 14:37:05 -08002239 }
2240
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002241 bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
2242 return heap_->IsLiveObjectLocked(obj, true, false, true);
2243 }
2244
2245 static void VerifyRootCallback(mirror::Object** root, void* arg, uint32_t thread_id,
2246 RootType root_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2247 VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
2248 if (!visitor->VerifyReference(nullptr, *root, MemberOffset(0))) {
2249 LOG(ERROR) << "Root " << *root << " is dead with type " << PrettyTypeOf(*root)
2250 << " thread_id= " << thread_id << " root_type= " << root_type;
2251 }
2252 }
2253
2254 private:
Mathieu Chartier407f7022014-02-18 14:37:05 -08002255 // TODO: Fix the no thread safety analysis.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002256 // Returns false on failure.
2257 bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002258 NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002259 if (ref == nullptr || IsLive(ref)) {
2260 // Verify that the reference is live.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002261 return true;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002262 }
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07002263 if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002264 // Only print the message on the first failure to prevent spam.
2265 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002266 }
2267 if (obj != nullptr) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002268 // Only do this part for non roots.
Ian Rogers1d54e732013-05-02 21:10:01 -07002269 accounting::CardTable* card_table = heap_->GetCardTable();
2270 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2271 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002272 byte* card_addr = card_table->CardFromAddr(obj);
2273 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2274 << offset << "\n card value = " << static_cast<int>(*card_addr);
2275 if (heap_->IsValidObjectAddress(obj->GetClass())) {
2276 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
2277 } else {
2278 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002279 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002280
Mathieu Chartierb363f662014-07-16 13:28:58 -07002281 // Attempt to find the class inside of the recently freed objects.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002282 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2283 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2284 space::MallocSpace* space = ref_space->AsMallocSpace();
2285 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2286 if (ref_class != nullptr) {
2287 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2288 << PrettyClass(ref_class);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002289 } else {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002290 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002291 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002292 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002293
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002294 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2295 ref->GetClass()->IsClass()) {
2296 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
2297 } else {
2298 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2299 << ") is not a valid heap address";
2300 }
2301
2302 card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
2303 void* cover_begin = card_table->AddrFromCard(card_addr);
2304 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2305 accounting::CardTable::kCardSize);
2306 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2307 << "-" << cover_end;
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002308 accounting::ContinuousSpaceBitmap* bitmap =
2309 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002310
2311 if (bitmap == nullptr) {
2312 LOG(ERROR) << "Object " << obj << " has no bitmap";
Mathieu Chartier4e305412014-02-19 10:54:44 -08002313 if (!VerifyClassClass(obj->GetClass())) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002314 LOG(ERROR) << "Object " << obj << " failed class verification!";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002315 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002316 } else {
Ian Rogers1d54e732013-05-02 21:10:01 -07002317 // Print out how the object is live.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002318 if (bitmap->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002319 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2320 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002321 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002322 LOG(ERROR) << "Object " << obj << " found in allocation stack";
2323 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002324 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002325 LOG(ERROR) << "Object " << obj << " found in live stack";
2326 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002327 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2328 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2329 }
2330 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2331 LOG(ERROR) << "Ref " << ref << " found in live stack";
2332 }
Ian Rogers1d54e732013-05-02 21:10:01 -07002333 // Attempt to see if the card table missed the reference.
2334 ScanVisitor scan_visitor;
2335 byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
2336 card_table->Scan(bitmap, byte_cover_begin,
Mathieu Chartier184e3222013-08-03 14:02:57 -07002337 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002338 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002339
2340 // Search to see if any of the roots reference our object.
2341 void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
Mathieu Chartier893263b2014-03-04 11:07:42 -08002342 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002343
2344 // Search to see if any of the roots reference our reference.
2345 arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
Mathieu Chartier893263b2014-03-04 11:07:42 -08002346 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002347 }
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002348 return false;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002349 }
2350
Ian Rogers1d54e732013-05-02 21:10:01 -07002351 Heap* const heap_;
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002352 Atomic<size_t>* const fail_count_;
2353 const bool verify_referent_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002354};
2355
Ian Rogers1d54e732013-05-02 21:10:01 -07002356// Verify all references within an object, for use with HeapBitmap::Visit.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002357class VerifyObjectVisitor {
2358 public:
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002359 explicit VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2360 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002361 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002362
Mathieu Chartier590fee92013-09-13 13:46:47 -07002363 void operator()(mirror::Object* obj) const
Ian Rogersb726dcb2012-09-05 08:57:23 -07002364 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002365 // Note: we are verifying the references in obj but not obj itself, this is because obj must
2366 // be live or else how did we find it in the live bitmap?
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002367 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002368 // The class doesn't count as a reference but we should verify it anyways.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002369 obj->VisitReferences<true>(visitor, visitor);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002370 }
2371
Mathieu Chartier590fee92013-09-13 13:46:47 -07002372 static void VisitCallback(mirror::Object* obj, void* arg)
2373 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2374 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
2375 visitor->operator()(obj);
2376 }
2377
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002378 size_t GetFailureCount() const {
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07002379 return fail_count_->LoadSequentiallyConsistent();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002380 }
2381
2382 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002383 Heap* const heap_;
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002384 Atomic<size_t>* const fail_count_;
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002385 const bool verify_referent_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002386};
2387
Mathieu Chartierc1790162014-05-23 10:54:50 -07002388void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2389 // Slow path, the allocation stack push back must have already failed.
2390 DCHECK(!allocation_stack_->AtomicPushBack(*obj));
2391 do {
2392 // TODO: Add handle VerifyObject.
2393 StackHandleScope<1> hs(self);
2394 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
 2395 // Push our object into the reserve region of the allocation stack. This is only required due
2396 // to heap verification requiring that roots are live (either in the live bitmap or in the
2397 // allocation stack).
2398 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2399 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2400 } while (!allocation_stack_->AtomicPushBack(*obj));
2401}
2402
2403void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2404 // Slow path, the allocation stack push back must have already failed.
2405 DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
2406 mirror::Object** start_address;
2407 mirror::Object** end_address;
2408 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
2409 &end_address)) {
2410 // TODO: Add handle VerifyObject.
2411 StackHandleScope<1> hs(self);
2412 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
 2413 // Push our object into the reserve region of the allocation stack. This is only required due
2414 // to heap verification requiring that roots are live (either in the live bitmap or in the
2415 // allocation stack).
2416 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2417 // Push into the reserve allocation stack.
2418 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2419 }
2420 self->SetThreadLocalAllocationStack(start_address, end_address);
2421 // Retry on the new thread-local allocation stack.
2422 CHECK(self->PushOnThreadLocalAllocationStack(*obj)); // Must succeed.
2423}
2424
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002425// Must do this with mutators suspended since we are directly accessing the allocation stacks.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002426size_t Heap::VerifyHeapReferences(bool verify_referents) {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002427 Thread* self = Thread::Current();
2428 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002429 // Let's sort our allocation stacks so that we can efficiently binary search them.
Ian Rogers1d54e732013-05-02 21:10:01 -07002430 allocation_stack_->Sort();
2431 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002432 // Since we sorted the allocation stack content, we need to revoke all
2433 // thread-local allocation stacks.
2434 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002435 Atomic<size_t> fail_count_(0);
2436 VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002437 // Verify objects in the allocation stack since these will be objects which were:
2438 // 1. Allocated prior to the GC (pre GC verification).
2439 // 2. Allocated during the GC (pre sweep GC verification).
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002440 // We don't want to verify the objects in the live stack since they themselves may be
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002441 // pointing to dead objects if they are not reachable.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002442 VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
2443 // Verify the roots:
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002444 Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRootCallback, &visitor);
2445 if (visitor.GetFailureCount() > 0) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002446 // Dump mod-union tables.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002447 for (const auto& table_pair : mod_union_tables_) {
2448 accounting::ModUnionTable* mod_union_table = table_pair.second;
2449 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
2450 }
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002451 // Dump remembered sets.
2452 for (const auto& table_pair : remembered_sets_) {
2453 accounting::RememberedSet* remembered_set = table_pair.second;
2454 remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
2455 }
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07002456 DumpSpaces(LOG(ERROR));
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002457 }
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002458 return visitor.GetFailureCount();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002459}
2460
2461class VerifyReferenceCardVisitor {
2462 public:
2463 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
2464 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
2465 Locks::heap_bitmap_lock_)
Ian Rogers1d54e732013-05-02 21:10:01 -07002466 : heap_(heap), failed_(failed) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002467 }
2468
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002469 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
2470 // annotalysis on visitors.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002471 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
2472 NO_THREAD_SAFETY_ANALYSIS {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07002473 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002474 // Filter out class references since changing an object's class does not mark the card as dirty.
2475 // Also handles large objects, since the only reference they hold is a class reference.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002476 if (ref != nullptr && !ref->IsClass()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002477 accounting::CardTable* card_table = heap_->GetCardTable();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002478 // If the object is not dirty and it is referencing something in the live stack other than
 2479 // its class, then it must be on a dirty card.
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07002480 if (!card_table->AddrIsInCardTable(obj)) {
2481 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
2482 *failed_ = true;
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002483 } else if (!card_table->IsDirty(obj)) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002484 // TODO: Check mod-union tables.
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002485 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
 2486 // kCardDirty - 1 if it didn't get touched since we aged it.
Ian Rogers1d54e732013-05-02 21:10:01 -07002487 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier407f7022014-02-18 14:37:05 -08002488 if (live_stack->ContainsSorted(ref)) {
2489 if (live_stack->ContainsSorted(obj)) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002490 LOG(ERROR) << "Object " << obj << " found in live stack";
2491 }
2492 if (heap_->GetLiveBitmap()->Test(obj)) {
2493 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2494 }
2495 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
2496 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
2497
2498 // Print which field of the object is dead.
2499 if (!obj->IsObjectArray()) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002500 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002501 CHECK(klass != nullptr);
Ian Rogersef7d42f2014-01-06 12:55:46 -08002502 mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
2503 : klass->GetIFields();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002504 CHECK(fields != nullptr);
2505 for (int32_t i = 0; i < fields->GetLength(); ++i) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002506 mirror::ArtField* cur = fields->Get(i);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002507 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
2508 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
2509 << PrettyField(cur);
2510 break;
2511 }
2512 }
2513 } else {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002514 mirror::ObjectArray<mirror::Object>* object_array =
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002515 obj->AsObjectArray<mirror::Object>();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002516 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
2517 if (object_array->Get(i) == ref) {
2518 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
2519 }
2520 }
2521 }
2522
2523 *failed_ = true;
2524 }
2525 }
2526 }
2527 }
2528
2529 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002530 Heap* const heap_;
2531 bool* const failed_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002532};
2533
2534class VerifyLiveStackReferences {
2535 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07002536 explicit VerifyLiveStackReferences(Heap* heap)
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002537 : heap_(heap),
Brian Carlstrom93ba8932013-07-17 21:31:49 -07002538 failed_(false) {}
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002539
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002540 void operator()(mirror::Object* obj) const
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002541 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2542 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07002543 obj->VisitReferences<true>(visitor, VoidFunctor());
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002544 }
2545
2546 bool Failed() const {
2547 return failed_;
2548 }
2549
2550 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002551 Heap* const heap_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002552 bool failed_;
2553};
2554
2555bool Heap::VerifyMissingCardMarks() {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002556 Thread* self = Thread::Current();
2557 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002558 // We need to sort the live stack since we binary search it.
Ian Rogers1d54e732013-05-02 21:10:01 -07002559 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002560 // Since we sorted the allocation stack content, we need to revoke all
2561 // thread-local allocation stacks.
2562 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002563 VerifyLiveStackReferences visitor(this);
2564 GetLiveBitmap()->Visit(visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002565 // We can verify objects in the live stack since none of these should reference dead objects.
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002566 for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002567 if (!kUseThreadLocalAllocationStack || *it != nullptr) {
2568 visitor(*it);
2569 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002570 }
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07002571 return !visitor.Failed();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002572}
2573
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002574void Heap::SwapStacks(Thread* self) {
2575 if (kUseThreadLocalAllocationStack) {
2576 live_stack_->AssertAllZero();
2577 }
Mathieu Chartierd22d5482012-11-06 17:14:12 -08002578 allocation_stack_.swap(live_stack_);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002579}
2580
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002581void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002582 // This must be called only during the pause.
2583 CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
2584 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
2585 MutexLock mu2(self, *Locks::thread_list_lock_);
2586 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
2587 for (Thread* t : thread_list) {
2588 t->RevokeThreadLocalAllocationStack();
2589 }
2590}
2591
Ian Rogers68d8b422014-07-17 11:09:10 -07002592void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
2593 if (kIsDebugBuild) {
2594 if (rosalloc_space_ != nullptr) {
2595 rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
2596 }
2597 if (bump_pointer_space_ != nullptr) {
2598 bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
2599 }
2600 }
2601}
2602
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07002603void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
2604 if (kIsDebugBuild) {
2605 if (bump_pointer_space_ != nullptr) {
2606 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
2607 }
2608 }
2609}
2610
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002611accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
2612 auto it = mod_union_tables_.find(space);
2613 if (it == mod_union_tables_.end()) {
2614 return nullptr;
2615 }
2616 return it->second;
2617}
2618
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002619accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
2620 auto it = remembered_sets_.find(space);
2621 if (it == remembered_sets_.end()) {
2622 return nullptr;
2623 }
2624 return it->second;
2625}
2626
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002627void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002628 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Ian Rogers1d54e732013-05-02 21:10:01 -07002629 // Clear cards and keep track of cards cleared in the mod-union table.
Mathieu Chartier02e25112013-08-14 16:14:24 -07002630 for (const auto& space : continuous_spaces_) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002631 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002632 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002633 if (table != nullptr) {
2634 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
2635 "ImageModUnionClearCards";
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002636 TimingLogger::ScopedTiming t(name, timings);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002637 table->ClearCards();
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002638 } else if (use_rem_sets && rem_set != nullptr) {
2639 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
2640 << static_cast<int>(collector_type_);
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002641 TimingLogger::ScopedTiming t("AllocSpaceRemSetClearCards", timings);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002642 rem_set->ClearCards();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002643 } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002644 TimingLogger::ScopedTiming t("AllocSpaceClearCards", timings);
Mathieu Chartierd22d5482012-11-06 17:14:12 -08002645 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
2646 // were dirty before the GC started.
Mathieu Chartierbd0a6532014-02-27 11:14:21 -08002647 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
2648 // -> clean(cleaning thread).
Mathieu Chartier590fee92013-09-13 13:46:47 -07002649 // The race means we end up with either an aged card or an unaged card. Since we checkpoint
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002650 // the roots and then scan / update the mod union tables afterwards, we will scan either card.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002651 // If we end up with the unaged card, we scan it in the pause.
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002652 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
2653 VoidFunctor());
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07002654 }
2655 }
2656}
2657
Mathieu Chartier407f7022014-02-18 14:37:05 -08002658static void IdentityMarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*, void*) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002659}
2660
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002661void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
2662 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002663 TimingLogger* const timings = current_gc_iteration_.GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002664 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002665 if (verify_pre_gc_heap_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002666 TimingLogger::ScopedTiming t("(Paused)PreGcVerifyHeapReferences", timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002667 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002668 size_t failures = VerifyHeapReferences();
2669 if (failures > 0) {
2670 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
2671 << " failures";
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002672 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002673 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002674 // Check that all objects which reference things in the live stack are on dirty cards.
2675 if (verify_missing_card_marks_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002676 TimingLogger::ScopedTiming t("(Paused)PreGcVerifyMissingCardMarks", timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002677 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2678 SwapStacks(self);
2679 // Sort the live stack so that we can quickly binary search it later.
Mathieu Chartier4c13a3f2014-07-14 14:57:16 -07002680 CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
2681 << " missing card mark verification failed\n" << DumpSpaces();
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002682 SwapStacks(self);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002683 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002684 if (verify_mod_union_table_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002685 TimingLogger::ScopedTiming t("(Paused)PreGcVerifyModUnionTables", timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002686 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002687 for (const auto& table_pair : mod_union_tables_) {
2688 accounting::ModUnionTable* mod_union_table = table_pair.second;
Mathieu Chartier407f7022014-02-18 14:37:05 -08002689 mod_union_table->UpdateAndMarkReferences(IdentityMarkHeapReferenceCallback, nullptr);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002690 mod_union_table->Verify();
2691 }
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002692 }
2693}
2694
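// Unpaused entry point: only takes the expensive ScopedPause when at least one pre-GC
// verification flag is enabled.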
2695void Heap::PreGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07002696 if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002697 collector::GarbageCollector::ScopedPause pause(gc);
2698 PreGcVerificationPaused(gc);
2699 }
2700}
2701
2702void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
2703 // TODO: Add a new runtime option for this?
2704 if (verify_pre_gc_rosalloc_) {
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002705 RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002706 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002707}
2708
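// Verification that runs after marking but before sweeping. The mark and live bitmaps are
// temporarily swapped so that the heap is checked in the state it will be in once sweeping
// completes.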
Ian Rogers1d54e732013-05-02 21:10:01 -07002709void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002710 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002711 TimingLogger* const timings = current_gc_iteration_.GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002712 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002713 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
2714 // reachable objects.
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002715 if (verify_pre_sweeping_heap_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002716 TimingLogger::ScopedTiming t("(Paused)PostSweepingVerifyHeapReferences", timings);
Ian Rogers1d54e732013-05-02 21:10:01 -07002717 CHECK_NE(self->GetState(), kRunnable);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002718 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2719 // Swapping bound bitmaps does nothing.
2720 gc->SwapBitmaps();
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002721 // Pass in false since concurrent reference processing can mean that the reference referents
2722 // may point to dead objects at the point at which PreSweepingGcVerification is called.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002723 size_t failures = VerifyHeapReferences(false);
2724 if (failures > 0) {
2725 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
2726 << " failures";
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002727 }
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002728 gc->SwapBitmaps();
2729 }
2730 if (verify_pre_sweeping_rosalloc_) {
2731 RosAllocVerification(timings, "PreSweepingRosAllocVerification");
2732 }
2733}
2734
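// Post-GC verification while still paused: system weak verification, RosAlloc verification and
// a full heap reference check, each gated by its own verify_* flag.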
2735void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
2736 // Only pause if we have to do some verification.
2737 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002738 TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002739 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002740 if (verify_system_weaks_) {
2741 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
2742 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
2743 mark_sweep->VerifySystemWeaks();
2744 }
2745 if (verify_post_gc_rosalloc_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002746 RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002747 }
2748 if (verify_post_gc_heap_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002749 TimingLogger::ScopedTiming t("(Paused)PostGcVerifyHeapReferences", timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002750 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002751 size_t failures = VerifyHeapReferences();
2752 if (failures > 0) {
2753 LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
2754 << " failures";
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002755 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002756 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002757}
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002758
Ian Rogers1d54e732013-05-02 21:10:01 -07002759void Heap::PostGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002760 if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
2761 collector::GarbageCollector::ScopedPause pause(gc);
Mathieu Chartierd35326f2014-08-18 15:02:59 -07002762 PostGcVerificationPaused(gc);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002763 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07002764}
2765
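// Walks all continuous spaces and runs the internal RosAlloc consistency checks on each
// RosAlloc space; 'name' only labels the timing entry and log output.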
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002766void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002767 TimingLogger::ScopedTiming t(name, timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002768 for (const auto& space : continuous_spaces_) {
2769 if (space->IsRosAllocSpace()) {
2770 VLOG(heap) << name << " : " << space->GetName();
2771 space->AsRosAllocSpace()->Verify();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002772 }
2773 }
2774}
2775
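// Blocks the calling thread until any in-progress collection finishes and returns the type of
// the last GC that ran (kGcTypeNone if nothing was running). Waits longer than
// long_pause_log_threshold_ are logged together with the cause.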
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002776collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002777 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002778 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002779 return WaitForGcToCompleteLocked(cause, self);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002780}
2781
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002782collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002783 collector::GcType last_gc_type = collector::kGcTypeNone;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002784 uint64_t wait_start = NanoTime();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002785 while (collector_type_running_ != kCollectorTypeNone) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002786 ATRACE_BEGIN("GC: Wait For Completion");
2787 // We must wait, change thread state then sleep on gc_complete_cond_;
2788 gc_complete_cond_->Wait(self);
2789 last_gc_type = last_gc_type_;
Mathieu Chartier752a0e62013-06-27 11:03:27 -07002790 ATRACE_END();
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07002791 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07002792 uint64_t wait_time = NanoTime() - wait_start;
2793 total_wait_time_ += wait_time;
2794 if (wait_time > long_pause_log_threshold_) {
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002795 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
2796 << " for cause " << cause;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002797 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07002798 return last_gc_type;
Carl Shapiro69759ea2011-07-21 18:13:35 -07002799}
2800
Elliott Hughesc967f782012-04-16 10:23:15 -07002801void Heap::DumpForSigQuit(std::ostream& os) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002802 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002803 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -07002804 DumpGcPerformanceInfo(os);
Elliott Hughesc967f782012-04-16 10:23:15 -07002805}
2806
2807size_t Heap::GetPercentFree() {
Mathieu Chartierd30e1d62014-06-09 13:25:22 -07002808 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
Elliott Hughesc967f782012-04-16 10:23:15 -07002809}
2810
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08002811void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002812 if (max_allowed_footprint > GetMaxMemory()) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002813 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002814 << PrettySize(GetMaxMemory());
2815 max_allowed_footprint = GetMaxMemory();
2816 }
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -07002817 max_allowed_footprint_ = max_allowed_footprint;
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07002818}
2819
Mathieu Chartier590fee92013-09-13 13:46:47 -07002820bool Heap::IsMovableObject(const mirror::Object* obj) const {
2821 if (kMovingCollector) {
Mathieu Chartier31f44142014-04-08 14:40:03 -07002822 space::Space* space = FindContinuousSpaceFromObject(obj, true);
2823 if (space != nullptr) {
2824 // TODO: Check large object?
2825 return space->CanMoveObjects();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002826 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07002827 }
2828 return false;
2829}
2830
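// Recomputes the native allocation watermarks from the current native byte count. As an
// illustration (values are made up, not the runtime defaults): with a target utilization of 0.5,
// max_free of 2MB and 8MB of registered native allocations, the raw target would be 16MB,
// clamped to 10MB for native_footprint_gc_watermark_, giving a native_footprint_limit_ of
// 2 * 10MB - 8MB = 12MB.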
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002831void Heap::UpdateMaxNativeFootprint() {
Ian Rogers3e5cf302014-05-20 16:40:37 -07002832 size_t native_size = native_bytes_allocated_.LoadRelaxed();
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002833 // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
2834 size_t target_size = native_size / GetTargetHeapUtilization();
2835 if (target_size > native_size + max_free_) {
2836 target_size = native_size + max_free_;
2837 } else if (target_size < native_size + min_free_) {
2838 target_size = native_size + min_free_;
2839 }
2840 native_footprint_gc_watermark_ = target_size;
2841 native_footprint_limit_ = 2 * target_size - native_size;
2842}
2843
Mathieu Chartierafe49982014-03-27 10:55:04 -07002844collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
2845 for (const auto& collector : garbage_collectors_) {
2846 if (collector->GetCollectorType() == collector_type_ &&
2847 collector->GetGcType() == gc_type) {
2848 return collector;
2849 }
2850 }
2851 return nullptr;
2852}
2853
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002854double Heap::HeapGrowthMultiplier() const {
2855 // If we don't care about pause times we are background, so return 1.0.
2856 if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
2857 return 1.0;
2858 }
2859 return foreground_heap_growth_multiplier_;
2860}
2861
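// Called at the end of a collection to pick the next GC type and the new ideal footprint.
// Non-sticky collections grow the heap toward the target utilization; sticky collections keep
// the footprint and only decide whether the next collection should stay sticky or fall back to
// partial/full based on throughput.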
Mathieu Chartierafe49982014-03-27 10:55:04 -07002862void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002863 // We know what our utilization is at this moment.
2864 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002865 const uint64_t bytes_allocated = GetBytesAllocated();
Mathieu Chartier65db8802012-11-20 12:36:46 -08002866 last_gc_size_ = bytes_allocated;
Ian Rogers1d54e732013-05-02 21:10:01 -07002867 last_gc_time_ns_ = NanoTime();
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002868 uint64_t target_size;
Mathieu Chartierafe49982014-03-27 10:55:04 -07002869 collector::GcType gc_type = collector_ran->GetGcType();
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002870 if (gc_type != collector::kGcTypeSticky) {
2871 // Grow the heap for non sticky GC.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002872 const float multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
2873 // foreground.
2874 intptr_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
2875 CHECK_GE(delta, 0);
2876 target_size = bytes_allocated + delta * multiplier;
2877 target_size = std::min(target_size,
2878 bytes_allocated + static_cast<uint64_t>(max_free_ * multiplier));
2879 target_size = std::max(target_size,
2880 bytes_allocated + static_cast<uint64_t>(min_free_ * multiplier));
Mathieu Chartier590fee92013-09-13 13:46:47 -07002881 native_need_to_run_finalization_ = true;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002882 next_gc_type_ = collector::kGcTypeSticky;
2883 } else {
Mathieu Chartierafe49982014-03-27 10:55:04 -07002884 collector::GcType non_sticky_gc_type =
Mathieu Chartiere4cab172014-08-19 18:24:04 -07002885 HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
Mathieu Chartierafe49982014-03-27 10:55:04 -07002886 // Find what the next non sticky collector will be.
2887 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
2888 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
2889 // do another sticky collection next.
2890 // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
2891 // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
2892 // if the sticky GC throughput always remained >= the full/partial throughput.
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002893 if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
Mathieu Chartierafe49982014-03-27 10:55:04 -07002894 non_sticky_collector->GetEstimatedMeanThroughput() &&
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002895 non_sticky_collector->NumberOfIterations() > 0 &&
Mathieu Chartierafe49982014-03-27 10:55:04 -07002896 bytes_allocated <= max_allowed_footprint_) {
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002897 next_gc_type_ = collector::kGcTypeSticky;
2898 } else {
Mathieu Chartierafe49982014-03-27 10:55:04 -07002899 next_gc_type_ = non_sticky_gc_type;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002900 }
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002901 // If we have freed enough memory, shrink the heap back down.
2902 if (bytes_allocated + max_free_ < max_allowed_footprint_) {
2903 target_size = bytes_allocated + max_free_;
2904 } else {
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002905 target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002906 }
2907 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002908 if (!ignore_max_footprint_) {
2909 SetIdealFootprint(target_size);
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002910 if (IsGcConcurrent()) {
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002911 // Calculate when to perform the next ConcurrentGC.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002912 // Calculate the estimated GC duration.
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002913 const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002914 // Estimate how many remaining bytes we will have when we need to start the next GC.
2915 size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
Mathieu Chartier74762802014-01-24 10:21:35 -08002916 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002917 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
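      // Example with illustrative numbers only: an allocation rate of ~100 MB/s and a 40 ms GC
      // give remaining_bytes of about 4 MB (after clamping), so the concurrent GC is requested
      // once allocation comes within roughly 4 MB of max_allowed_footprint_.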
2918 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
2919 // This should never happen: it would mean that, at the estimated allocation rate, we would
2920 // exceed the application's entire footprint before the next collection. Schedule
Mathieu Chartier74762802014-01-24 10:21:35 -08002921 // another GC nearly straight away.
2922 remaining_bytes = kMinConcurrentRemainingBytes;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002923 }
Mathieu Chartier74762802014-01-24 10:21:35 -08002924 DCHECK_LE(remaining_bytes, max_allowed_footprint_);
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07002925 DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
Mathieu Chartier74762802014-01-24 10:21:35 -08002926 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
2927 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
2928 // right away.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002929 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
2930 static_cast<size_t>(bytes_allocated));
Mathieu Chartier65db8802012-11-20 12:36:46 -08002931 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08002932 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07002933}
2934
jeffhaoc1160702011-10-27 15:48:45 -07002935void Heap::ClearGrowthLimit() {
Mathieu Chartier80de7a62012-11-27 17:21:50 -08002936 growth_limit_ = capacity_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002937 non_moving_space_->ClearGrowthLimit();
jeffhaoc1160702011-10-27 15:48:45 -07002938}
2939
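// Enqueues *object on java.lang.ref.FinalizerReference by calling FinalizerReference.add()
// through JNI. The object is passed via a local reference and written back afterwards in case a
// moving collector relocated it during the call.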
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07002940void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002941 ScopedObjectAccess soa(self);
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07002942 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
Ian Rogers53b8b092014-03-13 23:45:53 -07002943 jvalue args[1];
2944 args[0].l = arg.get();
2945 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07002946 // Restore object in case it gets moved.
2947 *object = soa.Decode<mirror::Object*>(arg.get());
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002948}
2949
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07002950void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
2951 StackHandleScope<1> hs(self);
2952 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2953 RequestConcurrentGC(self);
2954}
2955
Ian Rogers1f539342012-10-03 21:09:42 -07002956void Heap::RequestConcurrentGC(Thread* self) {
Mathieu Chartier069387a2012-06-18 12:01:01 -07002957 // Make sure that we can do a concurrent GC.
Ian Rogers120f1c72012-09-28 17:17:10 -07002958 Runtime* runtime = Runtime::Current();
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002959 if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
Mathieu Chartier590fee92013-09-13 13:46:47 -07002960 self->IsHandlingStackOverflow()) {
Ian Rogers120f1c72012-09-28 17:17:10 -07002961 return;
2962 }
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002963 // We already have a request pending, no reason to start more until we update
2964 // concurrent_start_bytes_.
2965 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
Ian Rogers120f1c72012-09-28 17:17:10 -07002966 JNIEnv* env = self->GetJniEnv();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002967 DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
2968 DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002969 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2970 WellKnownClasses::java_lang_Daemons_requestGC);
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07002971 CHECK(!env->ExceptionCheck());
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07002972}
2973
Ian Rogers81d425b2012-09-27 16:03:43 -07002974void Heap::ConcurrentGC(Thread* self) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002975 if (Runtime::Current()->IsShuttingDown(self)) {
2976 return;
Mathieu Chartier2542d662012-06-21 17:14:11 -07002977 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08002978 // Wait for any GCs currently running to finish.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002979 if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
Mathieu Chartierf9ed0d32013-11-21 16:42:47 -08002980 // If we can't run the GC type we wanted to run, find the next appropriate one and try that
2981 // instead. E.g. can't do partial, so do full instead.
2982 if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
2983 collector::kGcTypeNone) {
2984 for (collector::GcType gc_type : gc_plan_) {
2985 // Attempt to run the collector, if we succeed, we are done.
2986 if (gc_type > next_gc_type_ &&
2987 CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
2988 break;
2989 }
2990 }
2991 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002992 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07002993}
2994
Mathieu Chartier7bf52d22014-03-13 14:46:09 -07002995void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08002996 Thread* self = Thread::Current();
2997 {
2998 MutexLock mu(self, *heap_trim_request_lock_);
2999 if (desired_collector_type_ == desired_collector_type) {
3000 return;
3001 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07003002 heap_transition_or_trim_target_time_ =
3003 std::max(heap_transition_or_trim_target_time_, NanoTime() + delta_time);
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003004 desired_collector_type_ = desired_collector_type;
3005 }
3006 SignalHeapTrimDaemon(self);
3007}
3008
Mathieu Chartier7bf52d22014-03-13 14:46:09 -07003009void Heap::RequestHeapTrim() {
Ian Rogers48931882013-01-22 14:35:16 -08003010 // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3011 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3012 // a space it will hold its lock and can become a cause of jank.
3013 // Note: the large object space trims itself, and the zygote space was trimmed at fork time and
3014 // has been unchanging since.
3015
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08003016 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3017 // because that only marks object heads, so a large array looks like lots of empty space. We
3018 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3019 // to utilization (which is probably inversely proportional to how much benefit we can expect).
3020 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3021 // not how much use we're making of those pages.
Ian Rogers120f1c72012-09-28 17:17:10 -07003022
3023 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07003024 Runtime* runtime = Runtime::Current();
3025 if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
3026 // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time)
3027 // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
3028 // as we don't hold the lock while requesting the trim).
3029 return;
Ian Rogerse1d490c2012-02-03 09:09:07 -08003030 }
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07003031 {
3032 MutexLock mu(self, *heap_trim_request_lock_);
3033 if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
3034 // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one
3035 // just yet.
3036 return;
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003037 }
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07003038 heap_trim_request_pending_ = true;
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07003039 uint64_t current_time = NanoTime();
3040 if (heap_transition_or_trim_target_time_ < current_time) {
3041 heap_transition_or_trim_target_time_ = current_time + kHeapTrimWait;
3042 }
Mathieu Chartierc39e3422013-08-07 16:41:36 -07003043 }
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07003044 // Notify the daemon thread which will actually do the heap trim.
3045 SignalHeapTrimDaemon(self);
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08003046}
3047
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08003048void Heap::SignalHeapTrimDaemon(Thread* self) {
3049 JNIEnv* env = self->GetJniEnv();
3050 DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
3051 DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != nullptr);
3052 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
3053 WellKnownClasses::java_lang_Daemons_requestHeapTrim);
3054 CHECK(!env->ExceptionCheck());
3055}
3056
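// Returns a single thread's thread-local allocation buffers (RosAlloc runs and bump pointer
// space TLABs) to their owning spaces so the memory can be accounted for and reused.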
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003057void Heap::RevokeThreadLocalBuffers(Thread* thread) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003058 if (rosalloc_space_ != nullptr) {
3059 rosalloc_space_->RevokeThreadLocalBuffers(thread);
3060 }
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003061 if (bump_pointer_space_ != nullptr) {
3062 bump_pointer_space_->RevokeThreadLocalBuffers(thread);
3063 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003064}
3065
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07003066void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3067 if (rosalloc_space_ != nullptr) {
3068 rosalloc_space_->RevokeThreadLocalBuffers(thread);
3069 }
3070}
3071
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003072void Heap::RevokeAllThreadLocalBuffers() {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08003073 if (rosalloc_space_ != nullptr) {
3074 rosalloc_space_->RevokeAllThreadLocalBuffers();
3075 }
Mathieu Chartier692fafd2013-11-29 17:24:40 -08003076 if (bump_pointer_space_ != nullptr) {
3077 bump_pointer_space_->RevokeAllThreadLocalBuffers();
3078 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07003079}
3080
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003081bool Heap::IsGCRequestPending() const {
3082 return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
3083}
3084
Mathieu Chartier590fee92013-09-13 13:46:47 -07003085void Heap::RunFinalization(JNIEnv* env) {
3086 // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
3087 if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
3088 CHECK(WellKnownClasses::java_lang_System != nullptr);
3089 WellKnownClasses::java_lang_System_runFinalization =
3090 CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
3091 CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
3092 }
3093 env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
3094 WellKnownClasses::java_lang_System_runFinalization);
3095}
3096
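// Called when native memory is registered against the Java heap (e.g. via
// VMRuntime.registerNativeAllocation). Uses two watermarks: crossing
// native_footprint_gc_watermark_ requests a (possibly concurrent) GC, while crossing the higher
// native_footprint_limit_ runs finalizers and a blocking GC.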
Ian Rogers1eb512d2013-10-18 15:42:20 -07003097void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07003098 Thread* self = ThreadForEnv(env);
3099 if (native_need_to_run_finalization_) {
3100 RunFinalization(env);
3101 UpdateMaxNativeFootprint();
3102 native_need_to_run_finalization_ = false;
3103 }
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003104 // Total number of native bytes allocated.
Ian Rogers3e5cf302014-05-20 16:40:37 -07003105 size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
3106 new_native_bytes_allocated += bytes;
3107 if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
Mathieu Chartiere4cab172014-08-19 18:24:04 -07003108 collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08003109 collector::kGcTypeFull;
3110
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003111 // The second watermark is higher than the GC watermark. If you hit it, it means you are
3112 // allocating native objects faster than the GC can keep up with.
Ian Rogers3e5cf302014-05-20 16:40:37 -07003113 if (new_native_bytes_allocated > native_footprint_limit_) {
Mathieu Chartier89a201e2014-05-02 10:27:26 -07003114 if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07003115 // Just finished a GC, attempt to run finalizers.
3116 RunFinalization(env);
3117 CHECK(!env->ExceptionCheck());
3118 }
3119 // If we still are over the watermark, attempt a GC for alloc and run finalizers.
Ian Rogers3e5cf302014-05-20 16:40:37 -07003120 if (new_native_bytes_allocated > native_footprint_limit_) {
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08003121 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
Mathieu Chartier590fee92013-09-13 13:46:47 -07003122 RunFinalization(env);
3123 native_need_to_run_finalization_ = false;
3124 CHECK(!env->ExceptionCheck());
3125 }
3126 // We have just run finalizers, update the native watermark since it is very likely that
3127 // finalizers released native managed allocations.
3128 UpdateMaxNativeFootprint();
3129 } else if (!IsGCRequestPending()) {
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07003130 if (IsGcConcurrent()) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07003131 RequestConcurrentGC(self);
3132 } else {
Hiroshi Yamauchid20aba12014-04-11 15:31:09 -07003133 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003134 }
3135 }
3136 }
3137}
3138
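// Balances RegisterNativeAllocation: decrements the native byte count with a CAS loop and
// throws a RuntimeException if the caller tries to free more native bytes than were registered.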
Ian Rogers1eb512d2013-10-18 15:42:20 -07003139void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003140 int expected_size, new_size;
3141 do {
Ian Rogers3e5cf302014-05-20 16:40:37 -07003142 expected_size = native_bytes_allocated_.LoadRelaxed();
Mathieu Chartier590fee92013-09-13 13:46:47 -07003143 new_size = expected_size - bytes;
3144 if (UNLIKELY(new_size < 0)) {
3145 ScopedObjectAccess soa(env);
3146 env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
3147 StringPrintf("Attempted to free %d native bytes with only %d native bytes "
3148 "registered as allocated", bytes, expected_size).c_str());
3149 break;
3150 }
Ian Rogers3e5cf302014-05-20 16:40:37 -07003151 } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size, new_size));
Mathieu Chartier987ccff2013-07-08 11:05:21 -07003152}
3153
Ian Rogersef7d42f2014-01-06 12:55:46 -08003154size_t Heap::GetTotalMemory() const {
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07003155 return std::max(max_allowed_footprint_, GetBytesAllocated());
Hiroshi Yamauchi09b07a92013-07-15 13:17:06 -07003156}
3157
Mathieu Chartier11409ae2013-09-23 11:49:36 -07003158void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3159 DCHECK(mod_union_table != nullptr);
3160 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3161}
3162
Mathieu Chartierc645f1d2014-03-06 18:11:53 -08003163void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
3164 CHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
Ian Rogers1ff3c982014-08-12 02:30:58 -07003165 (c->IsVariableSize() || c->GetObjectSize() == byte_count));
Mathieu Chartierc645f1d2014-03-06 18:11:53 -08003166 CHECK_GE(byte_count, sizeof(mirror::Object));
3167}
3168
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003169void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
3170 CHECK(remembered_set != nullptr);
3171 space::Space* space = remembered_set->GetSpace();
3172 CHECK(space != nullptr);
Mathieu Chartier8e4a96d2014-05-21 10:44:32 -07003173 CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003174 remembered_sets_.Put(space, remembered_set);
Mathieu Chartier8e4a96d2014-05-21 10:44:32 -07003175 CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003176}
3177
3178void Heap::RemoveRememberedSet(space::Space* space) {
3179 CHECK(space != nullptr);
3180 auto it = remembered_sets_.find(space);
3181 CHECK(it != remembered_sets_.end());
Mathieu Chartier5189e242014-07-24 11:11:05 -07003182 delete it->second;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08003183 remembered_sets_.erase(it);
3184 CHECK(remembered_sets_.find(space) == remembered_sets_.end());
3185}
3186
Mathieu Chartier4aeec172014-03-27 16:09:46 -07003187void Heap::ClearMarkedObjects() {
3188 // Clear all of the spaces' mark bitmaps.
3189 for (const auto& space : GetContinuousSpaces()) {
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07003190 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartier4aeec172014-03-27 16:09:46 -07003191 if (space->GetLiveBitmap() != mark_bitmap) {
3192 mark_bitmap->Clear();
3193 }
3194 }
3195 // Clear the marked objects in the discontinuous spaces' mark bitmaps.
3196 for (const auto& space : GetDiscontinuousSpaces()) {
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07003197 space->GetMarkBitmap()->Clear();
Mathieu Chartier4aeec172014-03-27 16:09:46 -07003198 }
3199}
3200
Ian Rogers1d54e732013-05-02 21:10:01 -07003201} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -07003202} // namespace art