/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <memory>
#include <vector>

#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_compact.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "heap-inl.h"
#include "image.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "object_utils.h"
#include "os.h"
#include "reflection.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {

namespace gc {

static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
static constexpr bool kGCALotMode = false;
static constexpr size_t kGcAlotInterval = KB;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static constexpr double kStickyGcThroughputAdjustment = 1.0;
// Whether or not we use the free list large object space.
static constexpr bool kUseFreeListSpaceForLOS = false;
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
static constexpr size_t kNonMovingSpaceCapacity = 64 * MB;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;

Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, double foreground_heap_growth_multiplier, size_t capacity,
           const std::string& image_file_name, const InstructionSet image_instruction_set,
           CollectorType foreground_collector_type, CollectorType background_collector_type,
           size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
           size_t long_pause_log_threshold, size_t long_gc_log_threshold,
           bool ignore_max_footprint, bool use_tlab,
           bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      heap_trim_request_lock_(nullptr),
      last_trim_time_(0),
      heap_transition_or_trim_target_time_(0),
      heap_trim_request_pending_(false),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      have_zygote_space_(false),
      large_object_threshold_(std::numeric_limits<size_t>::max()),  // Starts out disabled.
      collector_type_running_(kCollectorTypeNone),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      native_need_to_run_finalization_(false),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      last_gc_time_ns_(NanoTime()),
      allocation_rate_(0),
      /* For GC a lot mode, we limit the allocations stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? KB : MB),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
      use_tlab_(use_tlab) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  const bool is_zygote = Runtime::Current()->IsZygote();
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  if (!is_zygote) {
    large_object_threshold_ = kDefaultLargeObjectThreshold;
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);

  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files.
  byte* requested_alloc_space_begin = nullptr;
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str(),
                                                               image_instruction_set);
    CHECK(image_space != nullptr) << "Failed to create space for " << image_file_name;
    AddSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory; ensure the alloc
    // space isn't going to get in the middle.
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
  }
  if (is_zygote) {
    // Reserve the address range before we create the non moving space to make sure bitmaps don't
    // take it.
    std::string error_str;
    MemMap* mem_map = MemMap::MapAnonymous(
        "main space", requested_alloc_space_begin + kNonMovingSpaceCapacity, capacity,
        PROT_READ | PROT_WRITE, true, &error_str);
    CHECK(mem_map != nullptr) << error_str;
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // rosalloc spaces.
    non_moving_space_ = space::DlMallocSpace::Create(
        "zygote / non moving space", initial_size, kNonMovingSpaceCapacity, kNonMovingSpaceCapacity,
        requested_alloc_space_begin, false);
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    CreateMainMallocSpace(mem_map, initial_size, growth_limit, capacity);
  } else {
    std::string error_str;
    MemMap* mem_map = MemMap::MapAnonymous("main/non-moving space", requested_alloc_space_begin,
                                           capacity, PROT_READ | PROT_WRITE, true, &error_str);
    CHECK(mem_map != nullptr) << error_str;
    // Create the main free list space, which doubles as the non moving space. We can do this since
    // non zygote means that we won't have any background compaction.
    CreateMainMallocSpace(mem_map, initial_size, growth_limit, capacity);
    non_moving_space_ = main_space_;
  }
  CHECK(non_moving_space_ != nullptr);

  // We need to create the bump pointer if the foreground collector is a compacting GC. We only
  // create the bump pointer space if we are not a moving foreground collector but have a moving
  // background collector since the heap transition code will create the temp space by recycling
  // the bitmap from the main space.
  if (kMovingCollector &&
      (IsMovingGc(foreground_collector_type_) || IsMovingGc(background_collector_type_))) {
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    // Divide by 2 for a temporary fix for reducing virtual memory usage.
    const size_t bump_pointer_space_capacity = capacity / 2;
    bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
                                                          bump_pointer_space_capacity, nullptr);
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
                                                  bump_pointer_space_capacity, nullptr);
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
  }
  if (non_moving_space_ != main_space_) {
    AddSpace(non_moving_space_);
  }
  if (main_space_ != nullptr) {
    AddSpace(main_space_);
  }

  // Allocate the large object space.
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  AddSpace(large_object_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());

  // Relies on the spaces being sorted.
  byte* heap_begin = continuous_spaces_.front()->Begin();
  byte* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  // Card cache for now since it makes it easier for us to update the references to the copying
  // spaces.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
                                                      GetImageSpace());
  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
  AddModUnionTable(mod_union_table);

  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
  }

  // TODO: Count objects in the image space here.
  num_bytes_allocated_.StoreRelaxed(0);

  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                    kDefaultMarkStackSize));
  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
  allocation_stack_.reset(accounting::ObjectStack::Create(
      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
  live_stack_.reset(accounting::ObjectStack::Create(
      "live stack", max_allocation_stack_size_, alloc_stack_capacity));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create it earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  heap_trim_request_lock_ = new Mutex("Heap trim request lock");
  last_gc_size_ = GetBytesAllocated();

  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);

  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }
  if (kMovingCollector) {
    // TODO: Clean this up.
    bool generational = foreground_collector_type_ == kCollectorTypeGSS;
    semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                     generational ? "generational" : "");
    garbage_collectors_.push_back(semi_space_collector_);
    concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
    garbage_collectors_.push_back(concurrent_copying_collector_);
    mark_compact_collector_ = new collector::MarkCompact(this);
    garbage_collectors_.push_back(mark_compact_collector_);
  }

  if (GetImageSpace() != nullptr && main_space_ != nullptr) {
    // Check that there's no gap between the image space and the main
    // space so that the immune region won't break (eg. due to a large
    // object allocated in the gap).
    bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(), main_space_->GetMemMap());
    if (!no_gap) {
      MemMap::DumpMaps(LOG(ERROR));
      LOG(FATAL) << "There's a gap between the image space and the main space";
    }
  }

  if (running_on_valgrind_) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_);
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    can_move_objects = !have_zygote_space_;
  }
  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
    RemoveRememberedSet(main_space_);
  }
  if (kUseRosAlloc) {
    rosalloc_space_ = space::RosAllocSpace::CreateFromMemMap(
        mem_map, "main rosalloc space", kDefaultStartingSize, initial_size, growth_limit, capacity,
        low_memory_mode_, can_move_objects);
    main_space_ = rosalloc_space_;
    CHECK(main_space_ != nullptr) << "Failed to create rosalloc space";
  } else {
    dlmalloc_space_ = space::DlMallocSpace::CreateFromMemMap(
        mem_map, "main dlmalloc space", kDefaultStartingSize, initial_size, growth_limit, capacity,
        can_move_objects);
    main_space_ = dlmalloc_space_;
    CHECK(main_space_ != nullptr) << "Failed to create dlmalloc space";
  }
  main_space_->SetFootprintLimit(main_space_->Capacity());
  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* main_space_rem_set =
        new accounting::RememberedSet("Main space remembered set", this, main_space_);
    CHECK(main_space_rem_set != nullptr) << "Failed to create main space remembered set";
    AddRememberedSet(main_space_rem_set);
  }
  VLOG(heap) << "Created main space " << main_space_;
}

void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

void Heap::DisableCompaction() {
  if (IsMovingGc(foreground_collector_type_)) {
    foreground_collector_type_ = kCollectorTypeCMS;
  }
  if (IsMovingGc(background_collector_type_)) {
    background_collector_type_ = foreground_collector_type_;
  }
  TransitionCollector(foreground_collector_type_);
}

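// The Safe* helpers and DumpObject below are best-effort debugging aids: they tolerate class and
// dex cache pointers that do not point into any known space, so they can be used when dumping a
// possibly corrupt object (DumpObject unprotects the spaces and re-protects the faulted page).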
std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
  if (!IsValidContinuousSpaceObjectAddress(klass)) {
    return StringPrintf("<non heap address klass %p>", klass);
  }
  mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
  if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
    std::string result("[");
    result += SafeGetClassDescriptor(component_type);
    return result;
  } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
    return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
  } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
    return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
  } else {
    mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
    if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
      return StringPrintf("<non heap address dex_cache %p>", dex_cache);
    }
    const DexFile* dex_file = dex_cache->GetDexFile();
    uint16_t class_def_idx = klass->GetDexClassDefIndex();
    if (class_def_idx == DexFile::kDexNoIndex16) {
      return "<class def not found>";
    }
    const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
    const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
    return dex_file->GetTypeDescriptor(type_id);
  }
}

std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
  if (obj == nullptr) {
    return "null";
  }
  mirror::Class* klass = obj->GetClass<kVerifyNone>();
  if (klass == nullptr) {
    return "(class=null)";
  }
  std::string result(SafeGetClassDescriptor(klass));
  if (obj->IsClass()) {
    result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
  }
  return result;
}

void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
  if (obj == nullptr) {
    stream << "(obj=null)";
    return;
  }
  if (IsAligned<kObjectAlignment>(obj)) {
    space::Space* space = nullptr;
    // Don't use find space since it only finds spaces which actually contain objects instead of
    // spaces which may contain objects (e.g. cleared bump pointer spaces).
    for (const auto& cur_space : continuous_spaces_) {
      if (cur_space->HasAddress(obj)) {
        space = cur_space;
        break;
      }
    }
    // Unprotect all the spaces.
    for (const auto& space : continuous_spaces_) {
      mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
    }
    stream << "Object " << obj;
    if (space != nullptr) {
      stream << " in space " << *space;
    }
    mirror::Class* klass = obj->GetClass<kVerifyNone>();
    stream << "\nclass=" << klass;
    if (klass != nullptr) {
      stream << " type= " << SafePrettyTypeOf(obj);
    }
    // Re-protect the address we faulted on.
    mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
  }
}

bool Heap::IsCompilingBoot() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace() || space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

bool Heap::HasImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return true;
    }
  }
  return false;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsMovingGc(collector_type_running_)) {
    WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GE(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}

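// Called when the process switches between jank-perceptible (foreground) and non-perceptible
// (background) states; requests the matching collector transition and, when
// kCollectorTransitionStressIterations is non-zero, stress-tests transitions first.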
void Heap::UpdateProcessState(ProcessState process_state) {
  if (process_state_ != process_state) {
    process_state_ = process_state;
    for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
      // Start at index 1 to avoid "is always false" warning.
      // Have iteration 1 always transition the collector.
      TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
          ? foreground_collector_type_ : background_collector_type_);
      usleep(kCollectorTransitionStressWait);
    }
    if (process_state_ == kProcessStateJankPerceptible) {
      // Transition back to foreground right away to prevent jank.
      RequestCollectorTransition(foreground_collector_type_, 0);
    } else {
      // Don't delay for debug builds since we may want to stress test the GC.
      RequestCollectorTransition(background_collector_type_, kIsDebugBuild ? 0 :
          kCollectorTransitionWait);
    }
  }
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

void Heap::VisitObjects(ObjectCallback callback, void* arg) {
  Thread* self = Thread::Current();
  // GCs can move objects, so don't allow this.
  const char* old_cause = self->StartAssertNoThreadSuspension("Visiting objects");
  if (bump_pointer_space_ != nullptr) {
    // Visit objects in bump pointer space.
    bump_pointer_space_->Walk(callback, arg);
  }
  // TODO: Switch to standard begin and end to use a range-based loop.
  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
      it < end; ++it) {
    mirror::Object* obj = *it;
    if (obj != nullptr && obj->GetClass() != nullptr) {
      // Avoid the race condition caused by the object not yet being written into the allocation
      // stack or the class not yet being written in the object. Or, if
      // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
      callback(obj, arg);
    }
  }
  GetLiveBitmap()->Walk(callback, arg);
  self->EndAssertNoThreadSuspension(old_cause);
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = rosalloc_space_ != nullptr ? rosalloc_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = dlmalloc_space_ != nullptr ? dlmalloc_space_ : non_moving_space_;
  // This is just logic to handle a case of either not having a rosalloc or dlmalloc space.
  // TODO: Generalize this to n bitmaps?
  if (space1 == nullptr) {
    DCHECK(space2 != nullptr);
    space1 = space2;
  }
  if (space2 == nullptr) {
    DCHECK(space1 != nullptr);
    space2 = space1;
  }
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 large_object_space_->GetLiveBitmap(), stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

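// AddSpace/RemoveSpace keep continuous_spaces_ sorted by begin address and keep the heap bitmaps
// and the alloc_spaces_ list in sync with the set of registered spaces.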
void Heap::AddSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
      return a->Begin() < b->Begin();
    });
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (continuous_space->IsDlMallocSpace()) {
    dlmalloc_space_ = continuous_space->AsDlMallocSpace();
  } else if (continuous_space->IsRosAllocSpace()) {
    rosalloc_space_ = continuous_space->AsRosAllocSpace();
  }
}

void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
}

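// Bookkeeping for memory used by the GC's own data structures; the running total is reported as
// the approximate GC data structures memory overhead in DumpGcPerformanceInfo().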
void Heap::RegisterGCAllocation(size_t bytes) {
  gc_memory_overhead_.FetchAndAddSequentiallyConsistent(bytes);
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  gc_memory_overhead_.FetchAndSubSequentiallyConsistent(bytes);
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;
  // Dump cumulative loggers for each GC type.
  uint64_t total_paused_time = 0;
  for (auto& collector : garbage_collectors_) {
    const CumulativeLogger& logger = collector->GetCumulativeTimings();
    const size_t iterations = logger.GetIterations();
    const Histogram<uint64_t>& pause_histogram = collector->GetPauseHistogram();
    if (iterations != 0 && pause_histogram.SampleSize() != 0) {
      os << ConstDumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      Histogram<uint64_t>::CumulativeData cumulative_data;
      pause_histogram.CreateHistogram(&cumulative_data);
      pause_histogram.PrintConfidenceIntervals(os, 0.99, cumulative_data);
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns)
         << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
    collector->ResetMeasurements();
  }
  uint64_t allocation_time =
      static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (kMeasureAllocationTime) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_.LoadRelaxed();
  BaseMutex::DumpAll(os);
}

Heap::~Heap() {
  VLOG(heap) << "Starting ~Heap()";
  STLDeleteElements(&garbage_collectors_);
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();
  STLDeleteValues(&mod_union_tables_);
  STLDeleteValues(&remembered_sets_);
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  delete heap_trim_request_lock_;
  VLOG(heap) << "Finished ~Heap()";
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

space::ImageSpace* Heap::GetImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return space->AsImageSpace();
    }
  }
  return NULL;
}

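// Walk callback used by ThrowOutOfMemoryError below: records the largest free chunk seen so far
// into the size_t that |arg| points to.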
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) {
  std::ostringstream oss;
  size_t total_bytes_free = GetFreeMemory();
  oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
      << " free bytes";
  // If the allocation failed due to fragmentation, print out the largest continuous allocation.
  if (!large_object_allocation && total_bytes_free >= byte_count) {
    size_t max_contiguous_allocation = 0;
    for (const auto& space : continuous_spaces_) {
      if (space->IsMallocSpace()) {
        // To allow the Walk/InspectAll() to exclusively-lock the mutator
        // lock, temporarily release the shared access to the mutator
        // lock here by transitioning to the suspended state.
        Locks::mutator_lock_->AssertSharedHeld(self);
        self->TransitionFromRunnableToSuspended(kSuspended);
        space->AsMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
        self->TransitionFromSuspendedToRunnable();
        Locks::mutator_lock_->AssertSharedHeld(self);
      }
    }
    oss << "; failed due to fragmentation (largest possible contiguous allocation "
        << max_contiguous_allocation << " bytes)";
  }
  self->ThrowOutOfMemoryError(oss.str().c_str());
}

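// Waits until the requested target time, performs any pending collector transition, deflates
// monitors when pauses are not a concern, and finally runs a heap trim if one was requested.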
void Heap::DoPendingTransitionOrTrim() {
  Thread* self = Thread::Current();
  CollectorType desired_collector_type;
  // Wait until we reach the desired transition time.
  while (true) {
    uint64_t wait_time;
    {
      MutexLock mu(self, *heap_trim_request_lock_);
      desired_collector_type = desired_collector_type_;
      uint64_t current_time = NanoTime();
      if (current_time >= heap_transition_or_trim_target_time_) {
        break;
      }
      wait_time = heap_transition_or_trim_target_time_ - current_time;
    }
    ScopedThreadStateChange tsc(self, kSleeping);
    usleep(wait_time / 1000);  // Usleep takes microseconds.
  }
  // Transition the collector if the desired collector type is not the same as the current
  // collector type.
  TransitionCollector(desired_collector_type);
  if (!CareAboutPauseTimes()) {
    // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care
    // about pauses.
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->SuspendAll();
    uint64_t start_time = NanoTime();
    size_t count = runtime->GetMonitorList()->DeflateMonitors();
    VLOG(heap) << "Deflating " << count << " monitors took "
        << PrettyDuration(NanoTime() - start_time);
    runtime->GetThreadList()->ResumeAll();
  }
  // Do a heap trim if it is needed.
  Trim();
}

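// Returns unused pages of the managed malloc spaces to the kernel and, when pause times don't
// matter, trims the native allocator as well. Registers itself as a pseudo collection
// (kCollectorTypeHeapTrim) so that background compaction cannot delete a space mid-trim.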
void Heap::Trim() {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *heap_trim_request_lock_);
    if (!heap_trim_request_pending_ || last_trim_time_ + kHeapTrimWait >= NanoTime()) {
      return;
    }
    last_trim_time_ = NanoTime();
    heap_trim_request_pending_ = false;
  }
  {
    // Need to do this before acquiring the locks since we don't want to get suspended while
    // holding any locks.
    ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
    // Pretend we are doing a GC to prevent background compaction from deleting the space we are
    // trimming.
    MutexLock mu(self, *gc_complete_lock_);
    // Ensure there is only one GC at a time.
    WaitForGcToCompleteLocked(kGcCauseTrim, self);
    collector_type_running_ = kCollectorTypeHeapTrim;
  }
  uint64_t start_ns = NanoTime();
  // Trim the managed spaces.
  uint64_t total_alloc_space_allocated = 0;
  uint64_t total_alloc_space_size = 0;
  uint64_t managed_reclaimed = 0;
  for (const auto& space : continuous_spaces_) {
    if (space->IsMallocSpace()) {
      gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
      if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
        // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
        // for a long period of time.
        managed_reclaimed += malloc_space->Trim();
      }
      total_alloc_space_size += malloc_space->Size();
    }
  }
  total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated();
  if (bump_pointer_space_ != nullptr) {
    total_alloc_space_allocated -= bump_pointer_space_->Size();
  }
  const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
      static_cast<float>(total_alloc_space_size);
  uint64_t gc_heap_end_ns = NanoTime();
  // We never move things in the native heap, so we can finish the GC at this point.
  FinishGC(self, collector::kGcTypeNone);
  size_t native_reclaimed = 0;
  // Only trim the native heap if we don't care about pauses.
  if (!CareAboutPauseTimes()) {
#if defined(USE_DLMALLOC)
    // Trim the native heap.
    dlmalloc_trim(0);
    dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
#elif defined(USE_JEMALLOC)
    // Jemalloc does its own internal trimming.
#else
    UNIMPLEMENTED(WARNING) << "Add trimming support";
#endif
  }
  uint64_t end_ns = NanoTime();
  VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
      << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
      << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
      << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
      << "%.";
}

bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would require
  // taking the lock.
  if (obj == nullptr) {
    return true;
  }
  return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
}

bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
  return FindContinuousSpaceFromObject(obj, true) != nullptr;
}

bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
  if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
    return false;
  }
  for (const auto& space : continuous_spaces_) {
    if (space->HasAddress(obj)) {
      return true;
    }
  }
  return false;
}

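// Checks liveness by consulting, in order: the bump pointer and temp spaces, the live bitmaps of
// the continuous/discontinuous spaces, and the allocation/live stacks. The stack checks are
// retried a few times because the allocation and live stacks can be swapped without suspending
// the mutators.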
bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
                              bool search_live_stack, bool sorted) {
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
    mirror::Class* klass = obj->GetClass<kVerifyNone>();
    if (obj == klass) {
      // This case happens for java.lang.Class.
      return true;
    }
    return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
  } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
    // If we are in the allocated region of the temp space, then we are probably live (e.g. during
    // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
    return temp_space_->Contains(obj);
  }
  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
  space::DiscontinuousSpace* d_space = nullptr;
  if (c_space != nullptr) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != nullptr) {
      if (d_space->GetLiveBitmap()->Test(obj)) {
        return true;
      }
    }
  }
  // This is covering the allocation/live stack swapping that is done without mutators suspended.
  for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
    if (i > 0) {
      NanoSleep(MsToNs(10));
    }
    if (search_allocation_stack) {
      if (sorted) {
        if (allocation_stack_->ContainsSorted(obj)) {
          return true;
        }
      } else if (allocation_stack_->Contains(obj)) {
        return true;
      }
    }

    if (search_live_stack) {
      if (sorted) {
        if (live_stack_->ContainsSorted(obj)) {
          return true;
        }
      } else if (live_stack_->Contains(obj)) {
        return true;
      }
    }
  }
  // We need to check the bitmaps again since there is a race where we mark something as live and
  // then clear the stack containing it.
  if (c_space != nullptr) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  }
  return false;
}

void Heap::DumpSpaces(std::ostream& stream) {
  for (const auto& space : continuous_spaces_) {
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    stream << space << " " << *space << "\n";
    if (live_bitmap != nullptr) {
      stream << live_bitmap << " " << *live_bitmap << "\n";
    }
    if (mark_bitmap != nullptr) {
      stream << mark_bitmap << " " << *mark_bitmap << "\n";
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    stream << space << " " << *space << "\n";
  }
}

void Heap::VerifyObjectBody(mirror::Object* obj) {
  // Ignore early dawn of the universe verifications.
  if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
    return;
  }
  CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
  mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
  CHECK(c != nullptr) << "Null class in object " << obj;
  CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
  CHECK(VerifyClassClass(c));

  if (verify_object_mode_ > kVerifyObjectModeFast) {
    // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
    if (!IsLiveObjectLocked(obj)) {
      DumpSpaces();
      LOG(FATAL) << "Object is dead: " << obj;
    }
  }
}

void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
  // Use signed comparison since freed bytes can be negative when a background compaction to
  // foreground transition occurs. This is caused by moving objects from a bump pointer space to a
  // free list backed space, which typically increases the memory footprint due to padding and
  // binning.
  DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
  // Note: This relies on 2s complement for handling negative freed_bytes.
  num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    thread_stats->freed_objects += freed_objects;
    thread_stats->freed_bytes += freed_bytes;
    // TODO: Do this concurrently.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
  }
}

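// Slow-path allocation: waits for any running GC, then escalates through the GC plan, retrying
// the allocation after each collection. As a last resort it allows the allocation to grow the
// heap and runs a final GC that clears SoftReferences before throwing OutOfMemoryError.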
mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
                                             size_t alloc_size, size_t* bytes_allocated,
                                             size_t* usable_size,
                                             mirror::Class** klass) {
  bool was_default_allocator = allocator == GetCurrentAllocator();
  DCHECK(klass != nullptr);
  StackHandleScope<1> hs(self);
  HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
  klass = nullptr;  // Invalidate for safety.
  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
  if (last_gc != collector::kGcTypeNone) {
    // If we were the default allocator but the allocator changed while we were suspended,
    // abort the allocation.
    if (was_default_allocator && allocator != GetCurrentAllocator()) {
      return nullptr;
    }
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                     usable_size);
    if (ptr != nullptr) {
      return ptr;
    }
  }

  collector::GcType tried_type = next_gc_type_;
  const bool gc_ran =
      CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
  if (was_default_allocator && allocator != GetCurrentAllocator()) {
    return nullptr;
  }
  if (gc_ran) {
    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                     usable_size);
    if (ptr != nullptr) {
      return ptr;
    }
  }

  // Loop through our different GC types and try to GC until we get enough free memory.
  for (collector::GcType gc_type : gc_plan_) {
    if (gc_type == tried_type) {
      continue;
    }
    // Attempt to run the collector; if we succeed, retry the allocation.
    const bool gc_ran =
        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
    if (was_default_allocator && allocator != GetCurrentAllocator()) {
      return nullptr;
    }
    if (gc_ran) {
      // Did we free sufficient memory for the allocation to succeed?
      mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                       usable_size);
      if (ptr != nullptr) {
        return ptr;
      }
    }
  }
  // Allocations have failed after GCs; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
                                                  usable_size);
  if (ptr != nullptr) {
    return ptr;
  }
  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
  // VM spec requires that all SoftReferences have been collected and cleared before throwing
  // OOME.
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
           << " allocation";
  // TODO: Run finalization, but this may cause more allocations to occur.
  // We don't need a WaitForGcToComplete here either.
  DCHECK(!gc_plan_.empty());
  CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
  if (was_default_allocator && allocator != GetCurrentAllocator()) {
    return nullptr;
  }
  ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
  if (ptr == nullptr) {
    ThrowOutOfMemoryError(self, alloc_size, allocator == kAllocatorTypeLOS);
  }
  return ptr;
}

void Heap::SetTargetHeapUtilization(float target) {
  DCHECK_GT(target, 0.0f);  // asserted in Java code
  DCHECK_LT(target, 1.0f);
  target_utilization_ = target;
}

size_t Heap::GetObjectsAllocated() const {
  size_t total = 0;
  for (space::AllocSpace* space : alloc_spaces_) {
    total += space->GetObjectsAllocated();
  }
  return total;
}

size_t Heap::GetObjectsAllocatedEver() const {
  return GetObjectsFreedEver() + GetObjectsAllocated();
}

size_t Heap::GetBytesAllocatedEver() const {
  return GetBytesFreedEver() + GetBytesAllocated();
}

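// Visitor that counts reachable instances of each requested class, optionally counting
// subclasses as matches via IsAssignableFrom.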
class InstanceCounter {
 public:
  InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
  }
  static void Callback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
    mirror::Class* instance_class = obj->GetClass();
    CHECK(instance_class != nullptr);
    for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
      if (instance_counter->use_is_assignable_from_) {
        if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) {
          ++instance_counter->counts_[i];
        }
      } else if (instance_class == instance_counter->classes_[i]) {
        ++instance_counter->counts_[i];
      }
    }
  }

 private:
  const std::vector<mirror::Class*>& classes_;
  bool use_is_assignable_from_;
  uint64_t* const counts_;
  DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
};

void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                          uint64_t* counts) {
  // Can't do any GC in this function since this may move classes.
  Thread* self = Thread::Current();
  auto* old_cause = self->StartAssertNoThreadSuspension("CountInstances");
  InstanceCounter counter(classes, use_is_assignable_from, counts);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  VisitObjects(InstanceCounter::Callback, &counter);
  self->EndAssertNoThreadSuspension(old_cause);
}

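// Visitor that collects up to max_count instances whose class exactly matches the requested
// class (a max_count of 0 means unlimited).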
class InstanceCollector {
 public:
  InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : class_(c), max_count_(max_count), instances_(instances) {
  }
  static void Callback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    DCHECK(arg != nullptr);
    InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
    mirror::Class* instance_class = obj->GetClass();
    if (instance_class == instance_collector->class_) {
      if (instance_collector->max_count_ == 0 ||
          instance_collector->instances_.size() < instance_collector->max_count_) {
        instance_collector->instances_.push_back(obj);
      }
    }
  }

 private:
  mirror::Class* class_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& instances_;
  DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};

void Heap::GetInstances(mirror::Class* c, int32_t max_count,
                        std::vector<mirror::Object*>& instances) {
  // Can't do any GC in this function since this may move classes.
  Thread* self = Thread::Current();
  auto* old_cause = self->StartAssertNoThreadSuspension("GetInstances");
  InstanceCollector collector(c, max_count, instances);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  VisitObjects(&InstanceCollector::Callback, &collector);
  self->EndAssertNoThreadSuspension(old_cause);
}

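// Visitor that records up to max_count objects which hold a reference to the given target object
// (a max_count of 0 means unlimited).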
class ReferringObjectsFinder {
 public:
  ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
                         std::vector<mirror::Object*>& referring_objects)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
  }

  static void Callback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
  }

  // For bitmap Visit.
  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
  // annotalysis on visitors.
  void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
    o->VisitReferences<true>(*this, VoidFunctor());
  }

  // For Object::VisitReferences.
  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
      referring_objects_.push_back(obj);
    }
  }

 private:
  mirror::Object* object_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& referring_objects_;
  DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};

void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
                               std::vector<mirror::Object*>& referring_objects) {
  // Can't do any GC in this function since this may move the object o.
  Thread* self = Thread::Current();
  auto* old_cause = self->StartAssertNoThreadSuspension("GetReferringObjects");
  ReferringObjectsFinder finder(o, max_count, referring_objects);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  VisitObjects(&ReferringObjectsFinder::Callback, &finder);
  self->EndAssertNoThreadSuspension(old_cause);
}

void Heap::CollectGarbage(bool clear_soft_references) {
  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
  // last GC will not have necessarily been cleared.
  CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
}

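// Switches the heap between collector types (e.g. foreground <-> background), compacting objects
// between the bump pointer space and the main space when the source and destination collectors
// use different space layouts.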
void Heap::TransitionCollector(CollectorType collector_type) {
  if (collector_type == collector_type_) {
    return;
  }
  VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
             << " -> " << static_cast<int>(collector_type);
  uint64_t start_time = NanoTime();
  uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
  Runtime* const runtime = Runtime::Current();
  ThreadList* const tl = runtime->GetThreadList();
  Thread* const self = Thread::Current();
  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
  Locks::mutator_lock_->AssertNotHeld(self);
  const bool copying_transition =
      IsMovingGc(background_collector_type_) || IsMovingGc(foreground_collector_type_);
  // Busy wait until we can GC (StartGC can fail if we have a non-zero
  // compacting_gc_disable_count_, but this should rarely occur).
  for (;;) {
    {
      ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
      MutexLock mu(self, *gc_complete_lock_);
      // Ensure there is only one GC at a time.
      WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
      // If someone else beat us to it and changed the collector before we could, exit.
      // This is safe to do before the suspend all since we set the collector_type_running_ before
      // we exit the loop. If another thread attempts to do the heap transition before we exit,
      // then it would get blocked on WaitForGcToCompleteLocked.
      if (collector_type == collector_type_) {
        return;
      }
      // GC can be disabled if someone has used GetPrimitiveArrayCritical but has not yet
      // released it.
      if (!copying_transition || disable_moving_gc_count_ == 0) {
        // TODO: Not hard code in semi-space collector?
        collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
        break;
      }
    }
    usleep(1000);
  }
  if (runtime->IsShuttingDown(self)) {
    // Don't allow heap transitions to happen if the runtime is shutting down since these can
    // cause objects to get finalized.
    FinishGC(self, collector::kGcTypeNone);
    return;
  }
  tl->SuspendAll();
  switch (collector_type) {
    case kCollectorTypeSS:
      // Fall-through.
    case kCollectorTypeGSS: {
      if (!IsMovingGc(collector_type_)) {
        // We are transitioning from a non-moving GC to a moving GC; since we copied from the bump
        // pointer space during the last transition, it will be protected.
        bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
        Compact(bump_pointer_space_, main_space_);
        // Remove the main space so that we don't try to trim it; trimming doesn't work for debug
        // builds since RosAlloc attempts to read the magic number from a protected page.
        RemoveSpace(main_space_);
      }
      break;
    }
    case kCollectorTypeMS:
      // Fall through.
    case kCollectorTypeCMS: {
      if (IsMovingGc(collector_type_)) {
        // Compact to the main space from the bump pointer space; we don't need to swap
        // semispaces.
        AddSpace(main_space_);
        main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
        Compact(main_space_, bump_pointer_space_);
      }
      break;
    }
    default: {
      LOG(FATAL) << "Attempted to transition to invalid collector type "
                 << static_cast<size_t>(collector_type);
      break;
    }
  }
  ChangeCollector(collector_type);
  tl->ResumeAll();
  // Can't call into java code with all threads suspended.
  reference_processor_.EnqueueClearedReferences(self);
  uint64_t duration = NanoTime() - start_time;
  GrowForUtilization(semi_space_collector_);
  FinishGC(self, collector::kGcTypeFull);
  int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
  int32_t delta_allocated = before_allocated - after_allocated;
  std::string saved_str;
  if (delta_allocated >= 0) {
    saved_str = " saved at least " + PrettySize(delta_allocated);
  } else {
    saved_str = " expanded " + PrettySize(-delta_allocated);
  }
  LOG(INFO) << "Heap transition to " << process_state_ << " took "
            << PrettyDuration(duration) << saved_str;
}

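// Installs the GC plan and the allocator type that correspond to the new collector type, and
// recomputes the threshold at which concurrent GCs are started.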
void Heap::ChangeCollector(CollectorType collector_type) {
  // TODO: Only do this with all mutators suspended to avoid races.
  if (collector_type != collector_type_) {
    if (collector_type == kCollectorTypeMC) {
      // Don't allow mark compact unless support is compiled in.
      CHECK(kMarkCompactSupport);
    }
    collector_type_ = collector_type;
    gc_plan_.clear();
    switch (collector_type_) {
      case kCollectorTypeCC:  // Fall-through.
      case kCollectorTypeMC:  // Fall-through.
      case kCollectorTypeSS:  // Fall-through.
      case kCollectorTypeGSS: {
        gc_plan_.push_back(collector::kGcTypeFull);
        if (use_tlab_) {
          ChangeAllocator(kAllocatorTypeTLAB);
        } else {
          ChangeAllocator(kAllocatorTypeBumpPointer);
        }
        break;
      }
      case kCollectorTypeMS: {
        gc_plan_.push_back(collector::kGcTypeSticky);
        gc_plan_.push_back(collector::kGcTypePartial);
        gc_plan_.push_back(collector::kGcTypeFull);
        ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
        break;
      }
      case kCollectorTypeCMS: {
        gc_plan_.push_back(collector::kGcTypeSticky);
        gc_plan_.push_back(collector::kGcTypePartial);
        gc_plan_.push_back(collector::kGcTypeFull);
        ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
        break;
      }
      default: {
        LOG(FATAL) << "Unimplemented";
      }
    }
    if (IsGcConcurrent()) {
      concurrent_start_bytes_ =
          std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
    } else {
      concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
    }
  }
}

// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
 public:
  explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, false, "zygote collector"),
      bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) {
  }

  void BuildBins(space::ContinuousSpace* space) {
    bin_live_bitmap_ = space->GetLiveBitmap();
    bin_mark_bitmap_ = space->GetMarkBitmap();
    BinContext context;
    context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
    context.collector_ = this;
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    // Note: This requires traversing the space in increasing order of object addresses.
    bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
    // Add the last bin which spans after the last object to the end of the space.
    AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
  }

 private:
  struct BinContext {
    uintptr_t prev_;  // The end of the previous object.
    ZygoteCompactingCollector* collector_;
  };
  // Maps from bin sizes to locations.
  std::multimap<size_t, uintptr_t> bins_;
  // Live bitmap of the space which contains the bins.
  accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
  // Mark bitmap of the space which contains the bins.
  accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;

  static void Callback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(arg != nullptr);
    BinContext* context = reinterpret_cast<BinContext*>(arg);
    ZygoteCompactingCollector* collector = context->collector_;
    uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
    size_t bin_size = object_addr - context->prev_;
    // Add the bin consisting of the end of the previous object to the start of the current object.
    collector->AddBin(bin_size, context->prev_);
    context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
  }

  void AddBin(size_t size, uintptr_t position) {
    if (size != 0) {
      bins_.insert(std::make_pair(size, position));
    }
  }

  virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
    // Don't sweep any spaces since we probably blasted the internal accounting of the free list
    // allocator.
    return false;
  }

  virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
    mirror::Object* forward_address;
    // Find the smallest bin which we can move obj in.
    auto it = bins_.lower_bound(object_size);
    if (it == bins_.end()) {
      // No available space in the bins, place it in the target space instead (grows the zygote
      // space).
      size_t bytes_allocated;
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
      if (to_space_live_bitmap_ != nullptr) {
        to_space_live_bitmap_->Set(forward_address);
      } else {
        GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
        GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
      }
    } else {
      size_t size = it->first;
      uintptr_t pos = it->second;
      bins_.erase(it);  // Erase the old bin which we replace with the new smaller bin.
      forward_address = reinterpret_cast<mirror::Object*>(pos);
      // Set the live and mark bits so that sweeping system weaks works properly.
      bin_live_bitmap_->Set(forward_address);
      bin_mark_bitmap_->Set(forward_address);
      DCHECK_GE(size, object_size);
      AddBin(size - object_size, pos + object_size);  // Add a new bin with the remaining space.
    }
    // Copy the object over to its new location.
    memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
    if (kUseBakerOrBrooksReadBarrier) {
      obj->AssertReadBarrierPointer();
      if (kUseBrooksReadBarrier) {
        DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
        forward_address->SetReadBarrierPointer(forward_address);
      }
      forward_address->AssertReadBarrierPointer();
    }
    return forward_address;
  }
};

void Heap::UnBindBitmaps() {
  TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
  for (const auto& space : GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (alloc_space->HasBoundBitmaps()) {
        alloc_space->UnBindBitmaps();
      }
    }
  }
}

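// Called before the zygote forks its first application: compacts live objects into the
// non-moving space, converts that space into the zygote space shared with child processes, and
// sets up a fresh allocation space along with its mod-union table / remembered set accounting.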
void Heap::PreZygoteFork() {
  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
  Thread* self = Thread::Current();
  MutexLock mu(self, zygote_creation_lock_);
  // Try to see if we have any Zygote spaces.
  if (have_zygote_space_) {
    return;
  }
  VLOG(heap) << "Starting PreZygoteFork";
  // Trim the pages at the end of the non moving space.
  non_moving_space_->Trim();
  // The end of the non-moving space may be protected; unprotect it so that we can copy the zygote
  // there.
  non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
  // Change the collector to the post zygote one.
  if (kCompactZygote) {
    DCHECK(semi_space_collector_ != nullptr);
    // Temporarily disable rosalloc verification because the zygote
    // compaction will mess up the rosalloc internal metadata.
    ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
    ZygoteCompactingCollector zygote_collector(this);
    zygote_collector.BuildBins(non_moving_space_);
    // Create a new bump pointer space which we will compact into.
    space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
                                         non_moving_space_->Limit());
    // Compact the bump pointer space to a new zygote bump pointer space.
    bool reset_main_space = false;
    if (IsMovingGc(collector_type_)) {
      zygote_collector.SetFromSpace(bump_pointer_space_);
    } else {
      CHECK(main_space_ != nullptr);
      // Copy from the main space.
      zygote_collector.SetFromSpace(main_space_);
      reset_main_space = true;
    }
    zygote_collector.SetToSpace(&target_space);
    zygote_collector.SetSwapSemiSpaces(false);
    zygote_collector.Run(kGcCauseCollectorTransition, false);
    if (reset_main_space) {
      main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
      madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
      MemMap* mem_map = main_space_->ReleaseMemMap();
      RemoveSpace(main_space_);
      space::Space* old_main_space = main_space_;
      CreateMainMallocSpace(mem_map, kDefaultInitialSize, mem_map->Size(), mem_map->Size());
      delete old_main_space;
      AddSpace(main_space_);
    } else {
      bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
    }
    if (temp_space_ != nullptr) {
      CHECK(temp_space_->IsEmpty());
    }
    total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
    total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
    // Update the end and write out image.
    non_moving_space_->SetEnd(target_space.End());
    non_moving_space_->SetLimit(target_space.Limit());
    VLOG(heap) << "Zygote space size " << non_moving_space_->Size() << " bytes";
  }
  ChangeCollector(foreground_collector_type_);
  // Save the old space so that we can remove it after we complete creating the zygote space.
  space::MallocSpace* old_alloc_space = non_moving_space_;
  // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
  // the remaining available space.
  // Remove the old space before creating the zygote space since creating the zygote space sets
  // the old alloc space's bitmaps to nullptr.
  RemoveSpace(old_alloc_space);
  if (collector::SemiSpace::kUseRememberedSet) {
    // Sanity bound check.
    FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
    // Remove the remembered set for the now zygote space (the old
    // non-moving space). Note now that we have compacted objects into
    // the zygote space, the data in the remembered set is no longer
    // needed. The zygote space will instead have a mod-union table
    // from this point on.
    RemoveRememberedSet(old_alloc_space);
  }
  space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
                                                                        low_memory_mode_,
                                                                        &non_moving_space_);
  delete old_alloc_space;
  CHECK(zygote_space != nullptr) << "Failed creating zygote space";
  AddSpace(zygote_space);
  non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
  AddSpace(non_moving_space_);
  have_zygote_space_ = true;
  // Enable large object space allocations.
  large_object_threshold_ = kDefaultLargeObjectThreshold;
  // Create the zygote space mod union table.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
  CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
  AddModUnionTable(mod_union_table);
  if (collector::SemiSpace::kUseRememberedSet) {
    // Add a new remembered set for the post-zygote non-moving space.
    accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
        new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
                                      non_moving_space_);
    CHECK(post_zygote_non_moving_space_rem_set != nullptr)
        << "Failed to create post-zygote non-moving space remembered set";
    AddRememberedSet(post_zygote_non_moving_space_rem_set);
  }
}

void Heap::FlushAllocStack() {
  MarkAllocStackAsLive(allocation_stack_.get());
  allocation_stack_->Reset();
}

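// Marks every object on the given allocation stack in whichever bitmap covers its address,
// falling back to the large object bitmap.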
void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
                          accounting::ContinuousSpaceBitmap* bitmap2,
                          accounting::LargeObjectBitmap* large_objects,
                          accounting::ObjectStack* stack) {
  DCHECK(bitmap1 != nullptr);
  DCHECK(bitmap2 != nullptr);
  mirror::Object** limit = stack->End();
  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
    const mirror::Object* obj = *it;
    if (!kUseThreadLocalAllocationStack || obj != nullptr) {
      if (bitmap1->HasAddress(obj)) {
        bitmap1->Set(obj);
      } else if (bitmap2->HasAddress(obj)) {
        bitmap2->Set(obj);
      } else {
        large_objects->Set(obj);
      }
    }
  }
}

void Heap::SwapSemiSpaces() {
  CHECK(bump_pointer_space_ != nullptr);
  CHECK(temp_space_ != nullptr);
  std::swap(bump_pointer_space_, temp_space_);
}

void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
                   space::ContinuousMemMapAllocSpace* source_space) {
  CHECK(kMovingCollector);
  if (target_space != source_space) {
    // Don't swap spaces since this isn't a typical semi space collection.
    semi_space_collector_->SetSwapSemiSpaces(false);
    semi_space_collector_->SetFromSpace(source_space);
    semi_space_collector_->SetToSpace(target_space);
    semi_space_collector_->Run(kGcCauseCollectorTransition, false);
  } else {
    CHECK(target_space->IsBumpPointerSpace())
        << "In-place compaction is only supported for bump pointer spaces";
    mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
    mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
  }
}

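// Runs a single collection of the requested type, selecting the concrete collector from the
// current collector type, and returns the type of GC that was actually performed (or
// kGcTypeNone if the collection could not run).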
Ian Rogers1d54e732013-05-02 21:10:01 -07001751collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
1752 bool clear_soft_references) {
Ian Rogers81d425b2012-09-27 16:03:43 -07001753 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001754 Runtime* runtime = Runtime::Current();
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001755 // If the heap can't run the GC, silently fail and return that no GC was run.
1756 switch (gc_type) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001757 case collector::kGcTypePartial: {
1758 if (!have_zygote_space_) {
1759 return collector::kGcTypeNone;
1760 }
1761 break;
1762 }
1763 default: {
1764 // Other GC types don't have any special cases which makes them not runnable. The main case
1765 // here is full GC.
1766 }
1767 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08001768 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Ian Rogers81d425b2012-09-27 16:03:43 -07001769 Locks::mutator_lock_->AssertNotHeld(self);
Ian Rogers120f1c72012-09-28 17:17:10 -07001770 if (self->IsHandlingStackOverflow()) {
1771 LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
1772 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001773 bool compacting_gc;
1774 {
1775 gc_complete_lock_->AssertNotHeld(self);
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08001776 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001777 MutexLock mu(self, *gc_complete_lock_);
1778 // Ensure there is only one GC at a time.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07001779 WaitForGcToCompleteLocked(gc_cause, self);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001780 compacting_gc = IsMovingGc(collector_type_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001781    // Moving GC can be disabled while a thread is using GetPrimitiveArrayCritical.
1782 if (compacting_gc && disable_moving_gc_count_ != 0) {
1783 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
1784 return collector::kGcTypeNone;
1785 }
1786 collector_type_running_ = collector_type_;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001787 }
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001788
Mathieu Chartier590fee92013-09-13 13:46:47 -07001789 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
1790 ++runtime->GetStats()->gc_for_alloc_count;
1791 ++self->GetStats()->gc_for_alloc_count;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001792 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001793 uint64_t gc_start_time_ns = NanoTime();
Mathieu Chartier65db8802012-11-20 12:36:46 -08001794 uint64_t gc_start_size = GetBytesAllocated();
1795 // Approximate allocation rate in bytes / second.
Ian Rogers1d54e732013-05-02 21:10:01 -07001796 uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001797 // Back to back GCs can cause 0 ms of wait time in between GC invocations.
1798 if (LIKELY(ms_delta != 0)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001799 allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
Mathieu Chartier65db8802012-11-20 12:36:46 -08001800 VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
1801 }
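// Worked example (illustrative, not from the original source): if 8 MiB were allocated during
// the 500 ms since the last GC finished, then
//   allocation_rate_ = (8 MiB * 1000) / 500 = 16 MiB/s.
// This estimate later feeds the concurrent GC start threshold computed in GrowForUtilization.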
1802
Ian Rogers1d54e732013-05-02 21:10:01 -07001803 DCHECK_LT(gc_type, collector::kGcTypeMax);
1804 DCHECK_NE(gc_type, collector::kGcTypeNone);
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001805
Mathieu Chartier590fee92013-09-13 13:46:47 -07001806 collector::GarbageCollector* collector = nullptr;
Mathieu Chartier50482232013-11-21 11:48:14 -08001807 // TODO: Clean this up.
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001808 if (compacting_gc) {
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001809 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
1810 current_allocator_ == kAllocatorTypeTLAB);
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001811 switch (collector_type_) {
1812 case kCollectorTypeSS:
1813 // Fall-through.
1814 case kCollectorTypeGSS:
1815 semi_space_collector_->SetFromSpace(bump_pointer_space_);
1816 semi_space_collector_->SetToSpace(temp_space_);
1817 semi_space_collector_->SetSwapSemiSpaces(true);
1818 collector = semi_space_collector_;
1819 break;
1820 case kCollectorTypeCC:
1821 collector = concurrent_copying_collector_;
1822 break;
1823 case kCollectorTypeMC:
1824 mark_compact_collector_->SetSpace(bump_pointer_space_);
1825 collector = mark_compact_collector_;
1826 break;
1827 default:
1828 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07001829 }
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001830 if (collector != mark_compact_collector_) {
1831 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1832 CHECK(temp_space_->IsEmpty());
1833 }
 1834    gc_type = collector::kGcTypeFull;  // TODO: Don't hard-code this.
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001835 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
1836 current_allocator_ == kAllocatorTypeDlMalloc) {
Mathieu Chartierafe49982014-03-27 10:55:04 -07001837 collector = FindCollectorByGcType(gc_type);
Mathieu Chartier50482232013-11-21 11:48:14 -08001838 } else {
1839 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001840 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001841 CHECK(collector != nullptr)
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07001842 << "Could not find garbage collector with collector_type="
1843 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001844 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001845 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
1846 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
Mathieu Chartier7bf52d22014-03-13 14:46:09 -07001847 RequestHeapTrim();
Mathieu Chartier39e32612013-11-12 16:28:05 -08001848 // Enqueue cleared references.
Mathieu Chartier308351a2014-06-15 12:39:02 -07001849 reference_processor_.EnqueueClearedReferences(self);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001850 // Grow the heap so that we know when to perform the next GC.
Mathieu Chartierafe49982014-03-27 10:55:04 -07001851 GrowForUtilization(collector);
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001852 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
1853 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07001854 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001855 // (mutator time blocked >= long_pause_log_threshold_).
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07001856 bool log_gc = gc_cause == kGcCauseExplicit;
1857 if (!log_gc && CareAboutPauseTimes()) {
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001858 // GC for alloc pauses the allocating thread, so consider it as a pause.
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07001859 log_gc = duration > long_gc_log_threshold_ ||
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001860 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07001861 for (uint64_t pause : pause_times) {
1862 log_gc = log_gc || pause >= long_pause_log_threshold_;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001863 }
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07001864 }
1865 if (log_gc) {
1866 const size_t percent_free = GetPercentFree();
1867 const size_t current_heap_size = GetBytesAllocated();
1868 const size_t total_memory = GetTotalMemory();
1869 std::ostringstream pause_string;
1870 for (size_t i = 0; i < pause_times.size(); ++i) {
1871 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001872 << ((i != pause_times.size() - 1) ? "," : "");
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001873 }
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07001874 LOG(INFO) << gc_cause << " " << collector->GetName()
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001875 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
1876 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
1877 << current_gc_iteration_.GetFreedLargeObjects() << "("
1878 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
Mathieu Chartier62ab87b2014-04-28 12:22:07 -07001879 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
1880 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
1881 << " total " << PrettyDuration((duration / 1000) * 1000);
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001882 VLOG(heap) << ConstDumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001883 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001884 FinishGC(self, gc_type);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001885 // Inform DDMS that a GC completed.
Ian Rogers15bf2d32012-08-28 17:33:04 -07001886 Dbg::GcDidFinish();
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001887 return gc_type;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001888}
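// Illustrative sketch (not part of the original file): callers pick a GC type and let
// CollectGarbageInternal() handle waiting, collector selection and bookkeeping. A hypothetical
// explicit collection request might look like:
//
//   CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit,
//                          /*clear_soft_references=*/false);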
Mathieu Chartiera6399032012-06-11 18:49:50 -07001889
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001890void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
1891 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001892 collector_type_running_ = kCollectorTypeNone;
1893 if (gc_type != collector::kGcTypeNone) {
1894 last_gc_type_ = gc_type;
1895 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001896 // Wake anyone who may have been waiting for the GC to complete.
1897 gc_complete_cond_->Broadcast(self);
1898}
1899
Mathieu Chartier815873e2014-02-13 18:02:13 -08001900static void RootMatchesObjectVisitor(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
1901 RootType /*root_type*/) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001902 mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
Mathieu Chartier815873e2014-02-13 18:02:13 -08001903 if (*root == obj) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001904 LOG(INFO) << "Object " << obj << " is a root";
1905 }
1906}
1907
1908class ScanVisitor {
1909 public:
Brian Carlstromdf629502013-07-17 22:39:56 -07001910 void operator()(const mirror::Object* obj) const {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001911 LOG(ERROR) << "Would have rescanned object " << obj;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001912 }
1913};
1914
Ian Rogers1d54e732013-05-02 21:10:01 -07001915// Verify a reference from an object.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001916class VerifyReferenceVisitor {
1917 public:
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07001918 explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
Ian Rogers1d54e732013-05-02 21:10:01 -07001919 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07001920 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
Ian Rogers1d54e732013-05-02 21:10:01 -07001921
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07001922 size_t GetFailureCount() const {
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07001923 return fail_count_->LoadSequentiallyConsistent();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001924 }
1925
Mathieu Chartier407f7022014-02-18 14:37:05 -08001926 void operator()(mirror::Class* klass, mirror::Reference* ref) const
1927 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07001928 if (verify_referent_) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07001929 VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07001930 }
Mathieu Chartier407f7022014-02-18 14:37:05 -08001931 }
1932
Mathieu Chartier3b05e9b2014-03-25 09:29:43 -07001933 void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
Mathieu Chartier407f7022014-02-18 14:37:05 -08001934 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07001935 VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
Mathieu Chartier407f7022014-02-18 14:37:05 -08001936 }
1937
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07001938 bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
1939 return heap_->IsLiveObjectLocked(obj, true, false, true);
1940 }
1941
1942 static void VerifyRootCallback(mirror::Object** root, void* arg, uint32_t thread_id,
1943 RootType root_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1944 VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
1945 if (!visitor->VerifyReference(nullptr, *root, MemberOffset(0))) {
1946 LOG(ERROR) << "Root " << *root << " is dead with type " << PrettyTypeOf(*root)
1947 << " thread_id= " << thread_id << " root_type= " << root_type;
1948 }
1949 }
1950
1951 private:
Mathieu Chartier407f7022014-02-18 14:37:05 -08001952 // TODO: Fix the no thread safety analysis.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07001953 // Returns false on failure.
1954 bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001955 NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001956 if (ref == nullptr || IsLive(ref)) {
1957 // Verify that the reference is live.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07001958 return true;
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001959 }
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07001960 if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001961      // Print the message only on the first failure to prevent spam.
1962 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001963 }
1964 if (obj != nullptr) {
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07001965      // Only do this part for non-root objects.
Ian Rogers1d54e732013-05-02 21:10:01 -07001966 accounting::CardTable* card_table = heap_->GetCardTable();
1967 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
1968 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001969 byte* card_addr = card_table->CardFromAddr(obj);
1970 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
1971 << offset << "\n card value = " << static_cast<int>(*card_addr);
1972 if (heap_->IsValidObjectAddress(obj->GetClass())) {
1973 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
1974 } else {
1975 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001976 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001977
 1978      // Attempt to find the class among the recently freed objects.
1979 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
1980 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
1981 space::MallocSpace* space = ref_space->AsMallocSpace();
1982 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
1983 if (ref_class != nullptr) {
1984 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
1985 << PrettyClass(ref_class);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001986 } else {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001987 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001988 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001989 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001990
Mathieu Chartier938a03b2014-01-16 15:10:31 -08001991 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
1992 ref->GetClass()->IsClass()) {
1993 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
1994 } else {
1995 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
1996 << ") is not a valid heap address";
1997 }
1998
1999 card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
2000 void* cover_begin = card_table->AddrFromCard(card_addr);
2001 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2002 accounting::CardTable::kCardSize);
2003 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2004 << "-" << cover_end;
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002005 accounting::ContinuousSpaceBitmap* bitmap =
2006 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002007
2008 if (bitmap == nullptr) {
2009 LOG(ERROR) << "Object " << obj << " has no bitmap";
Mathieu Chartier4e305412014-02-19 10:54:44 -08002010 if (!VerifyClassClass(obj->GetClass())) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002011 LOG(ERROR) << "Object " << obj << " failed class verification!";
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002012 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002013 } else {
Ian Rogers1d54e732013-05-02 21:10:01 -07002014 // Print out how the object is live.
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002015 if (bitmap->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002016 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2017 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002018 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002019 LOG(ERROR) << "Object " << obj << " found in allocation stack";
2020 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002021 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002022 LOG(ERROR) << "Object " << obj << " found in live stack";
2023 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002024 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2025 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2026 }
2027 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2028 LOG(ERROR) << "Ref " << ref << " found in live stack";
2029 }
Ian Rogers1d54e732013-05-02 21:10:01 -07002030 // Attempt to see if the card table missed the reference.
2031 ScanVisitor scan_visitor;
2032 byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
2033 card_table->Scan(bitmap, byte_cover_begin,
Mathieu Chartier184e3222013-08-03 14:02:57 -07002034 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002035 }
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002036
2037 // Search to see if any of the roots reference our object.
2038 void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
Mathieu Chartier893263b2014-03-04 11:07:42 -08002039 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002040
2041 // Search to see if any of the roots reference our reference.
2042 arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
Mathieu Chartier893263b2014-03-04 11:07:42 -08002043 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002044 }
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002045 return false;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002046 }
2047
Ian Rogers1d54e732013-05-02 21:10:01 -07002048 Heap* const heap_;
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002049 Atomic<size_t>* const fail_count_;
2050 const bool verify_referent_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002051};
2052
Ian Rogers1d54e732013-05-02 21:10:01 -07002053// Verify all references within an object, for use with HeapBitmap::Visit.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002054class VerifyObjectVisitor {
2055 public:
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002056 explicit VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2057 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002058 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002059
Mathieu Chartier590fee92013-09-13 13:46:47 -07002060 void operator()(mirror::Object* obj) const
Ian Rogersb726dcb2012-09-05 08:57:23 -07002061 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002062    // Note: we verify the references in obj but not obj itself, since obj must be live or we
 2063    // would not have found it in the live bitmap.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002064 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002065    // The class doesn't count as a reference but we should verify it anyway.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002066 obj->VisitReferences<true>(visitor, visitor);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002067 }
2068
Mathieu Chartier590fee92013-09-13 13:46:47 -07002069 static void VisitCallback(mirror::Object* obj, void* arg)
2070 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2071 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
2072 visitor->operator()(obj);
2073 }
2074
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002075 size_t GetFailureCount() const {
Mathieu Chartiere9e55ac2014-05-21 17:48:25 -07002076 return fail_count_->LoadSequentiallyConsistent();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002077 }
2078
2079 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002080 Heap* const heap_;
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002081 Atomic<size_t>* const fail_count_;
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002082 const bool verify_referent_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002083};
2084
Mathieu Chartierc1790162014-05-23 10:54:50 -07002085void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2086 // Slow path, the allocation stack push back must have already failed.
2087 DCHECK(!allocation_stack_->AtomicPushBack(*obj));
2088 do {
2089 // TODO: Add handle VerifyObject.
2090 StackHandleScope<1> hs(self);
2091 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
 2092    // Push our object into the reserve region of the allocation stack. This is only required due
2093 // to heap verification requiring that roots are live (either in the live bitmap or in the
2094 // allocation stack).
2095 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2096 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2097 } while (!allocation_stack_->AtomicPushBack(*obj));
2098}
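// Illustrative sketch (not part of the original file): the allocation fast path is expected to
// try the ordinary push first and only drop into this slow path when the stack is full,
// roughly:
//
//   if (UNLIKELY(!allocation_stack_->AtomicPushBack(obj))) {
//     PushOnAllocationStackWithInternalGC(self, &obj);  // May run a sticky GC and move obj.
//   }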
2099
2100void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2101 // Slow path, the allocation stack push back must have already failed.
2102 DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
2103 mirror::Object** start_address;
2104 mirror::Object** end_address;
2105 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
2106 &end_address)) {
2107 // TODO: Add handle VerifyObject.
2108 StackHandleScope<1> hs(self);
2109 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
 2110    // Push our object into the reserve region of the allocation stack. This is only required due
2111 // to heap verification requiring that roots are live (either in the live bitmap or in the
2112 // allocation stack).
2113 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2114 // Push into the reserve allocation stack.
2115 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2116 }
2117 self->SetThreadLocalAllocationStack(start_address, end_address);
2118 // Retry on the new thread-local allocation stack.
2119 CHECK(self->PushOnThreadLocalAllocationStack(*obj)); // Must succeed.
2120}
2121
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002122// Must do this with mutators suspended since we are directly accessing the allocation stacks.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002123size_t Heap::VerifyHeapReferences(bool verify_referents) {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002124 Thread* self = Thread::Current();
2125 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002126 // Lets sort our allocation stacks so that we can efficiently binary search them.
Ian Rogers1d54e732013-05-02 21:10:01 -07002127 allocation_stack_->Sort();
2128 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002129  // Since we sorted the allocation stack contents, we need to revoke all
2130 // thread-local allocation stacks.
2131 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002132 Atomic<size_t> fail_count_(0);
2133 VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002134 // Verify objects in the allocation stack since these will be objects which were:
2135 // 1. Allocated prior to the GC (pre GC verification).
2136 // 2. Allocated during the GC (pre sweep GC verification).
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002137 // We don't want to verify the objects in the live stack since they themselves may be
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002138 // pointing to dead objects if they are not reachable.
Mathieu Chartier590fee92013-09-13 13:46:47 -07002139 VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
2140 // Verify the roots:
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002141 Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRootCallback, &visitor);
2142 if (visitor.GetFailureCount() > 0) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07002143 // Dump mod-union tables.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002144 for (const auto& table_pair : mod_union_tables_) {
2145 accounting::ModUnionTable* mod_union_table = table_pair.second;
2146 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
2147 }
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002148 // Dump remembered sets.
2149 for (const auto& table_pair : remembered_sets_) {
2150 accounting::RememberedSet* remembered_set = table_pair.second;
2151 remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
2152 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002153 DumpSpaces();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002154 }
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002155 return visitor.GetFailureCount();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002156}
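// Illustrative sketch (not part of the original file): heap verification is only meaningful
// with mutators suspended, so the pre/post GC verification paths below invoke it roughly as:
//
//   Locks::mutator_lock_->AssertExclusiveHeld(self);
//   size_t failures = VerifyHeapReferences();
//   if (failures > 0) {
//     LOG(FATAL) << "Heap verification failed with " << failures << " failures";
//   }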
2157
2158class VerifyReferenceCardVisitor {
2159 public:
2160 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
2161 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
2162 Locks::heap_bitmap_lock_)
Ian Rogers1d54e732013-05-02 21:10:01 -07002163 : heap_(heap), failed_(failed) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002164 }
2165
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002166 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
2167 // annotalysis on visitors.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002168 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
2169 NO_THREAD_SAFETY_ANALYSIS {
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07002170 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002171 // Filter out class references since changing an object's class does not mark the card as dirty.
2172 // Also handles large objects, since the only reference they hold is a class reference.
Mathieu Chartier407f7022014-02-18 14:37:05 -08002173 if (ref != nullptr && !ref->IsClass()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002174 accounting::CardTable* card_table = heap_->GetCardTable();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002175    // If the object's card is not dirty but the object references something in the live stack
 2176    // (other than its class), then the card should have been dirty; report it as an error.
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07002177 if (!card_table->AddrIsInCardTable(obj)) {
2178 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
2179 *failed_ = true;
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002180 } else if (!card_table->IsDirty(obj)) {
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002181 // TODO: Check mod-union tables.
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002182 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
 2183      // kCardDirty - 1 if it didn't get touched since we aged it.
Ian Rogers1d54e732013-05-02 21:10:01 -07002184 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier407f7022014-02-18 14:37:05 -08002185 if (live_stack->ContainsSorted(ref)) {
2186 if (live_stack->ContainsSorted(obj)) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002187 LOG(ERROR) << "Object " << obj << " found in live stack";
2188 }
2189 if (heap_->GetLiveBitmap()->Test(obj)) {
2190 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2191 }
2192 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
2193 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
2194
2195 // Print which field of the object is dead.
2196 if (!obj->IsObjectArray()) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002197 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002198 CHECK(klass != NULL);
Ian Rogersef7d42f2014-01-06 12:55:46 -08002199 mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
2200 : klass->GetIFields();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002201 CHECK(fields != NULL);
2202 for (int32_t i = 0; i < fields->GetLength(); ++i) {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002203 mirror::ArtField* cur = fields->Get(i);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002204 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
2205 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
2206 << PrettyField(cur);
2207 break;
2208 }
2209 }
2210 } else {
Ian Rogersef7d42f2014-01-06 12:55:46 -08002211 mirror::ObjectArray<mirror::Object>* object_array =
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002212 obj->AsObjectArray<mirror::Object>();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002213 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
2214 if (object_array->Get(i) == ref) {
2215 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
2216 }
2217 }
2218 }
2219
2220 *failed_ = true;
2221 }
2222 }
2223 }
2224 }
2225
2226 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002227 Heap* const heap_;
2228 bool* const failed_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002229};
2230
2231class VerifyLiveStackReferences {
2232 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07002233 explicit VerifyLiveStackReferences(Heap* heap)
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002234 : heap_(heap),
Brian Carlstrom93ba8932013-07-17 21:31:49 -07002235 failed_(false) {}
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002236
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002237 void operator()(mirror::Object* obj) const
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002238 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2239 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07002240 obj->VisitReferences<true>(visitor, VoidFunctor());
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002241 }
2242
2243 bool Failed() const {
2244 return failed_;
2245 }
2246
2247 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07002248 Heap* const heap_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002249 bool failed_;
2250};
2251
2252bool Heap::VerifyMissingCardMarks() {
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002253 Thread* self = Thread::Current();
2254 Locks::mutator_lock_->AssertExclusiveHeld(self);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002255
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002256 // We need to sort the live stack since we binary search it.
Ian Rogers1d54e732013-05-02 21:10:01 -07002257 live_stack_->Sort();
Hiroshi Yamauchi1ed90612014-02-14 15:00:51 -08002258  // Since we sorted the allocation stack contents, we need to revoke all
2259 // thread-local allocation stacks.
2260 RevokeAllThreadLocalAllocationStacks(self);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002261 VerifyLiveStackReferences visitor(this);
2262 GetLiveBitmap()->Visit(visitor);
2263
2264 // We can verify objects in the live stack since none of these should reference dead objects.
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08002265 for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002266 if (!kUseThreadLocalAllocationStack || *it != nullptr) {
2267 visitor(*it);
2268 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002269 }
2270
2271 if (visitor.Failed()) {
2272 DumpSpaces();
2273 return false;
2274 }
2275 return true;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002276}
2277
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002278void Heap::SwapStacks(Thread* self) {
2279 if (kUseThreadLocalAllocationStack) {
2280 live_stack_->AssertAllZero();
2281 }
Mathieu Chartierd22d5482012-11-06 17:14:12 -08002282 allocation_stack_.swap(live_stack_);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07002283}
2284
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002285void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08002286 // This must be called only during the pause.
2287 CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
2288 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
2289 MutexLock mu2(self, *Locks::thread_list_lock_);
2290 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
2291 for (Thread* t : thread_list) {
2292 t->RevokeThreadLocalAllocationStack();
2293 }
2294}
2295
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07002296void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
2297 if (kIsDebugBuild) {
2298 if (bump_pointer_space_ != nullptr) {
2299 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
2300 }
2301 }
2302}
2303
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002304accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
2305 auto it = mod_union_tables_.find(space);
2306 if (it == mod_union_tables_.end()) {
2307 return nullptr;
2308 }
2309 return it->second;
2310}
2311
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002312accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
2313 auto it = remembered_sets_.find(space);
2314 if (it == remembered_sets_.end()) {
2315 return nullptr;
2316 }
2317 return it->second;
2318}
2319
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002320void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002321 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Ian Rogers1d54e732013-05-02 21:10:01 -07002322 // Clear cards and keep track of cards cleared in the mod-union table.
Mathieu Chartier02e25112013-08-14 16:14:24 -07002323 for (const auto& space : continuous_spaces_) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002324 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002325 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002326 if (table != nullptr) {
2327 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
2328 "ImageModUnionClearCards";
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002329 TimingLogger::ScopedTiming t(name, timings);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002330 table->ClearCards();
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002331 } else if (use_rem_sets && rem_set != nullptr) {
2332 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
2333 << static_cast<int>(collector_type_);
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002334 TimingLogger::ScopedTiming t("AllocSpaceRemSetClearCards", timings);
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002335 rem_set->ClearCards();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002336 } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002337 TimingLogger::ScopedTiming t("AllocSpaceClearCards", timings);
Mathieu Chartierd22d5482012-11-06 17:14:12 -08002338 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
2339 // were dirty before the GC started.
Mathieu Chartierbd0a6532014-02-27 11:14:21 -08002340 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
2341 // -> clean(cleaning thread).
Mathieu Chartier590fee92013-09-13 13:46:47 -07002342      // The race can leave a card either aged or unaged. Since we checkpoint the roots first and
Mathieu Chartier938a03b2014-01-16 15:10:31 -08002343      // scan / update mod union tables afterwards, we will always scan the card either way; if we
Mathieu Chartier590fee92013-09-13 13:46:47 -07002344      // end up with the unaged card, we scan it in the pause.
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002345 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
2346 VoidFunctor());
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07002347 }
2348 }
2349}
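// Illustrative note (not part of the original file): "aging" distinguishes cards that were
// dirty before the GC from cards dirtied during the GC. Assuming the usual ART card constants,
// AgeCardVisitor behaves roughly like:
//
//   byte AgeCard(byte card) {
//     return (card == accounting::CardTable::kCardDirty) ? card - 1  // Dirty before the GC.
//                                                        : 0;        // Clean or already aged.
//   }
//
// A card re-dirtied by a mutator afterwards goes back to kCardDirty, so the GC can tell the two
// cases apart when it scans.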
2350
Mathieu Chartier407f7022014-02-18 14:37:05 -08002351static void IdentityMarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*, void*) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002352}
2353
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002354void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
2355 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002356 TimingLogger* const timings = current_gc_iteration_.GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002357 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002358 if (verify_pre_gc_heap_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002359 TimingLogger::ScopedTiming t("(Paused)PreGcVerifyHeapReferences", timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002360 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002361 size_t failures = VerifyHeapReferences();
2362 if (failures > 0) {
2363 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
2364 << " failures";
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002365 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002366 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002367 // Check that all objects which reference things in the live stack are on dirty cards.
2368 if (verify_missing_card_marks_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002369 TimingLogger::ScopedTiming t("(Paused)PreGcVerifyMissingCardMarks", timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002370 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2371 SwapStacks(self);
2372 // Sort the live stack so that we can quickly binary search it later.
2373 if (!VerifyMissingCardMarks()) {
2374 LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002375 }
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002376 SwapStacks(self);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002377 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002378 if (verify_mod_union_table_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002379 TimingLogger::ScopedTiming t("(Paused)PreGcVerifyModUnionTables", timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002380 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002381 for (const auto& table_pair : mod_union_tables_) {
2382 accounting::ModUnionTable* mod_union_table = table_pair.second;
Mathieu Chartier407f7022014-02-18 14:37:05 -08002383 mod_union_table->UpdateAndMarkReferences(IdentityMarkHeapReferenceCallback, nullptr);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002384 mod_union_table->Verify();
2385 }
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002386 }
2387}
2388
2389void Heap::PreGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07002390 if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002391 collector::GarbageCollector::ScopedPause pause(gc);
2392 PreGcVerificationPaused(gc);
2393 }
2394}
2395
2396void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
2397 // TODO: Add a new runtime option for this?
2398 if (verify_pre_gc_rosalloc_) {
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002399 RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002400 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08002401}
2402
Ian Rogers1d54e732013-05-02 21:10:01 -07002403void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002404 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002405 TimingLogger* const timings = current_gc_iteration_.GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002406 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002407  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
2408 // reachable objects.
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002409 if (verify_pre_sweeping_heap_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002410    TimingLogger::ScopedTiming t("(Paused)PreSweepingVerifyHeapReferences", timings);
Ian Rogers1d54e732013-05-02 21:10:01 -07002411 CHECK_NE(self->GetState(), kRunnable);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002412 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2413 // Swapping bound bitmaps does nothing.
2414 gc->SwapBitmaps();
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002415 // Pass in false since concurrent reference processing can mean that the reference referents
 2416    // may point to dead objects at the point at which PreSweepingGcVerification is called.
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002417 size_t failures = VerifyHeapReferences(false);
2418 if (failures > 0) {
2419 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
2420 << " failures";
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002421 }
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002422 gc->SwapBitmaps();
2423 }
2424 if (verify_pre_sweeping_rosalloc_) {
2425 RosAllocVerification(timings, "PreSweepingRosAllocVerification");
2426 }
2427}
2428
2429void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
2430 // Only pause if we have to do some verification.
2431 Thread* const self = Thread::Current();
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002432 TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002433 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002434 if (verify_system_weaks_) {
2435 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
2436 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
2437 mark_sweep->VerifySystemWeaks();
2438 }
2439 if (verify_post_gc_rosalloc_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002440 RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002441 }
2442 if (verify_post_gc_heap_) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002443 TimingLogger::ScopedTiming t("(Paused)PostGcVerifyHeapReferences", timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002444 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier8ab7e782014-05-19 16:55:27 -07002445 size_t failures = VerifyHeapReferences();
2446 if (failures > 0) {
 2447      LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
2448 << " failures";
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002449 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002450 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08002451}
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002452
Ian Rogers1d54e732013-05-02 21:10:01 -07002453void Heap::PostGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002454 if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
2455 collector::GarbageCollector::ScopedPause pause(gc);
 2456    PostGcVerificationPaused(gc);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002457 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07002458}
2459
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002460void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07002461 TimingLogger::ScopedTiming t(name, timings);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07002462 for (const auto& space : continuous_spaces_) {
2463 if (space->IsRosAllocSpace()) {
2464 VLOG(heap) << name << " : " << space->GetName();
2465 space->AsRosAllocSpace()->Verify();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002466 }
2467 }
2468}
2469
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002470collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
Mathieu Chartiercaa82d62014-02-02 16:51:17 -08002471 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002472 MutexLock mu(self, *gc_complete_lock_);
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002473 return WaitForGcToCompleteLocked(cause, self);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002474}
2475
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002476collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002477 collector::GcType last_gc_type = collector::kGcTypeNone;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002478 uint64_t wait_start = NanoTime();
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08002479 while (collector_type_running_ != kCollectorTypeNone) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002480 ATRACE_BEGIN("GC: Wait For Completion");
2481 // We must wait, change thread state then sleep on gc_complete_cond_;
2482 gc_complete_cond_->Wait(self);
2483 last_gc_type = last_gc_type_;
Mathieu Chartier752a0e62013-06-27 11:03:27 -07002484 ATRACE_END();
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07002485 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07002486 uint64_t wait_time = NanoTime() - wait_start;
2487 total_wait_time_ += wait_time;
2488 if (wait_time > long_pause_log_threshold_) {
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002489 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
2490 << " for cause " << cause;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002491 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07002492 return last_gc_type;
Carl Shapiro69759ea2011-07-21 18:13:35 -07002493}
2494
Elliott Hughesc967f782012-04-16 10:23:15 -07002495void Heap::DumpForSigQuit(std::ostream& os) {
Ian Rogers1d54e732013-05-02 21:10:01 -07002496 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002497 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -07002498 DumpGcPerformanceInfo(os);
Elliott Hughesc967f782012-04-16 10:23:15 -07002499}
2500
2501size_t Heap::GetPercentFree() {
Mathieu Chartierd30e1d62014-06-09 13:25:22 -07002502 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
Elliott Hughesc967f782012-04-16 10:23:15 -07002503}
2504
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08002505void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002506 if (max_allowed_footprint > GetMaxMemory()) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07002507 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002508 << PrettySize(GetMaxMemory());
2509 max_allowed_footprint = GetMaxMemory();
2510 }
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -07002511 max_allowed_footprint_ = max_allowed_footprint;
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07002512}
2513
Mathieu Chartier590fee92013-09-13 13:46:47 -07002514bool Heap::IsMovableObject(const mirror::Object* obj) const {
2515 if (kMovingCollector) {
Mathieu Chartier31f44142014-04-08 14:40:03 -07002516 space::Space* space = FindContinuousSpaceFromObject(obj, true);
2517 if (space != nullptr) {
2518 // TODO: Check large object?
2519 return space->CanMoveObjects();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002520 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07002521 }
2522 return false;
2523}
2524
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002525void Heap::UpdateMaxNativeFootprint() {
Ian Rogers3e5cf302014-05-20 16:40:37 -07002526 size_t native_size = native_bytes_allocated_.LoadRelaxed();
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002527 // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
2528 size_t target_size = native_size / GetTargetHeapUtilization();
2529 if (target_size > native_size + max_free_) {
2530 target_size = native_size + max_free_;
2531 } else if (target_size < native_size + min_free_) {
2532 target_size = native_size + min_free_;
2533 }
2534 native_footprint_gc_watermark_ = target_size;
2535 native_footprint_limit_ = 2 * target_size - native_size;
2536}
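// Worked example (illustrative values, not from the original source): with 30 MiB of native
// allocations, a target heap utilization of 0.5, min_free_ = 512 KiB and max_free_ = 2 MiB:
//   raw target   = 30 MiB / 0.5              = 60 MiB
//   clamped      = min(60 MiB, 30 + 2 MiB)   = 32 MiB   (the max_free_ bound wins)
//   gc watermark = 32 MiB, native limit = 2 * 32 - 30 = 34 MiB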
2537
Mathieu Chartierafe49982014-03-27 10:55:04 -07002538collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
2539 for (const auto& collector : garbage_collectors_) {
2540 if (collector->GetCollectorType() == collector_type_ &&
2541 collector->GetGcType() == gc_type) {
2542 return collector;
2543 }
2544 }
2545 return nullptr;
2546}
2547
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002548double Heap::HeapGrowthMultiplier() const {
 2549  // If we don't care about pause times (background) or are in low-memory mode, return 1.0.
2550 if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
2551 return 1.0;
2552 }
2553 return foreground_heap_growth_multiplier_;
2554}
2555
Mathieu Chartierafe49982014-03-27 10:55:04 -07002556void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07002557 // We know what our utilization is at this moment.
2558 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002559 const uint64_t bytes_allocated = GetBytesAllocated();
Mathieu Chartier65db8802012-11-20 12:36:46 -08002560 last_gc_size_ = bytes_allocated;
Ian Rogers1d54e732013-05-02 21:10:01 -07002561 last_gc_time_ns_ = NanoTime();
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002562 uint64_t target_size;
Mathieu Chartierafe49982014-03-27 10:55:04 -07002563 collector::GcType gc_type = collector_ran->GetGcType();
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002564 if (gc_type != collector::kGcTypeSticky) {
2565 // Grow the heap for non sticky GC.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002566    // Use the multiplier to grow more aggressively in the foreground.
 2567    const float multiplier = HeapGrowthMultiplier();
2568 intptr_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
2569 CHECK_GE(delta, 0);
2570 target_size = bytes_allocated + delta * multiplier;
2571 target_size = std::min(target_size,
2572 bytes_allocated + static_cast<uint64_t>(max_free_ * multiplier));
2573 target_size = std::max(target_size,
2574 bytes_allocated + static_cast<uint64_t>(min_free_ * multiplier));
Mathieu Chartier590fee92013-09-13 13:46:47 -07002575 native_need_to_run_finalization_ = true;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002576 next_gc_type_ = collector::kGcTypeSticky;
2577 } else {
Mathieu Chartierafe49982014-03-27 10:55:04 -07002578 collector::GcType non_sticky_gc_type =
2579 have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
2580 // Find what the next non sticky collector will be.
2581 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
2582 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
2583 // do another sticky collection next.
2584 // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
2585 // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
2586 // if the sticky GC throughput always remained >= the full/partial throughput.
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002587 if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
Mathieu Chartierafe49982014-03-27 10:55:04 -07002588 non_sticky_collector->GetEstimatedMeanThroughput() &&
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002589 non_sticky_collector->NumberOfIterations() > 0 &&
Mathieu Chartierafe49982014-03-27 10:55:04 -07002590 bytes_allocated <= max_allowed_footprint_) {
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002591 next_gc_type_ = collector::kGcTypeSticky;
2592 } else {
Mathieu Chartierafe49982014-03-27 10:55:04 -07002593 next_gc_type_ = non_sticky_gc_type;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002594 }
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002595 // If we have freed enough memory, shrink the heap back down.
2596 if (bytes_allocated + max_free_ < max_allowed_footprint_) {
2597 target_size = bytes_allocated + max_free_;
2598 } else {
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002599 target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07002600 }
2601 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002602 if (!ignore_max_footprint_) {
2603 SetIdealFootprint(target_size);
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002604 if (IsGcConcurrent()) {
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002605 // Calculate when to perform the next ConcurrentGC.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002606 // Calculate the estimated GC duration.
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07002607 const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002608 // Estimate how many remaining bytes we will have when we need to start the next GC.
2609 size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
Mathieu Chartier74762802014-01-24 10:21:35 -08002610 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002611 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
2612 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
 2613      // Unlikely case where, at the estimated allocation rate, the mutators would allocate more
 2614      // than the application's entire footprint while a single GC runs. Schedule
Mathieu Chartier74762802014-01-24 10:21:35 -08002615      // another GC nearly straight away.
2616 remaining_bytes = kMinConcurrentRemainingBytes;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002617 }
Mathieu Chartier74762802014-01-24 10:21:35 -08002618 DCHECK_LE(remaining_bytes, max_allowed_footprint_);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07002619 DCHECK_LE(max_allowed_footprint_, growth_limit_);
Mathieu Chartier74762802014-01-24 10:21:35 -08002620 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
2621 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
2622 // right away.
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07002623 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
2624 static_cast<size_t>(bytes_allocated));
Mathieu Chartier65db8802012-11-20 12:36:46 -08002625 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08002626 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07002627}
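// Worked example (illustrative values, not from the original source): after a non-sticky GC
// with bytes_allocated = 40 MiB, target utilization 0.5, foreground multiplier 2.0 and
// max_free_ = 8 MiB:
//   delta      = 40 / 0.5 - 40          = 40 MiB
//   raw target = 40 + 40 * 2.0          = 120 MiB
//   clamped    = min(120, 40 + 8 * 2.0) = 56 MiB   (the max_free_ bound wins)
// so max_allowed_footprint_ becomes 56 MiB; for a concurrent collector, concurrent_start_bytes_
// is then pulled below that by the bytes the mutators are estimated to allocate while the next
// GC runs.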
2628
jeffhaoc1160702011-10-27 15:48:45 -07002629void Heap::ClearGrowthLimit() {
Mathieu Chartier80de7a62012-11-27 17:21:50 -08002630 growth_limit_ = capacity_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07002631 non_moving_space_->ClearGrowthLimit();
jeffhaoc1160702011-10-27 15:48:45 -07002632}
2633
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07002634void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002635 ScopedObjectAccess soa(self);
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07002636 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
Ian Rogers53b8b092014-03-13 23:45:53 -07002637 jvalue args[1];
2638 args[0].l = arg.get();
2639 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
Mathieu Chartier8668c3c2014-04-24 16:48:11 -07002640 // Restore object in case it gets moved.
2641 *object = soa.Decode<mirror::Object*>(arg.get());
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002642}
2643
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07002644void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
2645 StackHandleScope<1> hs(self);
2646 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
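  // Note: the handle wrapper writes the (possibly relocated) object back to *obj when it goes out
  // of scope, so the caller's pointer stays valid even if a moving collector runs meanwhile.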
2647 RequestConcurrentGC(self);
2648}
2649
Ian Rogers1f539342012-10-03 21:09:42 -07002650void Heap::RequestConcurrentGC(Thread* self) {
Mathieu Chartier069387a2012-06-18 12:01:01 -07002651 // Make sure that we can do a concurrent GC.
Ian Rogers120f1c72012-09-28 17:17:10 -07002652 Runtime* runtime = Runtime::Current();
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07002653 if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
Mathieu Chartier590fee92013-09-13 13:46:47 -07002654 self->IsHandlingStackOverflow()) {
Ian Rogers120f1c72012-09-28 17:17:10 -07002655 return;
2656 }
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002657 // We already have a request pending; no reason to start more until we update
2658 // concurrent_start_bytes_.
2659 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
Ian Rogers120f1c72012-09-28 17:17:10 -07002660 JNIEnv* env = self->GetJniEnv();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002661 DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
2662 DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002663 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2664 WellKnownClasses::java_lang_Daemons_requestGC);
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07002665 CHECK(!env->ExceptionCheck());
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07002666}
2667
Ian Rogers81d425b2012-09-27 16:03:43 -07002668void Heap::ConcurrentGC(Thread* self) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002669 if (Runtime::Current()->IsShuttingDown(self)) {
2670 return;
Mathieu Chartier2542d662012-06-21 17:14:11 -07002671 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08002672 // Wait for any GCs currently running to finish.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002673 if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
Mathieu Chartierf9ed0d32013-11-21 16:42:47 -08002674 // If we can't run the GC type we wanted to run, find the next appropriate one and try that
2675 // instead. E.g. if we can't do a partial GC, do a full GC instead.
2676 if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
2677 collector::kGcTypeNone) {
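      // gc_plan_ is ordered from cheapest to most thorough (typically sticky, then partial, then
      // full), so iterate forward and escalate until some collector is able to run.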
2678 for (collector::GcType gc_type : gc_plan_) {
2679 // Attempt to run the collector; if we succeed, we are done.
2680 if (gc_type > next_gc_type_ &&
2681 CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
2682 break;
2683 }
2684 }
2685 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07002686 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07002687}
2688
Mathieu Chartier7bf52d22014-03-13 14:46:09 -07002689void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08002690 Thread* self = Thread::Current();
2691 {
2692 MutexLock mu(self, *heap_trim_request_lock_);
2693 if (desired_collector_type_ == desired_collector_type) {
2694 return;
2695 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002696 heap_transition_or_trim_target_time_ =
2697 std::max(heap_transition_or_trim_target_time_, NanoTime() + delta_time);
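    // Taking the max ensures an already scheduled, later transition/trim target time is never
    // pulled earlier by a new request.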
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08002698 desired_collector_type_ = desired_collector_type;
2699 }
2700 SignalHeapTrimDaemon(self);
2701}
2702
Mathieu Chartier7bf52d22014-03-13 14:46:09 -07002703void Heap::RequestHeapTrim() {
Ian Rogers48931882013-01-22 14:35:16 -08002704 // GC completed and now we must decide whether to request a heap trim (advising pages back to
2705 // the kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim
2706 // scans a space it holds that space's lock, so it can become a cause of jank.
2707 // Note: the large object space trims itself, and the Zygote space was trimmed at fork time
2708 // and has been unchanging since.
2709
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08002710 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
2711 // because that only marks object heads, so a large array looks like lots of empty space. We
2712 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
2713 // to utilization (which is probably inversely proportional to how much benefit we can expect).
2714 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
2715 // not how much use we're making of those pages.
Ian Rogers120f1c72012-09-28 17:17:10 -07002716
2717 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002718 Runtime* runtime = Runtime::Current();
2719 if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
2720 // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
2721 // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
2722 // as we don't hold the lock while requesting the trim).
2723 return;
Ian Rogerse1d490c2012-02-03 09:09:07 -08002724 }
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07002725 {
2726 MutexLock mu(self, *heap_trim_request_lock_);
2727 if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
2728 // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one
2729 // just yet.
2730 return;
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08002731 }
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07002732 heap_trim_request_pending_ = true;
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002733 uint64_t current_time = NanoTime();
2734 if (heap_transition_or_trim_target_time_ < current_time) {
2735 heap_transition_or_trim_target_time_ = current_time + kHeapTrimWait;
2736 }
Mathieu Chartierc39e3422013-08-07 16:41:36 -07002737 }
Mathieu Chartier440e4ce2014-03-31 16:36:35 -07002738 // Notify the daemon thread which will actually do the heap trim.
2739 SignalHeapTrimDaemon(self);
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08002740}
2741
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08002742void Heap::SignalHeapTrimDaemon(Thread* self) {
2743 JNIEnv* env = self->GetJniEnv();
2744 DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
2745 DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != nullptr);
2746 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2747 WellKnownClasses::java_lang_Daemons_requestHeapTrim);
2748 CHECK(!env->ExceptionCheck());
2749}
2750
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07002751void Heap::RevokeThreadLocalBuffers(Thread* thread) {
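  // Hand any thread-local allocation buffers owned by |thread| (rosalloc thread-local runs and
  // bump pointer space TLABs) back to their owning spaces.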
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002752 if (rosalloc_space_ != nullptr) {
2753 rosalloc_space_->RevokeThreadLocalBuffers(thread);
2754 }
Mathieu Chartier692fafd2013-11-29 17:24:40 -08002755 if (bump_pointer_space_ != nullptr) {
2756 bump_pointer_space_->RevokeThreadLocalBuffers(thread);
2757 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07002758}
2759
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07002760void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
2761 if (rosalloc_space_ != nullptr) {
2762 rosalloc_space_->RevokeThreadLocalBuffers(thread);
2763 }
2764}
2765
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07002766void Heap::RevokeAllThreadLocalBuffers() {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08002767 if (rosalloc_space_ != nullptr) {
2768 rosalloc_space_->RevokeAllThreadLocalBuffers();
2769 }
Mathieu Chartier692fafd2013-11-29 17:24:40 -08002770 if (bump_pointer_space_ != nullptr) {
2771 bump_pointer_space_->RevokeAllThreadLocalBuffers();
2772 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07002773}
2774
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002775bool Heap::IsGCRequestPending() const {
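  // concurrent_start_bytes_ is parked at SIZE_MAX while a background GC request is outstanding
  // (see RequestConcurrentGC), so the sentinel doubles as a "request pending" flag.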
2776 return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
2777}
2778
Mathieu Chartier590fee92013-09-13 13:46:47 -07002779void Heap::RunFinalization(JNIEnv* env) {
2780 // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
2781 if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
2782 CHECK(WellKnownClasses::java_lang_System != nullptr);
2783 WellKnownClasses::java_lang_System_runFinalization =
2784 CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
2785 CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
2786 }
2787 env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
2788 WellKnownClasses::java_lang_System_runFinalization);
2789}
2790
Ian Rogers1eb512d2013-10-18 15:42:20 -07002791void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002792 Thread* self = ThreadForEnv(env);
2793 if (native_need_to_run_finalization_) {
2794 RunFinalization(env);
2795 UpdateMaxNativeFootprint();
2796 native_need_to_run_finalization_ = false;
2797 }
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002798 // Total number of native bytes allocated.
Ian Rogers3e5cf302014-05-20 16:40:37 -07002799 size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
2800 new_native_bytes_allocated += bytes;
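  // FetchAndAddSequentiallyConsistent returns the value *before* the addition, so |bytes| is
  // added again above to compute the post-allocation total used for the watermark checks below.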
2801 if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08002802 collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
2803 collector::kGcTypeFull;
2804
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002805 // The second watermark is higher than the GC watermark. If you hit this, it means you are
2806 // allocating native objects faster than the GC can keep up with.
Ian Rogers3e5cf302014-05-20 16:40:37 -07002807 if (new_native_bytes_allocated > native_footprint_limit_) {
Mathieu Chartier89a201e2014-05-02 10:27:26 -07002808 if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002809 // Just finished a GC, attempt to run finalizers.
2810 RunFinalization(env);
2811 CHECK(!env->ExceptionCheck());
2812 }
2813 // If we still are over the watermark, attempt a GC for alloc and run finalizers.
Ian Rogers3e5cf302014-05-20 16:40:37 -07002814 if (new_native_bytes_allocated > native_footprint_limit_) {
Hiroshi Yamauchi6f4ffe42014-01-13 12:30:44 -08002815 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
Mathieu Chartier590fee92013-09-13 13:46:47 -07002816 RunFinalization(env);
2817 native_need_to_run_finalization_ = false;
2818 CHECK(!env->ExceptionCheck());
2819 }
2820 // We have just run finalizers; update the native watermark since it is very likely that
2821 // the finalizers released registered native allocations.
2822 UpdateMaxNativeFootprint();
2823 } else if (!IsGCRequestPending()) {
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07002824 if (IsGcConcurrent()) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002825 RequestConcurrentGC(self);
2826 } else {
Hiroshi Yamauchid20aba12014-04-11 15:31:09 -07002827 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002828 }
2829 }
2830 }
2831}
2832
Ian Rogers1eb512d2013-10-18 15:42:20 -07002833void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002834 int expected_size, new_size;
2835 do {
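    // Standard lock-free decrement loop: re-read the counter, compute the new value, and retry
    // if another thread raced us (a weak compare-exchange may also fail spuriously).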
Ian Rogers3e5cf302014-05-20 16:40:37 -07002836 expected_size = native_bytes_allocated_.LoadRelaxed();
Mathieu Chartier590fee92013-09-13 13:46:47 -07002837 new_size = expected_size - bytes;
2838 if (UNLIKELY(new_size < 0)) {
2839 ScopedObjectAccess soa(env);
2840 env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
2841 StringPrintf("Attempted to free %d native bytes with only %d native bytes "
2842 "registered as allocated", bytes, expected_size).c_str());
2843 break;
2844 }
Ian Rogers3e5cf302014-05-20 16:40:37 -07002845 } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size, new_size));
Mathieu Chartier987ccff2013-07-08 11:05:21 -07002846}
2847
Ian Rogersef7d42f2014-01-06 12:55:46 -08002848size_t Heap::GetTotalMemory() const {
2849 size_t ret = 0;
Mathieu Chartier02e25112013-08-14 16:14:24 -07002850 for (const auto& space : continuous_spaces_) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07002851 // Currently don't include the image space.
2852 if (!space->IsImageSpace()) {
2853 ret += space->Size();
Hiroshi Yamauchi09b07a92013-07-15 13:17:06 -07002854 }
2855 }
Mathieu Chartier02e25112013-08-14 16:14:24 -07002856 for (const auto& space : discontinuous_spaces_) {
Hiroshi Yamauchi09b07a92013-07-15 13:17:06 -07002857 if (space->IsLargeObjectSpace()) {
2858 ret += space->AsLargeObjectSpace()->GetBytesAllocated();
2859 }
2860 }
2861 return ret;
2862}
2863
Mathieu Chartier11409ae2013-09-23 11:49:36 -07002864void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
2865 DCHECK(mod_union_table != nullptr);
2866 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
2867}
2868
Mathieu Chartierc645f1d2014-03-06 18:11:53 -08002869void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
2870 CHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
2871 (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
Mathieu Chartierf8322842014-05-16 10:59:25 -07002872 c->GetDescriptor().empty());
Mathieu Chartierc645f1d2014-03-06 18:11:53 -08002873 CHECK_GE(byte_count, sizeof(mirror::Object));
2874}
2875
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002876void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
2877 CHECK(remembered_set != nullptr);
2878 space::Space* space = remembered_set->GetSpace();
2879 CHECK(space != nullptr);
Mathieu Chartier8e4a96d2014-05-21 10:44:32 -07002880 CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002881 remembered_sets_.Put(space, remembered_set);
Mathieu Chartier8e4a96d2014-05-21 10:44:32 -07002882 CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08002883}
2884
2885void Heap::RemoveRememberedSet(space::Space* space) {
2886 CHECK(space != nullptr);
2887 auto it = remembered_sets_.find(space);
2888 CHECK(it != remembered_sets_.end());
2889 remembered_sets_.erase(it);
2890 CHECK(remembered_sets_.find(space) == remembered_sets_.end());
2891}
2892
Mathieu Chartier4aeec172014-03-27 16:09:46 -07002893void Heap::ClearMarkedObjects() {
2894 // Clear all of the spaces' mark bitmaps.
2895 for (const auto& space : GetContinuousSpaces()) {
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07002896 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
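    // Spaces whose live and mark bitmaps are the same object are skipped, since clearing the
    // shared bitmap would also discard liveness information.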
Mathieu Chartier4aeec172014-03-27 16:09:46 -07002897 if (space->GetLiveBitmap() != mark_bitmap) {
2898 mark_bitmap->Clear();
2899 }
2900 }
2901 // Clear the marked objects in the discontinuous space object sets.
2902 for (const auto& space : GetDiscontinuousSpaces()) {
Mathieu Chartierbbd695c2014-04-16 09:48:48 -07002903 space->GetMarkBitmap()->Clear();
Mathieu Chartier4aeec172014-03-27 16:09:46 -07002904 }
2905}
2906
Ian Rogers1d54e732013-05-02 21:10:01 -07002907} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -07002908} // namespace art