/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <vector>
#include <valgrind.h>

#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "heap-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "os.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

extern void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator);

namespace gc {

static constexpr bool kGCALotMode = false;
static constexpr size_t kGcAlotInterval = KB;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;

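// Heap construction: sets up the image, non-moving, bump pointer and large object spaces, the
// card table and image mod-union table, the mark/allocation/live stacks, and the garbage
// collectors selected by the runtime options.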
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, size_t capacity, const std::string& image_file_name,
           CollectorType post_zygote_collector_type, size_t parallel_gc_threads,
           size_t conc_gc_threads, bool low_memory_mode, size_t long_pause_log_threshold,
           size_t long_gc_log_threshold, bool ignore_max_footprint)
    : non_moving_space_(nullptr),
      concurrent_gc_(false),
      collector_type_(kCollectorTypeNone),
      post_zygote_collector_type_(post_zygote_collector_type),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      have_zygote_space_(false),
      soft_reference_queue_(this),
      weak_reference_queue_(this),
      finalizer_reference_queue_(this),
      phantom_reference_queue_(this),
      cleared_references_(this),
      is_gc_running_(false),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      native_need_to_run_finalization_(false),
      activity_thread_class_(NULL),
      application_thread_class_(NULL),
      activity_thread_(NULL),
      application_thread_(NULL),
      last_process_state_id_(NULL),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(false),
      verify_post_gc_heap_(false),
      verify_mod_union_table_(false),
      min_alloc_space_size_for_sticky_gc_(2 * MB),
      min_remaining_space_for_sticky_gc_(1 * MB),
      last_trim_time_ms_(0),
      allocation_rate_(0),
      /* For GC a lot mode, we limit the allocations stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
      current_allocator_(kMovingCollector ? kAllocatorTypeBumpPointer : kAllocatorTypeFreeList),
      current_non_moving_allocator_(kAllocatorTypeFreeList),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kHeapVerificationNotPermitted),
      gc_disable_count_(0),
      running_on_valgrind_(RUNNING_ON_VALGRIND) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  if (!Runtime::Current()->IsZygote()) {
    ChangeCollector(post_zygote_collector_type_);
  } else {
    // We are the zygote, use bump pointer allocation + semi space collector.
    ChangeCollector(kCollectorTypeSS);
  }

  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files
  byte* requested_alloc_space_begin = nullptr;
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str());
    CHECK(image_space != nullptr) << "Failed to create space for " << image_file_name;
    AddSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory, ensure alloc space
    // isn't going to get in the middle
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    if (oat_file_end_addr > requested_alloc_space_begin) {
      requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
    }
  }

  const char* name = Runtime::Current()->IsZygote() ? "zygote space" : "alloc space";
  if (!kUseRosAlloc) {
    non_moving_space_ = space::DlMallocSpace::Create(name, initial_size, growth_limit, capacity,
                                                     requested_alloc_space_begin);
  } else {
    non_moving_space_ = space::RosAllocSpace::Create(name, initial_size, growth_limit, capacity,
                                                     requested_alloc_space_begin);
  }
  if (kMovingCollector) {
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    // TODO: Having 3+ spaces as big as the large heap size can cause virtual memory fragmentation
    // issues.
    const size_t bump_pointer_space_size = std::min(non_moving_space_->Capacity(), 128 * MB);
    bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
                                                          bump_pointer_space_size, nullptr);
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", bump_pointer_space_size,
                                                  nullptr);
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
  }

  CHECK(non_moving_space_ != NULL) << "Failed to create non-moving space";
  non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
  AddSpace(non_moving_space_);

  // Allocate the large object space.
  const bool kUseFreeListSpaceForLOS = false;
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != NULL) << "Failed to create large object space";
  AddSpace(large_object_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  byte* heap_begin = continuous_spaces_.front()->Begin();
  byte* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  // Card cache for now since it makes it easier for us to update the references to the copying
  // spaces.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableCardCache("Image mod-union table", this, GetImageSpace());
  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
  AddModUnionTable(mod_union_table);

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;

  // Default mark stack size in bytes.
  static const size_t default_mark_stack_size = 64 * KB;
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
                                                          max_allocation_stack_size_));
  live_stack_.reset(accounting::ObjectStack::Create("live stack",
                                                    max_allocation_stack_size_));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create it earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  last_gc_time_ns_ = NanoTime();
  last_gc_size_ = GetBytesAllocated();

  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);

  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }
  if (kMovingCollector) {
    // TODO: Clean this up.
    semi_space_collector_ = new collector::SemiSpace(this);
    garbage_collectors_.push_back(semi_space_collector_);
  }

  if (running_on_valgrind_) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

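// Switch the allocator used by the fast allocation entrypoints, resetting the instrumented
// entrypoints so that subsequent allocations pick up the change.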
void Heap::ChangeAllocator(AllocatorType allocator) {
  DCHECK_NE(allocator, kAllocatorTypeLOS);
  if (current_allocator_ != allocator) {
    current_allocator_ = allocator;
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

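// We are compiling the boot image only if neither an image space nor a zygote space exists yet.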
bool Heap::IsCompilingBoot() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return false;
    } else if (space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

bool Heap::HasImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return true;
    }
  }
  return false;
}

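// Temporarily disable GC: wait for any running collection to finish, then bump the disable count
// while holding gc_complete_lock_.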
void Heap::IncrementDisableGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  WaitForGcToCompleteLocked(self);
  ++gc_disable_count_;
}

void Heap::DecrementDisableGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GE(gc_disable_count_, 0U);
  --gc_disable_count_;
}

void Heap::UpdateProcessState(ProcessState process_state) {
  process_state_ = process_state;
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

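// Visit every live object in the heap: objects in the bump pointer space, objects still sitting
// in the allocation stack, and everything covered by the live bitmaps.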
void Heap::VisitObjects(ObjectVisitorCallback callback, void* arg) {
  // Visit objects in bump pointer space.
  Thread* self = Thread::Current();
  // TODO: Use reference block.
  std::vector<SirtRef<mirror::Object>*> saved_refs;
  if (bump_pointer_space_ != nullptr) {
    // Need to put all these in sirts since the callback may trigger a GC. TODO: Use a better data
    // structure.
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(bump_pointer_space_->Begin());
    const mirror::Object* end = reinterpret_cast<const mirror::Object*>(
        bump_pointer_space_->End());
    while (obj < end) {
      saved_refs.push_back(new SirtRef<mirror::Object>(self, obj));
      obj = space::BumpPointerSpace::GetNextObject(obj);
    }
  }
  // TODO: Switch to standard begin and end to use a range-based loop.
  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
       it < end; ++it) {
    mirror::Object* obj = *it;
    // Objects in the allocation stack might be in a movable space.
    saved_refs.push_back(new SirtRef<mirror::Object>(self, obj));
  }
  GetLiveBitmap()->Walk(callback, arg);
  for (const auto& ref : saved_refs) {
    callback(ref->get(), arg);
  }
  // Need to free the sirts in the reverse order in which they were allocated.
  for (size_t i = saved_refs.size(); i != 0; --i) {
    delete saved_refs[i - 1];
  }
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  MarkAllocStack(non_moving_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(), stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

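// Register a space with the heap, hooking its bitmaps or object sets into the heap bitmaps and
// keeping continuous_spaces_ sorted by start address.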
void Heap::AddSpace(space::Space* space) {
  DCHECK(space != NULL);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }

    continuous_spaces_.push_back(continuous_space);
    if (continuous_space->IsMallocSpace()) {
      non_moving_space_ = continuous_space->AsMallocSpace();
    }

    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
                return a->Begin() < b->Begin();
              });
    // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks
    // to avoid redundant marking.
    bool seen_zygote = false, seen_alloc = false;
    for (const auto& space : continuous_spaces_) {
      if (space->IsImageSpace()) {
        CHECK(!seen_zygote);
        CHECK(!seen_alloc);
      } else if (space->IsZygoteSpace()) {
        CHECK(!seen_alloc);
        seen_zygote = true;
      } else if (space->IsMallocSpace()) {
        seen_alloc = true;
      }
    }
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
    live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
    mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

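// Bookkeeping for memory used by GC-internal data structures; guarded so a call through a null
// Heap* is a no-op.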
void Heap::RegisterGCAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.fetch_add(bytes);
  }
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.fetch_sub(bytes);
  }
}

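// Dump cumulative GC timing, pause, throughput and allocation statistics to the given stream.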
void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;

  // Dump cumulative loggers for each GC type.
  uint64_t total_paused_time = 0;
  for (const auto& collector : garbage_collectors_) {
    CumulativeLogger& logger = collector->GetCumulativeTimings();
    if (logger.GetTotalNs() != 0) {
      os << Dumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      Histogram<uint64_t>::CumulativeData cumulative_data;
      collector->GetPauseHistogram().CreateHistogram(&cumulative_data);
      collector->GetPauseHistogram().PrintConfidenceIntervals(os, 0.99, cumulative_data);
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
  }
  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (kMeasureAllocationTime) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
}

Heap::~Heap() {
  VLOG(heap) << "Starting ~Heap()";
  STLDeleteElements(&garbage_collectors_);
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();
  STLDeleteValues(&mod_union_tables_);
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  VLOG(heap) << "Finished ~Heap()";
}

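// Returns the continuous space containing obj, or NULL (optionally logging fatally) if none does.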
space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

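// Callback arguments used while deciding which soft references to preserve.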
struct SoftReferenceArgs {
  RootVisitor* is_marked_callback_;
  RootVisitor* recursive_mark_callback_;
  void* arg_;
};

mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
  SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
  // TODO: Not preserve all soft references.
  return args->recursive_mark_callback_(obj, args->arg_);
}

// Process reference class instances and schedule finalizations.
void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
                             RootVisitor* is_marked_callback,
                             RootVisitor* recursive_mark_object_callback, void* arg) {
  // Unless we are in the zygote or required to clear soft references with white references,
  // preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    SoftReferenceArgs soft_reference_args;
    soft_reference_args.is_marked_callback_ = is_marked_callback;
    soft_reference_args.recursive_mark_callback_ = recursive_mark_object_callback;
    soft_reference_args.arg_ = arg;
    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
                                                     &soft_reference_args);
  }
  timings.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  timings.EndSplit();
  // Preserve all white objects with finalize methods and schedule them for finalization.
  timings.StartSplit("EnqueueFinalizerReferences");
  finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
                                                        recursive_mark_object_callback, arg);
  timings.EndSplit();
  timings.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  timings.EndSplit();
}

bool Heap::IsEnqueued(mirror::Object* ref) const {
  // Since the references are stored as cyclic lists it means that once enqueued, the pending next
  // will always be non-null.
  return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
}

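// A reference is enqueuable if it has been associated with a queue but not yet added to it.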
bool Heap::IsEnqueuable(const mirror::Object* ref) const {
  DCHECK(ref != nullptr);
  const mirror::Object* queue =
      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueOffset(), false);
  const mirror::Object* queue_next =
      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueNextOffset(), false);
  return queue != nullptr && queue_next == nullptr;
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
                                  RootVisitor mark_visitor, void* arg) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != nullptr);
  mirror::Object* referent = GetReferenceReferent(obj);
  if (referent != nullptr) {
    mirror::Object* forward_address = mark_visitor(referent, arg);
    // Null means that the object is not currently marked.
    if (forward_address == nullptr) {
      Thread* self = Thread::Current();
      // TODO: Remove these locks, and use atomic stacks for storing references?
      // We need to check that the references haven't already been enqueued since we can end up
      // scanning the same reference multiple times due to dirty cards.
      if (klass->IsSoftReferenceClass()) {
        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsWeakReferenceClass()) {
        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsFinalizerReferenceClass()) {
        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsPhantomReferenceClass()) {
        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else {
        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                   << klass->GetAccessFlags();
      }
    } else if (referent != forward_address) {
      // Referent is already marked and we need to update it.
      SetReferenceReferent(obj, forward_address);
    }
  }
}

space::ImageSpace* Heap::GetImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return space->AsImageSpace();
    }
  }
  return NULL;
}

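// Malloc space chunk-walk callback: records the largest free chunk seen so far in *arg.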
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

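// Throw an OutOfMemoryError whose message includes the free memory and, when the failure looks
// like fragmentation, the largest possible contiguous allocation.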
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) {
  std::ostringstream oss;
  int64_t total_bytes_free = GetFreeMemory();
  oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
      << " free bytes";
  // If the allocation failed due to fragmentation, print out the largest continuous allocation.
  if (!large_object_allocation && total_bytes_free >= byte_count) {
    size_t max_contiguous_allocation = 0;
    for (const auto& space : continuous_spaces_) {
      if (space->IsMallocSpace()) {
        // To allow the Walk/InspectAll() to exclusively-lock the mutator
        // lock, temporarily release the shared access to the mutator
        // lock here by transitioning to the suspended state.
        Locks::mutator_lock_->AssertSharedHeld(self);
        self->TransitionFromRunnableToSuspended(kSuspended);
        space->AsMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
        self->TransitionFromSuspendedToRunnable();
        Locks::mutator_lock_->AssertSharedHeld(self);
      }
    }
    oss << "; failed due to fragmentation (largest possible contiguous allocation "
        << max_contiguous_allocation << " bytes)";
  }
  self->ThrowOutOfMemoryError(oss.str().c_str());
}

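// Return unused pages of the malloc spaces and the native heap to the system and log what was
// reclaimed.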
void Heap::Trim() {
  uint64_t start_ns = NanoTime();
  // Trim the managed spaces.
  uint64_t total_alloc_space_allocated = 0;
  uint64_t total_alloc_space_size = 0;
  uint64_t managed_reclaimed = 0;
  for (const auto& space : continuous_spaces_) {
    if (space->IsMallocSpace() && !space->IsZygoteSpace()) {
      gc::space::MallocSpace* alloc_space = space->AsMallocSpace();
      total_alloc_space_size += alloc_space->Size();
      managed_reclaimed += alloc_space->Trim();
    }
  }
  total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated() -
      bump_pointer_space_->GetBytesAllocated();
  const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
      static_cast<float>(total_alloc_space_size);
  uint64_t gc_heap_end_ns = NanoTime();
  // Trim the native heap.
  dlmalloc_trim(0);
  size_t native_reclaimed = 0;
  dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
  uint64_t end_ns = NanoTime();
  VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
      << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
      << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
      << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
      << "%.";
}

bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would require
  // taking the lock.
  if (obj == nullptr) {
    return true;
  }
  return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
}

bool Heap::IsHeapAddress(const mirror::Object* obj) const {
  if (kMovingCollector && bump_pointer_space_->HasAddress(obj)) {
    return true;
  }
  // TODO: This probably doesn't work for large objects.
  return FindSpaceFromObject(obj, true) != nullptr;
}

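// Liveness check that consults the space bitmaps, the bump pointer spaces, and (optionally, with
// retries) the allocation and live stacks, since the stacks may be swapped while mutators run.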
bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack,
                              bool search_live_stack, bool sorted) {
  // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
  if (obj == nullptr || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
  space::DiscontinuousSpace* d_space = NULL;
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else if (bump_pointer_space_->Contains(obj) || temp_space_->Contains(obj)) {
    return true;
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL) {
      if (d_space->GetLiveObjects()->Test(obj)) {
        return true;
      }
    }
  }
  // This is covering the allocation/live stack swapping that is done without mutators suspended.
  for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
    if (i > 0) {
      NanoSleep(MsToNs(10));
    }
    if (search_allocation_stack) {
      if (sorted) {
        if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          return true;
        }
      } else if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj))) {
        return true;
      }
    }

    if (search_live_stack) {
      if (sorted) {
        if (live_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          return true;
        }
      } else if (live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
        return true;
      }
    }
  }
  // We need to check the bitmaps again since there is a race where we mark something as live and
  // then clear the stack containing it.
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) {
      return true;
    }
  }
  return false;
}

void Heap::VerifyObjectImpl(const mirror::Object* obj) {
  if (Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}

void Heap::DumpSpaces(std::ostream& stream) {
  for (const auto& space : continuous_spaces_) {
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    stream << space << " " << *space << "\n";
    if (live_bitmap != nullptr) {
      stream << live_bitmap << " " << *live_bitmap << "\n";
    }
    if (mark_bitmap != nullptr) {
      stream << mark_bitmap << " " << *mark_bitmap << "\n";
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    stream << space << " " << *space << "\n";
  }
}

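// Sanity check an object: verify alignment, a sane class pointer, and (in slow verification
// modes) that the object and its class are live.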
void Heap::VerifyObjectBody(const mirror::Object* obj) {
  CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
  // Ignore early dawn of the universe verifications.
  if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.load()) < 10 * KB)) {
    return;
  }
  const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
      mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  if (UNLIKELY(c == NULL)) {
    LOG(FATAL) << "Null class in object: " << obj;
  } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
    LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
  }
  // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
  // Note: we don't use the accessors here as they have internal sanity checks
  // that we don't want to run
  raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  CHECK_EQ(c_c, c_c_c);

  if (verify_object_mode_ > kVerifyAllFast) {
    // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
    // heap_bitmap_lock_.
    if (!IsLiveObjectLocked(obj)) {
      DumpSpaces();
      LOG(FATAL) << "Object is dead: " << obj;
    }
    if (!IsLiveObjectLocked(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
  }
}

void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
  DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
  num_bytes_allocated_.fetch_sub(freed_bytes);

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    thread_stats->freed_objects += freed_objects;
    thread_stats->freed_bytes += freed_bytes;

    // TODO: Do this concurrently.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
  }
}

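// Slow path allocation: wait for any running GC, then run progressively more aggressive
// collections from gc_plan_, growing the heap and finally clearing SoftReferences before giving
// up and throwing OutOfMemoryError.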
mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
                                             size_t alloc_size, size_t* bytes_allocated) {
  mirror::Object* ptr = nullptr;
  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  collector::GcType last_gc = WaitForGcToComplete(self);
  if (last_gc != collector::kGcTypeNone) {
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    ptr = TryToAllocate<true>(self, allocator, alloc_size, false, bytes_allocated);
  }

  // Loop through our different Gc types and try to Gc until we get enough free memory.
  for (collector::GcType gc_type : gc_plan_) {
    if (ptr != nullptr) {
      break;
    }
    // Attempt to run the collector, if we succeed, re-try the allocation.
    if (CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone) {
      // Did we free sufficient memory for the allocation to succeed?
      ptr = TryToAllocate<true>(self, allocator, alloc_size, false, bytes_allocated);
    }
  }
  // Allocations have failed after GCs; this is an exceptional state.
  if (ptr == nullptr) {
    // Try harder, growing the heap if necessary.
    ptr = TryToAllocate<true>(self, allocator, alloc_size, true, bytes_allocated);
  }
  if (ptr == nullptr) {
    // Most allocations should have succeeded by now, so the heap is really full, really
    // fragmented, or the requested size is really big. Do another GC, collecting SoftReferences
    // this time. The VM spec requires that all SoftReferences have been collected and cleared
    // before throwing OOME.
    VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
             << " allocation";
    // TODO: Run finalization, but this may cause more allocations to occur.
    // We don't need a WaitForGcToComplete here either.
    DCHECK(!gc_plan_.empty());
    CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
    ptr = TryToAllocate<true>(self, allocator, alloc_size, true, bytes_allocated);
    if (ptr == nullptr) {
      ThrowOutOfMemoryError(self, alloc_size, false);
    }
  }
  return ptr;
}

void Heap::SetTargetHeapUtilization(float target) {
  DCHECK_GT(target, 0.0f);  // asserted in Java code
  DCHECK_LT(target, 1.0f);
  target_utilization_ = target;
}

size_t Heap::GetObjectsAllocated() const {
  size_t total = 0;
  for (space::AllocSpace* space : alloc_spaces_) {
    total += space->GetObjectsAllocated();
  }
  return total;
}

size_t Heap::GetObjectsAllocatedEver() const {
  size_t total = 0;
  for (space::AllocSpace* space : alloc_spaces_) {
    total += space->GetTotalObjectsAllocated();
  }
  return total;
}

size_t Heap::GetBytesAllocatedEver() const {
  size_t total = 0;
  for (space::AllocSpace* space : alloc_spaces_) {
    total += space->GetTotalBytesAllocated();
  }
  return total;
}

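// Visitor that counts, for each requested class, how many live objects are instances of it,
// using either an exact class match or assignability depending on the flag.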
Elliott Hughes9d5ccec2011-09-19 13:19:50 -0700959class InstanceCounter {
960 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800961 InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
Ian Rogersb726dcb2012-09-05 08:57:23 -0700962 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Elliott Hughesec0f83d2013-01-15 16:54:08 -0800963 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -0700964 }
965
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800966 void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Elliott Hughesec0f83d2013-01-15 16:54:08 -0800967 for (size_t i = 0; i < classes_.size(); ++i) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800968 const mirror::Class* instance_class = o->GetClass();
Elliott Hughesec0f83d2013-01-15 16:54:08 -0800969 if (use_is_assignable_from_) {
970 if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
971 ++counts_[i];
972 }
973 } else {
974 if (instance_class == classes_[i]) {
975 ++counts_[i];
976 }
Elliott Hughes9d5ccec2011-09-19 13:19:50 -0700977 }
978 }
979 }
980
Mathieu Chartier7469ebf2012-09-24 16:28:36 -0700981 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800982 const std::vector<mirror::Class*>& classes_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -0800983 bool use_is_assignable_from_;
984 uint64_t* const counts_;
985
986 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -0700987};
988
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800989void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
Elliott Hughesec0f83d2013-01-15 16:54:08 -0800990 uint64_t* counts) {
991 // We only want reachable instances, so do a GC. This also ensures that the alloc stack
992 // is empty, so the live bitmap is the only place we need to look.
993 Thread* self = Thread::Current();
994 self->TransitionFromRunnableToSuspended(kNative);
995 CollectGarbage(false);
996 self->TransitionFromSuspendedToRunnable();
997
998 InstanceCounter counter(classes, use_is_assignable_from, counts);
999 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001000 GetLiveBitmap()->Visit(counter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001001}
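// A hypothetical usage sketch (illustration only; names outside this file are assumed):
//   std::vector<mirror::Class*> classes = { string_class };
//   uint64_t counts[1] = { 0 };
//   Runtime::Current()->GetHeap()->CountInstances(classes, false /* exact class match */, counts);
// counts[0] then holds the number of reachable instances of that exact class.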
1002
Elliott Hughes3b78c942013-01-15 17:35:41 -08001003class InstanceCollector {
1004 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001005 InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
Elliott Hughes3b78c942013-01-15 17:35:41 -08001006 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1007 : class_(c), max_count_(max_count), instances_(instances) {
1008 }
1009
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001010 void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1011 const mirror::Class* instance_class = o->GetClass();
Elliott Hughes3b78c942013-01-15 17:35:41 -08001012 if (instance_class == class_) {
1013 if (max_count_ == 0 || instances_.size() < max_count_) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001014 instances_.push_back(const_cast<mirror::Object*>(o));
Elliott Hughes3b78c942013-01-15 17:35:41 -08001015 }
1016 }
1017 }
1018
1019 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001020 mirror::Class* class_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001021 uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001022 std::vector<mirror::Object*>& instances_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001023
1024 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1025};
1026
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001027void Heap::GetInstances(mirror::Class* c, int32_t max_count,
1028 std::vector<mirror::Object*>& instances) {
Elliott Hughes3b78c942013-01-15 17:35:41 -08001029 // We only want reachable instances, so do a GC. This also ensures that the alloc stack
1030 // is empty, so the live bitmap is the only place we need to look.
1031 Thread* self = Thread::Current();
1032 self->TransitionFromRunnableToSuspended(kNative);
1033 CollectGarbage(false);
1034 self->TransitionFromSuspendedToRunnable();
1035
1036 InstanceCollector collector(c, max_count, instances);
1037 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1038 GetLiveBitmap()->Visit(collector);
1039}
1040
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001041class ReferringObjectsFinder {
1042 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001043 ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
1044 std::vector<mirror::Object*>& referring_objects)
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001045 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1046 : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1047 }
1048
1049 // For bitmap Visit.
1050 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1051 // annotalysis on visitors.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001052 void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
1053 collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(o), *this, true);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001054 }
1055
1056 // For MarkSweep::VisitObjectReferences.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001057 void operator()(mirror::Object* referrer, mirror::Object* object,
Brian Carlstromdf629502013-07-17 22:39:56 -07001058 const MemberOffset&, bool) const {
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001059 if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001060 referring_objects_.push_back(referrer);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001061 }
1062 }
1063
1064 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001065 mirror::Object* object_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001066 uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001067 std::vector<mirror::Object*>& referring_objects_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001068
1069 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1070};
1071
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001072void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1073 std::vector<mirror::Object*>& referring_objects) {
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001074 // We only want reachable instances, so do a GC. This also ensures that the alloc stack
1075 // is empty, so the live bitmap is the only place we need to look.
1076 Thread* self = Thread::Current();
1077 self->TransitionFromRunnableToSuspended(kNative);
1078 CollectGarbage(false);
1079 self->TransitionFromSuspendedToRunnable();
1080
1081 ReferringObjectsFinder finder(o, max_count, referring_objects);
1082 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1083 GetLiveBitmap()->Visit(finder);
1084}
1085
Ian Rogers30fab402012-01-23 15:43:46 -08001086void Heap::CollectGarbage(bool clear_soft_references) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001087 // Even if we waited for a GC we still need to do another GC since weaks allocated during the
1088  // last GC will not necessarily have been cleared.
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001089 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001090}
1091
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001092void Heap::ChangeCollector(CollectorType collector_type) {
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001093 // TODO: Only do this with all mutators suspended to avoid races.
1094 if (collector_type != collector_type_) {
1095 collector_type_ = collector_type;
1096 gc_plan_.clear();
1097 switch (collector_type_) {
1098 case kCollectorTypeSS: {
1099 concurrent_gc_ = false;
1100 gc_plan_.push_back(collector::kGcTypeFull);
1101 ChangeAllocator(kAllocatorTypeBumpPointer);
1102 break;
1103 }
1104 case kCollectorTypeMS: {
1105 concurrent_gc_ = false;
1106 gc_plan_.push_back(collector::kGcTypeSticky);
1107 gc_plan_.push_back(collector::kGcTypePartial);
1108 gc_plan_.push_back(collector::kGcTypeFull);
1109 ChangeAllocator(kAllocatorTypeFreeList);
1110 break;
1111 }
1112 case kCollectorTypeCMS: {
1113 concurrent_gc_ = true;
1114 gc_plan_.push_back(collector::kGcTypeSticky);
1115 gc_plan_.push_back(collector::kGcTypePartial);
1116 gc_plan_.push_back(collector::kGcTypeFull);
1117 ChangeAllocator(kAllocatorTypeFreeList);
1118 break;
1119 }
1120 default: {
1121 LOG(FATAL) << "Unimplemented";
1122 }
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001123 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001124 if (concurrent_gc_) {
1125 concurrent_start_bytes_ =
1126 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
1127 } else {
1128 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001129 }
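    // Illustrative example (assumed sizes): if max_allowed_footprint_ were 64 MB and
    // kMinConcurrentRemainingBytes were 512 KB, a concurrent collector would request a background
    // GC once allocated bytes pass ~63.5 MB. The std::max above guards against underflow when the
    // footprint is smaller than kMinConcurrentRemainingBytes; non-concurrent collectors never
    // trigger on this path since the threshold is std::numeric_limits<size_t>::max().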
1130 }
1131}
1132
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001133void Heap::PreZygoteFork() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001134 static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
Ian Rogers81d425b2012-09-27 16:03:43 -07001135 Thread* self = Thread::Current();
1136 MutexLock mu(self, zygote_creation_lock_);
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001137 // Try to see if we have any Zygote spaces.
1138 if (have_zygote_space_) {
1139 return;
1140 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001141 VLOG(heap) << "Starting PreZygoteFork";
1142 // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
1143 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
1144 // Trim the pages at the end of the non moving space.
1145 non_moving_space_->Trim();
1146 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001147 // Change the collector to the post zygote one.
1148 ChangeCollector(post_zygote_collector_type_);
Mathieu Chartier50482232013-11-21 11:48:14 -08001149 // TODO: Delete bump_pointer_space_ and temp_pointer_space_?
Mathieu Chartier590fee92013-09-13 13:46:47 -07001150 if (semi_space_collector_ != nullptr) {
Mathieu Chartier50482232013-11-21 11:48:14 -08001151 // Create a new bump pointer space which we will compact into.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001152 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
1153 non_moving_space_->Limit());
1154 // Compact the bump pointer space to a new zygote bump pointer space.
1155 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1156 Compact(&target_space, bump_pointer_space_);
1157 CHECK_EQ(temp_space_->GetBytesAllocated(), 0U);
1158 total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
1159 total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
1160 // Update the end and write out image.
1161 non_moving_space_->SetEnd(target_space.End());
1162 non_moving_space_->SetLimit(target_space.Limit());
1163 accounting::SpaceBitmap* bitmap = non_moving_space_->GetLiveBitmap();
1164 // Record the allocations in the bitmap.
1165 VLOG(heap) << "Recording zygote allocations";
1166 mirror::Object* obj = reinterpret_cast<mirror::Object*>(target_space.Begin());
1167 const mirror::Object* end = reinterpret_cast<const mirror::Object*>(target_space.End());
1168 while (obj < end) {
1169 bitmap->Set(obj);
1170 obj = space::BumpPointerSpace::GetNextObject(obj);
1171 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001172 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001173 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
1174 // the remaining available heap memory.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001175 space::MallocSpace* zygote_space = non_moving_space_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001176 non_moving_space_ = zygote_space->CreateZygoteSpace("alloc space");
1177 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
Ian Rogers1d54e732013-05-02 21:10:01 -07001178 // Change the GC retention policy of the zygote space to only collect when full.
1179 zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001180 AddSpace(non_moving_space_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001181 have_zygote_space_ = true;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001182 zygote_space->InvalidateAllocator();
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001183 // Create the zygote space mod union table.
1184 accounting::ModUnionTable* mod_union_table =
1185 new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
1186 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
1187 AddModUnionTable(mod_union_table);
Ian Rogers5f5a2c02012-09-17 10:52:08 -07001188 // Reset the cumulative loggers since we now have a few additional timing phases.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001189 for (const auto& collector : garbage_collectors_) {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001190 collector->ResetCumulativeStatistics();
Mathieu Chartier0325e622012-09-05 14:22:51 -07001191 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001192}
1193
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001194void Heap::FlushAllocStack() {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001195 MarkAllocStack(non_moving_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001196 allocation_stack_.get());
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001197 allocation_stack_->Reset();
1198}
1199
Ian Rogers1d54e732013-05-02 21:10:01 -07001200void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
1201 accounting::ObjectStack* stack) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001202 mirror::Object** limit = stack->End();
1203 for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
1204 const mirror::Object* obj = *it;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001205 DCHECK(obj != NULL);
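    // Objects whose address falls outside the space bitmap are assumed to live in the large
    // object space, which is tracked with a set (large_objects) rather than a bitmap.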
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001206 if (LIKELY(bitmap->HasAddress(obj))) {
1207 bitmap->Set(obj);
1208 } else {
1209 large_objects->Set(obj);
1210 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001211 }
1212}
1213
Mathieu Chartier590fee92013-09-13 13:46:47 -07001214const char* PrettyCause(GcCause cause) {
1215 switch (cause) {
1216 case kGcCauseForAlloc: return "Alloc";
1217 case kGcCauseBackground: return "Background";
1218 case kGcCauseExplicit: return "Explicit";
1219 default:
1220 LOG(FATAL) << "Unreachable";
1221 }
1222 return "";
1223}
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001224
Mathieu Chartier590fee92013-09-13 13:46:47 -07001225void Heap::SwapSemiSpaces() {
1226 // Swap the spaces so we allocate into the space which we just evacuated.
1227 std::swap(bump_pointer_space_, temp_space_);
1228}
1229
1230void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
1231 space::ContinuousMemMapAllocSpace* source_space) {
1232 CHECK(kMovingCollector);
Mathieu Chartier50482232013-11-21 11:48:14 -08001233 CHECK_NE(target_space, source_space) << "In-place compaction currently unsupported";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001234 if (target_space != source_space) {
1235 semi_space_collector_->SetFromSpace(source_space);
1236 semi_space_collector_->SetToSpace(target_space);
1237 semi_space_collector_->Run(false);
1238 }
1239}
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001240
Ian Rogers1d54e732013-05-02 21:10:01 -07001241collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
1242 bool clear_soft_references) {
Ian Rogers81d425b2012-09-27 16:03:43 -07001243 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001244 Runtime* runtime = Runtime::Current();
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001245 // If the heap can't run the GC, silently fail and return that no GC was run.
1246 switch (gc_type) {
1247 case collector::kGcTypeSticky: {
1248 const size_t alloc_space_size = non_moving_space_->Size();
1249 if (alloc_space_size < min_alloc_space_size_for_sticky_gc_ ||
1250 non_moving_space_->Capacity() - alloc_space_size < min_remaining_space_for_sticky_gc_) {
1251 return collector::kGcTypeNone;
1252 }
1253 break;
1254 }
1255 case collector::kGcTypePartial: {
1256 if (!have_zygote_space_) {
1257 return collector::kGcTypeNone;
1258 }
1259 break;
1260 }
1261 default: {
1262      // Other GC types don't have any special cases that make them not runnable. The main case
1263 // here is full GC.
1264 }
1265 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08001266 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Ian Rogers81d425b2012-09-27 16:03:43 -07001267 Locks::mutator_lock_->AssertNotHeld(self);
Ian Rogers120f1c72012-09-28 17:17:10 -07001268 if (self->IsHandlingStackOverflow()) {
1269 LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
1270 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001271 {
1272 gc_complete_lock_->AssertNotHeld(self);
1273 MutexLock mu(self, *gc_complete_lock_);
1274 // Ensure there is only one GC at a time.
1275 WaitForGcToCompleteLocked(self);
1276 // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
1277 // Not doing at the moment to ensure soft references are cleared.
1278    // GC can be disabled if someone has an active GetPrimitiveArrayCritical.
1279 if (gc_disable_count_ != 0) {
1280 LOG(WARNING) << "Skipping GC due to disable count " << gc_disable_count_;
1281 return collector::kGcTypeNone;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001282 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001283 is_gc_running_ = true;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001284 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001285 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
1286 ++runtime->GetStats()->gc_for_alloc_count;
1287 ++self->GetStats()->gc_for_alloc_count;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001288 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001289 uint64_t gc_start_time_ns = NanoTime();
Mathieu Chartier65db8802012-11-20 12:36:46 -08001290 uint64_t gc_start_size = GetBytesAllocated();
1291 // Approximate allocation rate in bytes / second.
Ian Rogers1d54e732013-05-02 21:10:01 -07001292 uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001293 // Back to back GCs can cause 0 ms of wait time in between GC invocations.
1294 if (LIKELY(ms_delta != 0)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001295 allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
Mathieu Chartier65db8802012-11-20 12:36:46 -08001296 VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
1297 }
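  // Worked example (illustrative numbers): if 8 MB were allocated since the last GC and 400 ms
  // elapsed, allocation_rate_ becomes 8 MB * 1000 / 400 = 20 MB/s; GrowForUtilization later uses
  // this rate to estimate how much will be allocated while a concurrent GC runs.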
1298
Ian Rogers1d54e732013-05-02 21:10:01 -07001299 DCHECK_LT(gc_type, collector::kGcTypeMax);
1300 DCHECK_NE(gc_type, collector::kGcTypeNone);
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001301
Mathieu Chartier590fee92013-09-13 13:46:47 -07001302 collector::GarbageCollector* collector = nullptr;
Mathieu Chartier50482232013-11-21 11:48:14 -08001303 // TODO: Clean this up.
1304 if (current_allocator_ == kAllocatorTypeBumpPointer) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001305 gc_type = semi_space_collector_->GetGcType();
1306 CHECK_EQ(temp_space_->GetObjectsAllocated(), 0U);
1307 semi_space_collector_->SetFromSpace(bump_pointer_space_);
1308 semi_space_collector_->SetToSpace(temp_space_);
1309 mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
Mathieu Chartier50482232013-11-21 11:48:14 -08001310 collector = semi_space_collector_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001311 gc_type = collector::kGcTypeFull;
Mathieu Chartier50482232013-11-21 11:48:14 -08001312 } else if (current_allocator_ == kAllocatorTypeFreeList) {
1313 for (const auto& cur_collector : garbage_collectors_) {
1314 if (cur_collector->IsConcurrent() == concurrent_gc_ &&
1315 cur_collector->GetGcType() == gc_type) {
1316 collector = cur_collector;
1317 break;
1318 }
1319 }
1320 } else {
1321 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001322 }
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001323 CHECK(collector != nullptr)
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001324 << "Could not find garbage collector with concurrent=" << concurrent_gc_
1325 << " and type=" << gc_type;
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001326
Mathieu Chartier590fee92013-09-13 13:46:47 -07001327 ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
1328
1329 collector->Run(clear_soft_references);
Ian Rogers1d54e732013-05-02 21:10:01 -07001330 total_objects_freed_ever_ += collector->GetFreedObjects();
1331 total_bytes_freed_ever_ += collector->GetFreedBytes();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001332
Mathieu Chartier39e32612013-11-12 16:28:05 -08001333 // Enqueue cleared references.
1334 EnqueueClearedReferences();
1335
Mathieu Chartier590fee92013-09-13 13:46:47 -07001336 // Grow the heap so that we know when to perform the next GC.
1337 GrowForUtilization(gc_type, collector->GetDurationNs());
1338
Mathieu Chartierca2a24d2013-11-25 15:12:12 -08001339 if (CareAboutPauseTimes()) {
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001340 const size_t duration = collector->GetDurationNs();
1341 std::vector<uint64_t> pauses = collector->GetPauseTimes();
1342 // GC for alloc pauses the allocating thread, so consider it as a pause.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001343 bool was_slow = duration > long_gc_log_threshold_ ||
1344 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001345 if (!was_slow) {
1346 for (uint64_t pause : pauses) {
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001347 was_slow = was_slow || pause > long_pause_log_threshold_;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001348 }
1349 }
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001350 if (was_slow) {
1351 const size_t percent_free = GetPercentFree();
1352 const size_t current_heap_size = GetBytesAllocated();
1353 const size_t total_memory = GetTotalMemory();
1354 std::ostringstream pause_string;
1355 for (size_t i = 0; i < pauses.size(); ++i) {
1356 pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
1357 << ((i != pauses.size() - 1) ? ", " : "");
1358 }
1359 LOG(INFO) << gc_cause << " " << collector->GetName()
1360 << " GC freed " << collector->GetFreedObjects() << "("
1361 << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
1362 << collector->GetFreedLargeObjects() << "("
1363 << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
1364 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
1365 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
1366 << " total " << PrettyDuration((duration / 1000) * 1000);
1367 if (VLOG_IS_ON(heap)) {
Ian Rogers5fe9af72013-11-14 00:17:20 -08001368 LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings());
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001369 }
1370 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001371 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001372
Ian Rogers15bf2d32012-08-28 17:33:04 -07001373 {
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001374 MutexLock mu(self, *gc_complete_lock_);
1375 is_gc_running_ = false;
1376 last_gc_type_ = gc_type;
1377 // Wake anyone who may have been waiting for the GC to complete.
1378 gc_complete_cond_->Broadcast(self);
Ian Rogers15bf2d32012-08-28 17:33:04 -07001379 }
Mathieu Chartier0a9dc052013-07-25 11:01:28 -07001380
Mathieu Chartier752a0e62013-06-27 11:03:27 -07001381 ATRACE_END();
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001382
1383 // Inform DDMS that a GC completed.
Ian Rogers15bf2d32012-08-28 17:33:04 -07001384 Dbg::GcDidFinish();
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001385 return gc_type;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001386}
Mathieu Chartiera6399032012-06-11 18:49:50 -07001387
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001388static mirror::Object* RootMatchesObjectVisitor(mirror::Object* root, void* arg) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001389 mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001390 if (root == obj) {
1391 LOG(INFO) << "Object " << obj << " is a root";
1392 }
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001393 return root;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001394}
1395
1396class ScanVisitor {
1397 public:
Brian Carlstromdf629502013-07-17 22:39:56 -07001398 void operator()(const mirror::Object* obj) const {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001399 LOG(ERROR) << "Would have rescanned object " << obj;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001400 }
1401};
1402
Ian Rogers1d54e732013-05-02 21:10:01 -07001403// Verify a reference from an object.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001404class VerifyReferenceVisitor {
1405 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001406 explicit VerifyReferenceVisitor(Heap* heap)
Ian Rogers1d54e732013-05-02 21:10:01 -07001407 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001408 : heap_(heap), failed_(false) {}
Ian Rogers1d54e732013-05-02 21:10:01 -07001409
1410 bool Failed() const {
1411 return failed_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001412 }
1413
1414 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
Ian Rogers1d54e732013-05-02 21:10:01 -07001415 // analysis on visitors.
Brian Carlstromdf629502013-07-17 22:39:56 -07001416 void operator()(const mirror::Object* obj, const mirror::Object* ref,
1417 const MemberOffset& offset, bool /* is_static */) const
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001418 NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001419 // Verify that the reference is live.
Ian Rogers1d54e732013-05-02 21:10:01 -07001420 if (UNLIKELY(ref != NULL && !IsLive(ref))) {
1421 accounting::CardTable* card_table = heap_->GetCardTable();
1422 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
1423 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001424 if (!failed_) {
1425        // Print the message only on the first failure to prevent spam.
1426 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
1427 failed_ = true;
1428 }
1429 if (obj != nullptr) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001430 byte* card_addr = card_table->CardFromAddr(obj);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001431 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
1432 << offset << "\n card value = " << static_cast<int>(*card_addr);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001433 if (heap_->IsValidObjectAddress(obj->GetClass())) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001434 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
1435 } else {
1436 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
1437 }
1438
1439        // Attempt to find the class inside the recently freed objects.
1440 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001441 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
1442 space::MallocSpace* space = ref_space->AsMallocSpace();
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001443 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
1444 if (ref_class != nullptr) {
1445 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
1446 << PrettyClass(ref_class);
1447 } else {
1448 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
1449 }
1450 }
1451
Mathieu Chartier590fee92013-09-13 13:46:47 -07001452 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001453 ref->GetClass()->IsClass()) {
1454 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
1455 } else {
1456 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
1457 << ") is not a valid heap address";
1458 }
1459
Ian Rogers1d54e732013-05-02 21:10:01 -07001460 card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
1461 void* cover_begin = card_table->AddrFromCard(card_addr);
1462 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
1463 accounting::CardTable::kCardSize);
1464 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
1465 << "-" << cover_end;
1466 accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001467
Ian Rogers1d54e732013-05-02 21:10:01 -07001468 // Print out how the object is live.
1469 if (bitmap != NULL && bitmap->Test(obj)) {
1470 LOG(ERROR) << "Object " << obj << " found in live bitmap";
1471 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001472 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001473 LOG(ERROR) << "Object " << obj << " found in allocation stack";
1474 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001475 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001476 LOG(ERROR) << "Object " << obj << " found in live stack";
1477 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001478 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
1479 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
1480 }
1481 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
1482 LOG(ERROR) << "Ref " << ref << " found in live stack";
1483 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001484 // Attempt to see if the card table missed the reference.
1485 ScanVisitor scan_visitor;
1486 byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
1487 card_table->Scan(bitmap, byte_cover_begin,
Mathieu Chartier184e3222013-08-03 14:02:57 -07001488 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
Ian Rogers1d54e732013-05-02 21:10:01 -07001489
1490 // Search to see if any of the roots reference our object.
1491 void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
1492 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1493
1494 // Search to see if any of the roots reference our reference.
1495 arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
1496 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1497 } else {
1498 LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001499 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001500 }
1501 }
1502
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001503 bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001504 return heap_->IsLiveObjectLocked(obj, true, false, true);
Ian Rogers1d54e732013-05-02 21:10:01 -07001505 }
1506
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001507 static mirror::Object* VerifyRoots(mirror::Object* root, void* arg) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001508 VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001509 (*visitor)(nullptr, root, MemberOffset(0), true);
1510 return root;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001511 }
1512
1513 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07001514 Heap* const heap_;
1515 mutable bool failed_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001516};
1517
Ian Rogers1d54e732013-05-02 21:10:01 -07001518// Verify all references within an object, for use with HeapBitmap::Visit.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001519class VerifyObjectVisitor {
1520 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001521 explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001522
Mathieu Chartier590fee92013-09-13 13:46:47 -07001523 void operator()(mirror::Object* obj) const
Ian Rogersb726dcb2012-09-05 08:57:23 -07001524 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001525 // Note: we are verifying the references in obj but not obj itself, this is because obj must
1526 // be live or else how did we find it in the live bitmap?
1527 VerifyReferenceVisitor visitor(heap_);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001528    // The class doesn't count as a reference but we should verify it anyway.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001529 collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
1530 if (obj->GetClass()->IsReferenceClass()) {
1531 visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false);
1532 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001533 failed_ = failed_ || visitor.Failed();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001534 }
1535
Mathieu Chartier590fee92013-09-13 13:46:47 -07001536 static void VisitCallback(mirror::Object* obj, void* arg)
1537 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1538 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
1539 visitor->operator()(obj);
1540 }
1541
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001542 bool Failed() const {
1543 return failed_;
1544 }
1545
1546 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07001547 Heap* const heap_;
1548 mutable bool failed_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001549};
1550
1551// Must do this with mutators suspended since we are directly accessing the allocation stacks.
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001552bool Heap::VerifyHeapReferences() {
Ian Rogers81d425b2012-09-27 16:03:43 -07001553 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001554 // Lets sort our allocation stacks so that we can efficiently binary search them.
Ian Rogers1d54e732013-05-02 21:10:01 -07001555 allocation_stack_->Sort();
1556 live_stack_->Sort();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001557 VerifyObjectVisitor visitor(this);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001558 // Verify objects in the allocation stack since these will be objects which were:
1559 // 1. Allocated prior to the GC (pre GC verification).
1560 // 2. Allocated during the GC (pre sweep GC verification).
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001561 // We don't want to verify the objects in the live stack since they themselves may be
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001562 // pointing to dead objects if they are not reachable.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001563 VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
1564 // Verify the roots:
1565 Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001566 if (visitor.Failed()) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001567 // Dump mod-union tables.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001568 for (const auto& table_pair : mod_union_tables_) {
1569 accounting::ModUnionTable* mod_union_table = table_pair.second;
1570 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
1571 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001572 DumpSpaces();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001573 return false;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001574 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001575 return true;
1576}
1577
1578class VerifyReferenceCardVisitor {
1579 public:
1580 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
1581 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
1582 Locks::heap_bitmap_lock_)
Ian Rogers1d54e732013-05-02 21:10:01 -07001583 : heap_(heap), failed_(failed) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001584 }
1585
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001586 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1587 // annotalysis on visitors.
Brian Carlstromdf629502013-07-17 22:39:56 -07001588 void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
1589 bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001590 // Filter out class references since changing an object's class does not mark the card as dirty.
1591 // Also handles large objects, since the only reference they hold is a class reference.
1592 if (ref != NULL && !ref->IsClass()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001593 accounting::CardTable* card_table = heap_->GetCardTable();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001594 // If the object is not dirty and it is referencing something in the live stack other than
1595      // its class, then it must be on a dirty card.
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001596 if (!card_table->AddrIsInCardTable(obj)) {
1597 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
1598 *failed_ = true;
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001599 } else if (!card_table->IsDirty(obj)) {
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001600 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
1601        // kCardDirty - 1 if it didn't get touched since we aged it.
Ian Rogers1d54e732013-05-02 21:10:01 -07001602 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001603 if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
1604 if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001605 LOG(ERROR) << "Object " << obj << " found in live stack";
1606 }
1607 if (heap_->GetLiveBitmap()->Test(obj)) {
1608 LOG(ERROR) << "Object " << obj << " found in live bitmap";
1609 }
1610 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
1611 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
1612
1613          // Print which field of the object references the object found in the live stack.
1614 if (!obj->IsObjectArray()) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001615 const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001616 CHECK(klass != NULL);
Brian Carlstromea46f952013-07-30 01:26:50 -07001617 const mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
1618 : klass->GetIFields();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001619 CHECK(fields != NULL);
1620 for (int32_t i = 0; i < fields->GetLength(); ++i) {
Brian Carlstromea46f952013-07-30 01:26:50 -07001621 const mirror::ArtField* cur = fields->Get(i);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001622 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
1623 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
1624 << PrettyField(cur);
1625 break;
1626 }
1627 }
1628 } else {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001629 const mirror::ObjectArray<mirror::Object>* object_array =
1630 obj->AsObjectArray<mirror::Object>();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001631 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
1632 if (object_array->Get(i) == ref) {
1633 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
1634 }
1635 }
1636 }
1637
1638 *failed_ = true;
1639 }
1640 }
1641 }
1642 }
1643
1644 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07001645 Heap* const heap_;
1646 bool* const failed_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001647};
1648
1649class VerifyLiveStackReferences {
1650 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001651 explicit VerifyLiveStackReferences(Heap* heap)
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001652 : heap_(heap),
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001653 failed_(false) {}
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001654
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001655 void operator()(mirror::Object* obj) const
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001656 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1657 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
Mathieu Chartier590fee92013-09-13 13:46:47 -07001658 collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001659 }
1660
1661 bool Failed() const {
1662 return failed_;
1663 }
1664
1665 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07001666 Heap* const heap_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001667 bool failed_;
1668};
1669
1670bool Heap::VerifyMissingCardMarks() {
Ian Rogers81d425b2012-09-27 16:03:43 -07001671 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001672
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001673 // We need to sort the live stack since we binary search it.
Ian Rogers1d54e732013-05-02 21:10:01 -07001674 live_stack_->Sort();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001675 VerifyLiveStackReferences visitor(this);
1676 GetLiveBitmap()->Visit(visitor);
1677
1678 // We can verify objects in the live stack since none of these should reference dead objects.
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001679 for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001680 visitor(*it);
1681 }
1682
1683 if (visitor.Failed()) {
1684 DumpSpaces();
1685 return false;
1686 }
1687 return true;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001688}
1689
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001690void Heap::SwapStacks() {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001691 allocation_stack_.swap(live_stack_);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001692}
1693
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001694accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
1695 auto it = mod_union_tables_.find(space);
1696 if (it == mod_union_tables_.end()) {
1697 return nullptr;
1698 }
1699 return it->second;
1700}
1701
Ian Rogers5fe9af72013-11-14 00:17:20 -08001702void Heap::ProcessCards(TimingLogger& timings) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001703 // Clear cards and keep track of cards cleared in the mod-union table.
Mathieu Chartier02e25112013-08-14 16:14:24 -07001704 for (const auto& space : continuous_spaces_) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001705 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
1706 if (table != nullptr) {
1707 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
1708 "ImageModUnionClearCards";
Ian Rogers5fe9af72013-11-14 00:17:20 -08001709 TimingLogger::ScopedSplit split(name, &timings);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001710 table->ClearCards();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001711 } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
Ian Rogers5fe9af72013-11-14 00:17:20 -08001712 TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001713 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
1714 // were dirty before the GC started.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001715 // TODO: Don't need to use atomic.
1716      // The race means we either end up with an aged card or an unaged card. Since we checkpoint
1717      // the roots and then scan / update the mod union tables afterwards, we will always scan either card.
1718      // If we end up with the non-aged card, we scan it in the pause.
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001719 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001720 }
1721 }
1722}
1723
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001724static mirror::Object* IdentityCallback(mirror::Object* obj, void*) {
1725 return obj;
1726}
1727
Ian Rogers1d54e732013-05-02 21:10:01 -07001728void Heap::PreGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001729 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1730 Thread* self = Thread::Current();
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001731
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001732 if (verify_pre_gc_heap_) {
1733 thread_list->SuspendAll();
1734 {
1735 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1736 if (!VerifyHeapReferences()) {
1737 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed";
1738 }
1739 }
1740 thread_list->ResumeAll();
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001741 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001742
1743 // Check that all objects which reference things in the live stack are on dirty cards.
1744 if (verify_missing_card_marks_) {
1745 thread_list->SuspendAll();
1746 {
1747 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1748 SwapStacks();
1749 // Sort the live stack so that we can quickly binary search it later.
1750 if (!VerifyMissingCardMarks()) {
1751 LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
1752 }
1753 SwapStacks();
1754 }
1755 thread_list->ResumeAll();
1756 }
1757
1758 if (verify_mod_union_table_) {
1759 thread_list->SuspendAll();
1760 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001761 for (const auto& table_pair : mod_union_tables_) {
1762 accounting::ModUnionTable* mod_union_table = table_pair.second;
1763 mod_union_table->UpdateAndMarkReferences(IdentityCallback, nullptr);
1764 mod_union_table->Verify();
1765 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001766 thread_list->ResumeAll();
1767 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001768}
1769
Ian Rogers1d54e732013-05-02 21:10:01 -07001770void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001771  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
1772 // reachable objects.
1773 if (verify_post_gc_heap_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001774 Thread* self = Thread::Current();
1775 CHECK_NE(self->GetState(), kRunnable);
Ian Rogers1d54e732013-05-02 21:10:01 -07001776 {
1777 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1778 // Swapping bound bitmaps does nothing.
1779 gc->SwapBitmaps();
1780 if (!VerifyHeapReferences()) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001781 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed";
Ian Rogers1d54e732013-05-02 21:10:01 -07001782 }
1783 gc->SwapBitmaps();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001784 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001785 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001786}
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001787
Ian Rogers1d54e732013-05-02 21:10:01 -07001788void Heap::PostGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001789 if (verify_system_weaks_) {
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001790 Thread* self = Thread::Current();
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001791 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001792 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001793 mark_sweep->VerifySystemWeaks();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001794 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001795}
1796
Mathieu Chartier590fee92013-09-13 13:46:47 -07001797collector::GcType Heap::WaitForGcToComplete(Thread* self) {
1798 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1799 MutexLock mu(self, *gc_complete_lock_);
1800 return WaitForGcToCompleteLocked(self);
1801}
1802
1803collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001804 collector::GcType last_gc_type = collector::kGcTypeNone;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001805 uint64_t wait_start = NanoTime();
1806 while (is_gc_running_) {
1807 ATRACE_BEGIN("GC: Wait For Completion");
1808 // We must wait, change thread state then sleep on gc_complete_cond_;
1809 gc_complete_cond_->Wait(self);
1810 last_gc_type = last_gc_type_;
Mathieu Chartier752a0e62013-06-27 11:03:27 -07001811 ATRACE_END();
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001812 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001813 uint64_t wait_time = NanoTime() - wait_start;
1814 total_wait_time_ += wait_time;
1815 if (wait_time > long_pause_log_threshold_) {
1816 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time);
1817 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001818 return last_gc_type;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001819}
1820
Elliott Hughesc967f782012-04-16 10:23:15 -07001821void Heap::DumpForSigQuit(std::ostream& os) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001822 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001823 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -07001824 DumpGcPerformanceInfo(os);
Elliott Hughesc967f782012-04-16 10:23:15 -07001825}
1826
1827size_t Heap::GetPercentFree() {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001828 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory());
Elliott Hughesc967f782012-04-16 10:23:15 -07001829}
1830
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08001831void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001832 if (max_allowed_footprint > GetMaxMemory()) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001833 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001834 << PrettySize(GetMaxMemory());
1835 max_allowed_footprint = GetMaxMemory();
1836 }
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -07001837 max_allowed_footprint_ = max_allowed_footprint;
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001838}
1839
Mathieu Chartier590fee92013-09-13 13:46:47 -07001840bool Heap::IsMovableObject(const mirror::Object* obj) const {
1841 if (kMovingCollector) {
1842 DCHECK(!IsInTempSpace(obj));
1843 if (bump_pointer_space_->HasAddress(obj)) {
1844 return true;
1845 }
1846 }
1847 return false;
1848}
1849
1850bool Heap::IsInTempSpace(const mirror::Object* obj) const {
1851 if (temp_space_->HasAddress(obj) && !temp_space_->Contains(obj)) {
1852 return true;
1853 }
1854 return false;
1855}
1856
Mathieu Chartier987ccff2013-07-08 11:05:21 -07001857void Heap::UpdateMaxNativeFootprint() {
1858 size_t native_size = native_bytes_allocated_;
1859 // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
1860 size_t target_size = native_size / GetTargetHeapUtilization();
1861 if (target_size > native_size + max_free_) {
1862 target_size = native_size + max_free_;
1863 } else if (target_size < native_size + min_free_) {
1864 target_size = native_size + min_free_;
1865 }
1866 native_footprint_gc_watermark_ = target_size;
1867 native_footprint_limit_ = 2 * target_size - native_size;
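  // Illustrative example (assumed values): with native_bytes_allocated_ = 10 MB, target
  // utilization 0.5, min_free_ = 2 MB and max_free_ = 8 MB, target_size = 20 MB is clamped to
  // 18 MB, so the GC watermark is 18 MB and the limit is 2 * 18 MB - 10 MB = 26 MB.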
1868}
1869
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07001870void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001871 // We know what our utilization is at this moment.
1872 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
Mathieu Chartier65db8802012-11-20 12:36:46 -08001873 const size_t bytes_allocated = GetBytesAllocated();
1874 last_gc_size_ = bytes_allocated;
Ian Rogers1d54e732013-05-02 21:10:01 -07001875 last_gc_time_ns_ = NanoTime();
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07001876 size_t target_size;
1877 if (gc_type != collector::kGcTypeSticky) {
1878 // Grow the heap for non sticky GC.
1879 target_size = bytes_allocated / GetTargetHeapUtilization();
1880 if (target_size > bytes_allocated + max_free_) {
1881 target_size = bytes_allocated + max_free_;
1882 } else if (target_size < bytes_allocated + min_free_) {
1883 target_size = bytes_allocated + min_free_;
1884 }
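    // Worked example (illustrative numbers): with 30 MB allocated, target utilization 0.75 and
    // max_free_ = 8 MB, target_size = 40 MB exceeds bytes_allocated + max_free_ = 38 MB, so the
    // footprint target is clamped to 38 MB.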
Mathieu Chartier590fee92013-09-13 13:46:47 -07001885 native_need_to_run_finalization_ = true;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07001886 next_gc_type_ = collector::kGcTypeSticky;
1887 } else {
1888 // Based on how close the current heap size is to the target size, decide
1889 // whether or not to do a partial or sticky GC next.
1890 if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
1891 next_gc_type_ = collector::kGcTypeSticky;
1892 } else {
1893 next_gc_type_ = collector::kGcTypePartial;
1894 }
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07001895 // If we have freed enough memory, shrink the heap back down.
1896 if (bytes_allocated + max_free_ < max_allowed_footprint_) {
1897 target_size = bytes_allocated + max_free_;
1898 } else {
1899 target_size = std::max(bytes_allocated, max_allowed_footprint_);
1900 }
1901 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001902 if (!ignore_max_footprint_) {
1903 SetIdealFootprint(target_size);
Mathieu Chartier7bf82af2013-12-06 16:51:45 -08001904 if (concurrent_gc_) {
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001905 // Calculate when to perform the next ConcurrentGC.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001906 // Calculate the estimated GC duration.
1907 double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
1908 // Estimate how many remaining bytes we will have when we need to start the next GC.
1909 size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
1910 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
1911 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
1912        // A situation that should never happen: the estimated allocation rate implies we would
1913        // allocate more than the application's entire footprint during a single GC. Schedule
1914        // another GC straight away.
1915 concurrent_start_bytes_ = bytes_allocated;
1916 } else {
1917 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
1918 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
1919 // right away.
Mathieu Chartier50482232013-11-21 11:48:14 -08001920 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
1921 bytes_allocated);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001922 }
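      // Worked example (illustrative numbers): with allocation_rate_ = 20 MB/s and a 50 ms GC
      // duration, remaining_bytes = 1 MB (or kMinConcurrentRemainingBytes if that is larger), so
      // a concurrent GC is requested once about 1 MB of headroom remains below max_allowed_footprint_.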
1923 DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_);
1924 DCHECK_LE(max_allowed_footprint_, growth_limit_);
Mathieu Chartier65db8802012-11-20 12:36:46 -08001925 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08001926 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001927}
1928
jeffhaoc1160702011-10-27 15:48:45 -07001929void Heap::ClearGrowthLimit() {
Mathieu Chartier80de7a62012-11-27 17:21:50 -08001930 growth_limit_ = capacity_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001931 non_moving_space_->ClearGrowthLimit();
jeffhaoc1160702011-10-27 15:48:45 -07001932}
1933
Elliott Hughesadb460d2011-10-05 17:02:34 -07001934void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
Mathieu Chartier50482232013-11-21 11:48:14 -08001935 MemberOffset reference_queue_offset,
1936 MemberOffset reference_queueNext_offset,
1937 MemberOffset reference_pendingNext_offset,
1938 MemberOffset finalizer_reference_zombie_offset) {
Elliott Hughesadb460d2011-10-05 17:02:34 -07001939 reference_referent_offset_ = reference_referent_offset;
1940 reference_queue_offset_ = reference_queue_offset;
1941 reference_queueNext_offset_ = reference_queueNext_offset;
1942 reference_pendingNext_offset_ = reference_pendingNext_offset;
1943 finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
1944 CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1945 CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
1946 CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
1947 CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
1948 CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
1949}
1950
Mathieu Chartier590fee92013-09-13 13:46:47 -07001951void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) {
1952 DCHECK(reference != NULL);
1953 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1954 reference->SetFieldObject(reference_referent_offset_, referent, true);
1955}
1956
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001957mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
Elliott Hughesadb460d2011-10-05 17:02:34 -07001958 DCHECK(reference != NULL);
1959 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001960 return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
Elliott Hughesadb460d2011-10-05 17:02:34 -07001961}
1962
void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
  ScopedObjectAccess soa(self);
  JValue result;
  ArgArray arg_array(NULL, 0);
  arg_array.Append(reinterpret_cast<uint32_t>(object));
  soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self,
      arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
}

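// Passes the references whose referents were cleared during GC to the Java side via
// ReferenceQueue.add(), which enqueues them onto their registered queues.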
void Heap::EnqueueClearedReferences() {
  if (!cleared_references_.IsEmpty()) {
    // When the runtime isn't started there are no reference queues to care about, so ignore.
    if (LIKELY(Runtime::Current()->IsStarted())) {
      ScopedObjectAccess soa(Thread::Current());
      JValue result;
      ArgArray arg_array(NULL, 0);
      arg_array.Append(reinterpret_cast<uint32_t>(cleared_references_.GetList()));
      soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
          arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
    }
    cleared_references_.Clear();
  }
}

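// Asks the Java GC daemon (Daemons.requestGC) to perform a background collection so the
// requesting thread does not have to pause for it.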
void Heap::RequestConcurrentGC(Thread* self) {
  // Make sure that we can do a concurrent GC.
  Runtime* runtime = Runtime::Current();
  if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
      self->IsHandlingStackOverflow()) {
    return;
  }
  // Mark the request as pending: there is no reason to start more GCs until
  // concurrent_start_bytes_ is recomputed.
  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  JNIEnv* env = self->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestGC);
  CHECK(!env->ExceptionCheck());
}

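// Runs the background collection requested by RequestConcurrentGC(): tries next_gc_type_ first
// and, if that collector cannot run, falls back to the more thorough types in gc_plan_.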
void Heap::ConcurrentGC(Thread* self) {
  if (Runtime::Current()->IsShuttingDown(self)) {
    return;
  }
  // Wait for any GCs currently running to finish.
  if (WaitForGcToComplete(self) == collector::kGcTypeNone) {
    // If we can't run the GC type we wanted to run, find the next appropriate one and try that
    // instead. E.g. can't do partial, so do full instead.
    if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
        collector::kGcTypeNone) {
      for (collector::GcType gc_type : gc_plan_) {
        // Attempt to run the collector; if we succeed, we are done.
        if (gc_type > next_gc_type_ &&
            CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
          break;
        }
      }
    }
  }
}

void Heap::RequestHeapTrim() {
  // GC completed and now we must decide whether to request a heap trim (advising pages back to the
  // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
  // a space it will hold its lock and can become a cause of jank.
  // Note: the large object space trims itself, and the Zygote space was trimmed at fork time and
  // has been unchanging since.

  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
  // because that only marks object heads, so a large array looks like lots of empty space. We
  // don't just ask dlmalloc to trim all the time, because the cost of an _attempted_ trim is
  // proportional to utilization (which is probably inversely proportional to how much benefit we
  // can expect). We could try mincore(2) but that's only a measure of how many pages we haven't
  // given away, not how much use we're making of those pages.
  uint64_t ms_time = MilliTime();
  // Don't bother trimming the alloc space if a heap trim occurred in the last two seconds.
  if (ms_time - last_trim_time_ms_ < 2 * 1000) {
    return;
  }

  Thread* self = Thread::Current();
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
    // Heap trimming isn't supported without a Java runtime or a Daemons thread (such as at
    // dex2oat time). Also, we do not wish to start a heap trim if the runtime is shutting down
    // (a racy check, as we don't hold the lock while requesting the trim).
    return;
  }

  last_trim_time_ms_ = ms_time;

  // Trim only if we do not currently care about pause times.
  if (!CareAboutPauseTimes()) {
    JNIEnv* env = self->GetJniEnv();
    DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
    DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
    env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                              WellKnownClasses::java_lang_Daemons_requestHeapTrim);
    CHECK(!env->ExceptionCheck());
  }
}

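// Revoking hands a thread's local allocation buffers back to the owning space so the memory they
// hold is accounted for globally and can be reused by other threads.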
void Heap::RevokeThreadLocalBuffers(Thread* thread) {
  non_moving_space_->RevokeThreadLocalBuffers(thread);
}

void Heap::RevokeAllThreadLocalBuffers() {
  non_moving_space_->RevokeAllThreadLocalBuffers();
}

bool Heap::IsGCRequestPending() const {
  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
}

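// Runs pending finalizers synchronously via System.runFinalization(); the method id is cached
// lazily here because java.lang.System is not fully set up when WellKnownClasses::Init runs.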
void Heap::RunFinalization(JNIEnv* env) {
  // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
  if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
    CHECK(WellKnownClasses::java_lang_System != nullptr);
    WellKnownClasses::java_lang_System_runFinalization =
        CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
    CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
  }
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
                            WellKnownClasses::java_lang_System_runFinalization);
}

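// Accounts for native memory logically owned by Java objects (typically reported through
// dalvik.system.VMRuntime.registerNativeAllocation). Passing the GC watermark triggers a
// collection; passing the higher hard limit waits for or performs a GC and runs finalizers so
// the native memory can actually be released.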
void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
  Thread* self = ThreadForEnv(env);
  if (native_need_to_run_finalization_) {
    RunFinalization(env);
    UpdateMaxNativeFootprint();
    native_need_to_run_finalization_ = false;
  }
  // Total number of native bytes allocated.
  native_bytes_allocated_.fetch_add(bytes);
  if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
    collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
        collector::kGcTypeFull;

    // The second watermark is higher than the gc watermark. If you hit this it means you are
    // allocating native objects faster than the GC can keep up with.
    if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
      if (WaitForGcToComplete(self) != collector::kGcTypeNone) {
        // Just finished a GC, attempt to run finalizers.
        RunFinalization(env);
        CHECK(!env->ExceptionCheck());
      }
      // If we are still over the watermark, attempt a GC for alloc and run finalizers.
      if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
        RunFinalization(env);
        native_need_to_run_finalization_ = false;
        CHECK(!env->ExceptionCheck());
      }
      // We have just run finalizers; update the native watermark since it is very likely that
      // finalizers released native allocations.
      UpdateMaxNativeFootprint();
    } else if (!IsGCRequestPending()) {
      if (concurrent_gc_) {
        RequestConcurrentGC(self);
      } else {
        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
      }
    }
  }
}

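// Counterpart of RegisterNativeAllocation: a compare-and-swap loop atomically subtracts the freed
// bytes and throws a RuntimeException if more bytes are freed than were registered.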
void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
  int expected_size, new_size;
  do {
    expected_size = native_bytes_allocated_.load();
    new_size = expected_size - bytes;
    if (UNLIKELY(new_size < 0)) {
      ScopedObjectAccess soa(env);
      env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
                    StringPrintf("Attempted to free %d native bytes with only %d native bytes "
                                 "registered as allocated", bytes, expected_size).c_str());
      break;
    }
  } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size));
}

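// Returns the heap's total footprint: the sizes of all continuous spaces except the image space,
// plus the bytes currently allocated in large object spaces.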
int64_t Heap::GetTotalMemory() const {
  int64_t ret = 0;
  for (const auto& space : continuous_spaces_) {
    // Currently don't include the image space.
    if (!space->IsImageSpace()) {
      ret += space->Size();
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    if (space->IsLargeObjectSpace()) {
      ret += space->AsLargeObjectSpace()->GetBytesAllocated();
    }
  }
  return ret;
}

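// Registers a mod-union table keyed by the space it covers; collections that do not scan that
// space use the table to find references from it into the spaces being collected.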
void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
  DCHECK(mod_union_table != nullptr);
  mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
}

}  // namespace gc
}  // namespace art