/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <vector>
#include <valgrind.h>

#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "heap-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "os.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

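// Presumably defined with the quick allocation entrypoints (hence only an extern declaration
// here); selects which allocator the compiled fast-path allocation stubs use.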
extern void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator);

namespace gc {

static constexpr bool kGCALotMode = false;
static constexpr size_t kGcAlotInterval = KB;
static constexpr bool kDumpGcPerformanceOnShutdown = false;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
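// Allocators to switch to before and after the zygote fork; both currently resolve to the
// free-list backed malloc space (dlmalloc or rosalloc, depending on kUseRosAlloc below).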
static constexpr AllocatorType kDefaultPreZygoteAllocator = kAllocatorTypeFreeList;
static constexpr AllocatorType kDefaultPostZygoteAllocator = kAllocatorTypeFreeList;

Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, size_t capacity, const std::string& image_file_name,
           bool concurrent_gc, size_t parallel_gc_threads, size_t conc_gc_threads,
           bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold,
           bool ignore_max_footprint)
    : non_moving_space_(nullptr),
      concurrent_gc_(concurrent_gc),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      have_zygote_space_(false),
      soft_reference_queue_(this),
      weak_reference_queue_(this),
      finalizer_reference_queue_(this),
      phantom_reference_queue_(this),
      cleared_references_(this),
      is_gc_running_(false),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      native_need_to_run_finalization_(false),
      activity_thread_class_(NULL),
      application_thread_class_(NULL),
      activity_thread_(NULL),
      application_thread_(NULL),
      last_process_state_id_(NULL),
      // Initially care about pauses in case we never get notified of process states, or if the JNI
      // code becomes broken.
      care_about_pause_times_(true),
      concurrent_start_bytes_(concurrent_gc_ ? initial_size - kMinConcurrentRemainingBytes
                                             : std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(false),
      verify_post_gc_heap_(false),
      verify_mod_union_table_(false),
      min_alloc_space_size_for_sticky_gc_(2 * MB),
      min_remaining_space_for_sticky_gc_(1 * MB),
      last_trim_time_ms_(0),
      allocation_rate_(0),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
      current_allocator_(kMovingCollector ? kAllocatorTypeBumpPointer : kAllocatorTypeFreeList),
      current_non_moving_allocator_(kAllocatorTypeFreeList),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kHeapVerificationNotPermitted),
      gc_disable_count_(0),
      running_on_valgrind_(RUNNING_ON_VALGRIND) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  // If we aren't the zygote, switch to the default non-zygote allocator. This may update the
  // entrypoints.
  if (!Runtime::Current()->IsZygote()) {
    ChangeAllocator(kDefaultPreZygoteAllocator);
  }
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files.
  byte* requested_alloc_space_begin = nullptr;
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str());
    CHECK(image_space != nullptr) << "Failed to create space for " << image_file_name;
    AddSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory; ensure the alloc
    // space isn't going to end up in the middle.
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    if (oat_file_end_addr > requested_alloc_space_begin) {
      requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
    }
  }

  const char* name = Runtime::Current()->IsZygote() ? "zygote space" : "alloc space";
  if (!kUseRosAlloc) {
    non_moving_space_ = space::DlMallocSpace::Create(name, initial_size, growth_limit, capacity,
                                                     requested_alloc_space_begin);
  } else {
    non_moving_space_ = space::RosAllocSpace::Create(name, initial_size, growth_limit, capacity,
                                                     requested_alloc_space_begin);
  }
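  // With a moving collector enabled, two equally sized bump pointer spaces are created up front;
  // the semi-space collector copies live objects between them, with temp_space_ presumably
  // serving as the to-space of each collection.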
  if (kMovingCollector) {
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    // TODO: Having 3+ spaces as big as the large heap size can cause virtual memory fragmentation
    // issues.
    const size_t bump_pointer_space_size = std::min(non_moving_space_->Capacity(), 128 * MB);
    bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
                                                          bump_pointer_space_size, nullptr);
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", bump_pointer_space_size,
                                                  nullptr);
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
  }

  CHECK(non_moving_space_ != NULL) << "Failed to create non-moving space";
  non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
  AddSpace(non_moving_space_);

  // Allocate the large object space.
  const bool kUseFreeListSpaceForLOS = false;
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != NULL) << "Failed to create large object space";
  AddSpace(large_object_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  byte* heap_begin = continuous_spaces_.front()->Begin();
  byte* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  // Use a card-cache mod-union table for now, since it makes it easier for us to update the
  // references into the copying spaces.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableCardCache("Image mod-union table", this, GetImageSpace());
  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
  AddModUnionTable(mod_union_table);

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;

  // Default mark stack size in bytes.
  static const size_t default_mark_stack_size = 64 * KB;
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
                                                          max_allocation_stack_size_));
  live_stack_.reset(accounting::ObjectStack::Create("live stack",
                                                    max_allocation_stack_size_));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  last_gc_time_ns_ = NanoTime();
  last_gc_size_ = GetBytesAllocated();

  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);

  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }
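  // The GC plan is ordered from cheapest to most thorough; AllocateInternalWithGc walks it in
  // this order until an allocation succeeds.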
  gc_plan_.push_back(collector::kGcTypeSticky);
  gc_plan_.push_back(collector::kGcTypePartial);
  gc_plan_.push_back(collector::kGcTypeFull);
  if (kMovingCollector) {
    // TODO: Clean this up.
    semi_space_collector_ = new collector::SemiSpace(this);
    garbage_collectors_.push_back(semi_space_collector_);
  }

  if (running_on_valgrind_) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

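// Swaps the allocator used by the fast allocation path; resetting the quick alloc entrypoints
// presumably makes already-attached threads pick up the new allocator on their next allocation.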
void Heap::ChangeAllocator(AllocatorType allocator) {
  DCHECK_NE(allocator, kAllocatorTypeLOS);
  if (current_allocator_ != allocator) {
    current_allocator_ = allocator;
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

bool Heap::IsCompilingBoot() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return false;
    } else if (space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

bool Heap::HasImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return true;
    }
  }
  return false;
}

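// Disabling GC blocks until any in-progress collection finishes and then holds off new ones;
// the count allows nested disable/enable pairs (presumably used around code that must not
// observe objects moving).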
void Heap::IncrementDisableGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  WaitForGcToCompleteLocked(self);
  ++gc_disable_count_;
}

void Heap::DecrementDisableGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  // The count is unsigned, so check it is strictly positive before decrementing to catch
  // unbalanced enable calls instead of silently underflowing.
  CHECK_GT(gc_disable_count_, 0U);
  --gc_disable_count_;
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

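// Visits every live object in the heap. Bump pointer spaces have no live bitmap, so their
// objects are walked linearly from Begin() to End(); objects still sitting in the allocation
// stack are visited explicitly for the same reason.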
void Heap::VisitObjects(ObjectVisitorCallback callback, void* arg) {
  // Visit objects in bump pointer space.
  Thread* self = Thread::Current();
  // TODO: Use reference block.
  std::vector<SirtRef<mirror::Object>*> saved_refs;
  if (bump_pointer_space_ != nullptr) {
    // Need to put all these in sirts since the callback may trigger a GC. TODO: Use a better data
    // structure.
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(bump_pointer_space_->Begin());
    const mirror::Object* end = reinterpret_cast<const mirror::Object*>(
        bump_pointer_space_->End());
    while (obj < end) {
      saved_refs.push_back(new SirtRef<mirror::Object>(self, obj));
      obj = space::BumpPointerSpace::GetNextObject(obj);
    }
  }
  // TODO: Switch to standard begin and end to use a range-based loop.
  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
      it < end; ++it) {
    mirror::Object* obj = *it;
    // Objects in the allocation stack might be in a movable space.
    saved_refs.push_back(new SirtRef<mirror::Object>(self, obj));
  }
  GetLiveBitmap()->Walk(callback, arg);
  for (const auto& ref : saved_refs) {
    callback(ref->get(), arg);
  }
  // Need to free the sirts in the reverse order they were allocated.
  for (size_t i = saved_refs.size(); i != 0; --i) {
    delete saved_refs[i - 1];
  }
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  MarkAllocStack(non_moving_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(), stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

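// Helper for ListenForProcessStateChange: reads a static int field |name| from |clz| via JNI,
// clearing any pending exception and returning false if the field is missing.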
static bool ReadStaticInt(JNIEnvExt* env, jclass clz, const char* name, int* out_value) {
  DCHECK(out_value != NULL);
  jfieldID field = env->GetStaticFieldID(clz, name, "I");
  if (field == NULL) {
    env->ExceptionClear();
    return false;
  }
  *out_value = env->GetStaticIntField(clz, field);
  return true;
}

void Heap::ListenForProcessStateChange() {
  VLOG(heap) << "Heap notified of process state change";

  Thread* self = Thread::Current();
  JNIEnvExt* env = self->GetJniEnv();

  if (!have_zygote_space_) {
    return;
  }

  if (activity_thread_class_ == NULL) {
    jclass clz = env->FindClass("android/app/ActivityThread");
    if (clz == NULL) {
      env->ExceptionClear();
      LOG(WARNING) << "Could not find activity thread class in process state change";
      return;
    }
    activity_thread_class_ = reinterpret_cast<jclass>(env->NewGlobalRef(clz));
  }

  if (activity_thread_class_ != NULL && activity_thread_ == NULL) {
    jmethodID current_activity_method = env->GetStaticMethodID(activity_thread_class_,
                                                               "currentActivityThread",
                                                               "()Landroid/app/ActivityThread;");
    if (current_activity_method == NULL) {
      env->ExceptionClear();
      LOG(WARNING) << "Could not get method for currentActivityThread";
      return;
    }

    jobject obj = env->CallStaticObjectMethod(activity_thread_class_, current_activity_method);
    if (obj == NULL) {
      env->ExceptionClear();
      LOG(WARNING) << "Could not get current activity";
      return;
    }
    activity_thread_ = env->NewGlobalRef(obj);
  }

  if (process_state_cares_about_pause_time_.empty()) {
    // Just attempt to do this the first time.
    jclass clz = env->FindClass("android/app/ActivityManager");
    if (clz == NULL) {
      LOG(WARNING) << "Activity manager class is null";
      return;
    }
    ScopedLocalRef<jclass> activity_manager(env, clz);
    std::vector<const char*> care_about_pauses;
    care_about_pauses.push_back("PROCESS_STATE_TOP");
    care_about_pauses.push_back("PROCESS_STATE_IMPORTANT_BACKGROUND");
    // Attempt to read the constants and classify them by whether or not we care about pause times.
    for (size_t i = 0; i < care_about_pauses.size(); ++i) {
      int process_state = 0;
      if (ReadStaticInt(env, activity_manager.get(), care_about_pauses[i], &process_state)) {
        process_state_cares_about_pause_time_.insert(process_state);
        VLOG(heap) << "Adding process state " << process_state
                   << " to set of states which care about pause time";
      }
    }
  }

  if (application_thread_class_ == NULL) {
    jclass clz = env->FindClass("android/app/ActivityThread$ApplicationThread");
    if (clz == NULL) {
      env->ExceptionClear();
      LOG(WARNING) << "Could not get application thread class";
      return;
    }
    application_thread_class_ = reinterpret_cast<jclass>(env->NewGlobalRef(clz));
    last_process_state_id_ = env->GetFieldID(application_thread_class_, "mLastProcessState", "I");
    if (last_process_state_id_ == NULL) {
      env->ExceptionClear();
      LOG(WARNING) << "Could not get last process state member";
      return;
    }
  }

  if (application_thread_class_ != NULL && application_thread_ == NULL) {
    jmethodID get_application_thread =
        env->GetMethodID(activity_thread_class_, "getApplicationThread",
                         "()Landroid/app/ActivityThread$ApplicationThread;");
    if (get_application_thread == NULL) {
      LOG(WARNING) << "Could not get method ID for getApplicationThread";
      return;
    }

    jobject obj = env->CallObjectMethod(activity_thread_, get_application_thread);
    if (obj == NULL) {
      LOG(WARNING) << "Could not get application thread";
      return;
    }

    application_thread_ = env->NewGlobalRef(obj);
  }

  if (application_thread_ != NULL && last_process_state_id_ != NULL) {
    int process_state = env->GetIntField(application_thread_, last_process_state_id_);
    env->ExceptionClear();

    care_about_pause_times_ = process_state_cares_about_pause_time_.find(process_state) !=
        process_state_cares_about_pause_time_.end();

    VLOG(heap) << "New process state " << process_state
               << " care about pauses " << care_about_pause_times_;
  }
}

void Heap::AddSpace(space::Space* space) {
  DCHECK(space != NULL);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }

    continuous_spaces_.push_back(continuous_space);
    if (continuous_space->IsMallocSpace()) {
      non_moving_space_ = continuous_space->AsMallocSpace();
    }

    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
                return a->Begin() < b->Begin();
              });
    // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address-based checks
    // to avoid redundant marking.
    bool seen_zygote = false, seen_alloc = false;
    for (const auto& space : continuous_spaces_) {
      if (space->IsImageSpace()) {
        CHECK(!seen_zygote);
        CHECK(!seen_alloc);
      } else if (space->IsZygoteSpace()) {
        CHECK(!seen_alloc);
        seen_zygote = true;
      } else if (space->IsMallocSpace()) {
        seen_alloc = true;
      }
    }
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
    live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
    mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

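// Note: the gc_memory_overhead_ book-keeping below may be invoked through a null Heap*
// (presumably during early startup or shutdown), hence the unusual 'this != nullptr' guards.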
void Heap::RegisterGCAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.fetch_add(bytes);
  }
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.fetch_sub(bytes);
  }
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;

  // Dump cumulative loggers for each GC type.
  uint64_t total_paused_time = 0;
  for (const auto& collector : garbage_collectors_) {
    CumulativeLogger& logger = collector->GetCumulativeTimings();
    if (logger.GetTotalNs() != 0) {
      os << Dumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      Histogram<uint64_t>::CumulativeData cumulative_data;
      collector->GetPauseHistogram().CreateHistogram(&cumulative_data);
      collector->GetPauseHistogram().PrintConfidenceIntervals(os, 0.99, cumulative_data);
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
  }
  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (kMeasureAllocationTime) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
}

Heap::~Heap() {
  VLOG(heap) << "Starting ~Heap()";
  if (kDumpGcPerformanceOnShutdown) {
    DumpGcPerformanceInfo(LOG(INFO));
  }
  STLDeleteElements(&garbage_collectors_);
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();
  STLDeleteValues(&mod_union_tables_);
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  VLOG(heap) << "Finished ~Heap()";
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

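// Bundles the callbacks needed by PreserveSoftReferenceCallback so they can be passed through a
// single void* argument.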
struct SoftReferenceArgs {
  RootVisitor* is_marked_callback_;
  RootVisitor* recursive_mark_callback_;
  void* arg_;
};

mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
  SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
  // TODO: Don't preserve all soft references.
  return args->recursive_mark_callback_(obj, args->arg_);
}

// Process reference class instances and schedule finalizations.
void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
                             RootVisitor* is_marked_callback,
                             RootVisitor* recursive_mark_object_callback, void* arg) {
  // Unless we are in the zygote or required to clear soft references with white references,
  // preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    SoftReferenceArgs soft_reference_args;
    soft_reference_args.is_marked_callback_ = is_marked_callback;
    soft_reference_args.recursive_mark_callback_ = recursive_mark_object_callback;
    soft_reference_args.arg_ = arg;
    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
                                                     &soft_reference_args);
  }
  timings.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  timings.EndSplit();
  // Preserve all white objects with finalize methods and schedule them for finalization.
  timings.StartSplit("EnqueueFinalizerReferences");
  finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
                                                        recursive_mark_object_callback, arg);
  timings.EndSplit();
  timings.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  timings.EndSplit();
}

bool Heap::IsEnqueued(mirror::Object* ref) const {
  // The references are stored in cyclic lists, so once a reference has been enqueued its pending
  // next field is always non-null.
  return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
}

bool Heap::IsEnqueuable(const mirror::Object* ref) const {
  DCHECK(ref != nullptr);
  const mirror::Object* queue =
      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueOffset(), false);
  const mirror::Object* queue_next =
      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueNextOffset(), false);
  return queue != nullptr && queue_next == nullptr;
}

726// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
727// marked, put it on the appropriate list in the heap for later processing.
728void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
729 RootVisitor mark_visitor, void* arg) {
730 DCHECK(klass != nullptr);
731 DCHECK(klass->IsReferenceClass());
732 DCHECK(obj != nullptr);
733 mirror::Object* referent = GetReferenceReferent(obj);
734 if (referent != nullptr) {
735 mirror::Object* forward_address = mark_visitor(referent, arg);
736 // Null means that the object is not currently marked.
737 if (forward_address == nullptr) {
738 Thread* self = Thread::Current();
739 // TODO: Remove these locks, and use atomic stacks for storing references?
740 // We need to check that the references haven't already been enqueued since we can end up
741 // scanning the same reference multiple times due to dirty cards.
742 if (klass->IsSoftReferenceClass()) {
743 soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
744 } else if (klass->IsWeakReferenceClass()) {
745 weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
746 } else if (klass->IsFinalizerReferenceClass()) {
747 finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
748 } else if (klass->IsPhantomReferenceClass()) {
749 phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
750 } else {
751 LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
752 << klass->GetAccessFlags();
753 }
754 } else if (referent != forward_address) {
755 // Referent is already marked and we need to update it.
756 SetReferenceReferent(obj, forward_address);
757 }
758 }
759}
760
space::ImageSpace* Heap::GetImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return space->AsImageSpace();
    }
  }
  return NULL;
}

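// Callback passed to MallocSpace::Walk below; records the largest contiguous free chunk seen so
// that it can be reported in OutOfMemoryError messages.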
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) {
  std::ostringstream oss;
  int64_t total_bytes_free = GetFreeMemory();
  oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
      << " free bytes";
  // If the allocation failed due to fragmentation, print out the largest continuous allocation.
  if (!large_object_allocation && total_bytes_free >= byte_count) {
    size_t max_contiguous_allocation = 0;
    for (const auto& space : continuous_spaces_) {
      if (space->IsMallocSpace()) {
        // To allow the Walk/InspectAll() to exclusively-lock the mutator
        // lock, temporarily release the shared access to the mutator
        // lock here by transitioning to the suspended state.
        Locks::mutator_lock_->AssertSharedHeld(self);
        self->TransitionFromRunnableToSuspended(kSuspended);
        space->AsMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
        self->TransitionFromSuspendedToRunnable();
        Locks::mutator_lock_->AssertSharedHeld(self);
      }
    }
    oss << "; failed due to fragmentation (largest possible contiguous allocation "
        << max_contiguous_allocation << " bytes)";
  }
  self->ThrowOutOfMemoryError(oss.str().c_str());
}

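// Returns unused pages to the OS for both the managed malloc spaces and the native dlmalloc
// heap, logging how much was advised away and the resulting managed utilization.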
void Heap::Trim() {
  uint64_t start_ns = NanoTime();
  // Trim the managed spaces.
  uint64_t total_alloc_space_allocated = 0;
  uint64_t total_alloc_space_size = 0;
  uint64_t managed_reclaimed = 0;
  for (const auto& space : continuous_spaces_) {
    if (space->IsMallocSpace() && !space->IsZygoteSpace()) {
      gc::space::MallocSpace* alloc_space = space->AsMallocSpace();
      total_alloc_space_size += alloc_space->Size();
      managed_reclaimed += alloc_space->Trim();
    }
  }
  total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated() -
      bump_pointer_space_->GetBytesAllocated();
  const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
      static_cast<float>(total_alloc_space_size);
  uint64_t gc_heap_end_ns = NanoTime();
  // Trim the native heap.
  dlmalloc_trim(0);
  size_t native_reclaimed = 0;
  dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
  uint64_t end_ns = NanoTime();
  VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
      << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
      << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
      << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
      << "%.";
}

bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would require
  // taking the lock.
  if (obj == nullptr) {
    return true;
  }
  return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
}

bool Heap::IsHeapAddress(const mirror::Object* obj) const {
  if (kMovingCollector && bump_pointer_space_->HasAddress(obj)) {
    return true;
  }
  // TODO: This probably doesn't work for large objects.
  return FindSpaceFromObject(obj, true) != nullptr;
}

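// Liveness check that tolerates the allocation-stack/live-stack swap: the bitmaps are consulted
// first, then the stacks are checked (retrying up to five times with short sleeps when they may
// still be unsorted) before the bitmaps are re-checked to close the race described below.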
bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack,
                              bool search_live_stack, bool sorted) {
  // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
  if (obj == nullptr || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
  space::DiscontinuousSpace* d_space = NULL;
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else if (bump_pointer_space_->Contains(obj) || temp_space_->Contains(obj)) {
    return true;
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL) {
      if (d_space->GetLiveObjects()->Test(obj)) {
        return true;
      }
    }
  }
  // This covers the allocation/live stack swapping that is done without mutators suspended.
  for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
    if (i > 0) {
      NanoSleep(MsToNs(10));
    }
    if (search_allocation_stack) {
      if (sorted) {
        if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          return true;
        }
      } else if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj))) {
        return true;
      }
    }

    if (search_live_stack) {
      if (sorted) {
        if (live_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          return true;
        }
      } else if (live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
        return true;
      }
    }
  }
  // We need to check the bitmaps again since there is a race where we mark something as live and
  // then clear the stack containing it.
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) {
      return true;
    }
  }
  return false;
}

void Heap::VerifyObjectImpl(const mirror::Object* obj) {
  if (Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}

void Heap::DumpSpaces(std::ostream& stream) {
  for (const auto& space : continuous_spaces_) {
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    stream << space << " " << *space << "\n";
    if (live_bitmap != nullptr) {
      stream << live_bitmap << " " << *live_bitmap << "\n";
    }
    if (mark_bitmap != nullptr) {
      stream << mark_bitmap << " " << *mark_bitmap << "\n";
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    stream << space << " " << *space << "\n";
  }
}

void Heap::VerifyObjectBody(const mirror::Object* obj) {
  CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
  // Ignore early dawn of the universe verifications.
  if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.load()) < 10 * KB)) {
    return;
  }
  const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
      mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  if (UNLIKELY(c == NULL)) {
    LOG(FATAL) << "Null class in object: " << obj;
  } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
    LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
  }
  // Check obj.getClass().getClass() == obj.getClass().getClass().getClass().
  // Note: we don't use the accessors here as they have internal sanity checks that we don't want
  // to run.
  raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  CHECK_EQ(c_c, c_c_c);

  if (verify_object_mode_ > kVerifyAllFast) {
    // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
    // heap_bitmap_lock_.
    if (!IsLiveObjectLocked(obj)) {
      DumpSpaces();
      LOG(FATAL) << "Object is dead: " << obj;
    }
    if (!IsLiveObjectLocked(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
  }
}

void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
  DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
  num_bytes_allocated_.fetch_sub(freed_bytes);

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    thread_stats->freed_objects += freed_objects;
    thread_stats->freed_bytes += freed_bytes;

    // TODO: Do this concurrently.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
  }
}

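// Slow path taken when the fast-path allocation fails: wait out any running GC and retry, then
// run progressively more thorough collections from gc_plan_, then retry with heap growth
// allowed, and finally force a full GC that clears SoftReferences before giving up with OOME.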
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001001mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
Mathieu Chartier720ef762013-08-17 14:46:54 -07001002 size_t alloc_size, size_t* bytes_allocated) {
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001003 mirror::Object* ptr = nullptr;
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001004 // The allocation failed. If the GC is running, block until it completes, and then retry the
1005 // allocation.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001006 collector::GcType last_gc = WaitForGcToComplete(self);
Ian Rogers1d54e732013-05-02 21:10:01 -07001007 if (last_gc != collector::kGcTypeNone) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001008 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001009 ptr = TryToAllocate<true>(self, allocator, alloc_size, false, bytes_allocated);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001010 }
1011
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001012 // Loop through our different GC types and run a GC for each until we free enough memory.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001013 for (collector::GcType gc_type : gc_plan_) {
1014 if (ptr != nullptr) {
1015 break;
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001016 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001017 // Attempt to run the collector; if it succeeds, retry the allocation.
1018 if (CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001019 // Did we free sufficient memory for the allocation to succeed?
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001020 ptr = TryToAllocate<true>(self, allocator, alloc_size, false, bytes_allocated);
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001021 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001022 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001023 // Allocations have failed after GCs; this is an exceptional state.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001024 if (ptr == nullptr) {
1025 // Try harder, growing the heap if necessary.
1026 ptr = TryToAllocate<true>(self, allocator, alloc_size, true, bytes_allocated);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001027 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001028 if (ptr == nullptr) {
1029 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1030 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1031 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1032 // OOME.
1033 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1034 << " allocation";
1035 // TODO: Run finalization, but this may cause more allocations to occur.
1036 // We don't need a WaitForGcToComplete here either.
1037 DCHECK(!gc_plan_.empty());
1038 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1039 ptr = TryToAllocate<true>(self, allocator, alloc_size, true, bytes_allocated);
1040 if (ptr == nullptr) {
1041 ThrowOutOfMemoryError(self, alloc_size, false);
1042 }
1043 }
1044 return ptr;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001045}
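
// The function above encodes an escalation ladder: wait out any in-flight GC, walk gc_plan_
// from the cheapest collector to the most thorough, grow the heap, and only then collect
// SoftReferences. A self-contained sketch of that ladder (not ART code; try_alloc and run_gc
// are hypothetical hooks standing in for TryToAllocate and CollectGarbageInternal):
namespace {
void* AllocateWithEscalationSketch(void* (*try_alloc)(bool grow),
                                   bool (*run_gc)(int gc_type, bool clear_soft),
                                   const std::vector<int>& gc_plan) {
  void* ptr = nullptr;
  for (int gc_type : gc_plan) {      // Cheapest collector first.
    if (run_gc(gc_type, false)) {    // The collector actually ran; memory may have been freed.
      ptr = try_alloc(false);        // Retry without growing the heap.
      if (ptr != nullptr) {
        return ptr;
      }
    }
  }
  ptr = try_alloc(true);             // Try harder: allow the heap to grow.
  if (ptr == nullptr) {
    run_gc(gc_plan.back(), true);    // Last resort: most thorough GC, clearing SoftReferences.
    ptr = try_alloc(true);           // A nullptr result here means OOME.
  }
  return ptr;
}
}  // namespace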
1046
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001047void Heap::SetTargetHeapUtilization(float target) {
1048 DCHECK_GT(target, 0.0f); // asserted in Java code
1049 DCHECK_LT(target, 1.0f);
1050 target_utilization_ = target;
1051}
1052
Ian Rogers1d54e732013-05-02 21:10:01 -07001053size_t Heap::GetObjectsAllocated() const {
1054 size_t total = 0;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001055 for (space::AllocSpace* space : alloc_spaces_) {
1056 total += space->GetObjectsAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001057 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001058 return total;
1059}
1060
Ian Rogers1d54e732013-05-02 21:10:01 -07001061size_t Heap::GetObjectsAllocatedEver() const {
1062 size_t total = 0;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001063 for (space::AllocSpace* space : alloc_spaces_) {
1064 total += space->GetTotalObjectsAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001065 }
1066 return total;
1067}
1068
1069size_t Heap::GetBytesAllocatedEver() const {
1070 size_t total = 0;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001071 for (space::AllocSpace* space : alloc_spaces_) {
1072 total += space->GetTotalBytesAllocated();
Ian Rogers1d54e732013-05-02 21:10:01 -07001073 }
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001074 return total;
1075}
1076
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001077class InstanceCounter {
1078 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001079 InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001080 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001081 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001082 }
1083
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001084 void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001085 for (size_t i = 0; i < classes_.size(); ++i) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001086 const mirror::Class* instance_class = o->GetClass();
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001087 if (use_is_assignable_from_) {
1088 if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
1089 ++counts_[i];
1090 }
1091 } else {
1092 if (instance_class == classes_[i]) {
1093 ++counts_[i];
1094 }
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001095 }
1096 }
1097 }
1098
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001099 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001100 const std::vector<mirror::Class*>& classes_;
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001101 bool use_is_assignable_from_;
1102 uint64_t* const counts_;
1103
1104 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001105};
1106
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001107void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
Elliott Hughesec0f83d2013-01-15 16:54:08 -08001108 uint64_t* counts) {
1109 // We only want reachable instances, so do a GC. This also ensures that the alloc stack
1110 // is empty, so the live bitmap is the only place we need to look.
1111 Thread* self = Thread::Current();
1112 self->TransitionFromRunnableToSuspended(kNative);
1113 CollectGarbage(false);
1114 self->TransitionFromSuspendedToRunnable();
1115
1116 InstanceCounter counter(classes, use_is_assignable_from, counts);
1117 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001118 GetLiveBitmap()->Visit(counter);
Elliott Hughes9d5ccec2011-09-19 13:19:50 -07001119}
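
// Hypothetical usage sketch (the class pointers here are assumptions, not real handles): count
// live instances of several classes in a single bitmap walk after the forced GC above.
//   std::vector<mirror::Class*> classes = {string_class, byte_array_class};
//   uint64_t counts[2] = {0, 0};
//   Runtime::Current()->GetHeap()->CountInstances(classes, /*use_is_assignable_from=*/true, counts);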
1120
Elliott Hughes3b78c942013-01-15 17:35:41 -08001121class InstanceCollector {
1122 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001123 InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
Elliott Hughes3b78c942013-01-15 17:35:41 -08001124 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1125 : class_(c), max_count_(max_count), instances_(instances) {
1126 }
1127
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001128 void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1129 const mirror::Class* instance_class = o->GetClass();
Elliott Hughes3b78c942013-01-15 17:35:41 -08001130 if (instance_class == class_) {
1131 if (max_count_ == 0 || instances_.size() < max_count_) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001132 instances_.push_back(const_cast<mirror::Object*>(o));
Elliott Hughes3b78c942013-01-15 17:35:41 -08001133 }
1134 }
1135 }
1136
1137 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001138 mirror::Class* class_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001139 uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001140 std::vector<mirror::Object*>& instances_;
Elliott Hughes3b78c942013-01-15 17:35:41 -08001141
1142 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1143};
1144
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001145void Heap::GetInstances(mirror::Class* c, int32_t max_count,
1146 std::vector<mirror::Object*>& instances) {
Elliott Hughes3b78c942013-01-15 17:35:41 -08001147 // We only want reachable instances, so do a GC. This also ensures that the alloc stack
1148 // is empty, so the live bitmap is the only place we need to look.
1149 Thread* self = Thread::Current();
1150 self->TransitionFromRunnableToSuspended(kNative);
1151 CollectGarbage(false);
1152 self->TransitionFromSuspendedToRunnable();
1153
1154 InstanceCollector collector(c, max_count, instances);
1155 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1156 GetLiveBitmap()->Visit(collector);
1157}
1158
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001159class ReferringObjectsFinder {
1160 public:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001161 ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
1162 std::vector<mirror::Object*>& referring_objects)
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001163 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1164 : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1165 }
1166
1167 // For bitmap Visit.
1168 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1169 // annotalysis on visitors.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001170 void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
1171 collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(o), *this, true);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001172 }
1173
1174 // For MarkSweep::VisitObjectReferences.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001175 void operator()(mirror::Object* referrer, mirror::Object* object,
Brian Carlstromdf629502013-07-17 22:39:56 -07001176 const MemberOffset&, bool) const {
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001177 if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001178 referring_objects_.push_back(referrer);
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001179 }
1180 }
1181
1182 private:
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001183 mirror::Object* object_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001184 uint32_t max_count_;
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001185 std::vector<mirror::Object*>& referring_objects_;
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001186
1187 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1188};
1189
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001190void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1191 std::vector<mirror::Object*>& referring_objects) {
Elliott Hughes0cbaff52013-01-16 15:28:01 -08001192 // We only want reachable instances, so do a GC. This also ensures that the alloc stack
1193 // is empty, so the live bitmap is the only place we need to look.
1194 Thread* self = Thread::Current();
1195 self->TransitionFromRunnableToSuspended(kNative);
1196 CollectGarbage(false);
1197 self->TransitionFromSuspendedToRunnable();
1198
1199 ReferringObjectsFinder finder(o, max_count, referring_objects);
1200 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1201 GetLiveBitmap()->Visit(finder);
1202}
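
// Hypothetical usage sketch ('suspect' is an assumption): collect up to 16 live objects that
// hold a reference to 'suspect', e.g. while chasing a leak from a debugger.
//   std::vector<mirror::Object*> referrers;
//   Runtime::Current()->GetHeap()->GetReferringObjects(suspect, 16, referrers);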
1203
Ian Rogers30fab402012-01-23 15:43:46 -08001204void Heap::CollectGarbage(bool clear_soft_references) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001205 // Even if we waited for a GC we still need to do another GC, since weak references allocated
1206 // during the last GC will not necessarily have been cleared.
Ian Rogers1d54e732013-05-02 21:10:01 -07001207 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001208}
1209
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001210void Heap::PreZygoteFork() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001211 static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
Ian Rogers81d425b2012-09-27 16:03:43 -07001212 Thread* self = Thread::Current();
1213 MutexLock mu(self, zygote_creation_lock_);
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001214 // Try to see if we have any Zygote spaces.
1215 if (have_zygote_space_) {
1216 return;
1217 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001218 VLOG(heap) << "Starting PreZygoteFork";
1219 // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
1220 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
1221 // Trim the pages at the end of the non moving space.
1222 non_moving_space_->Trim();
1223 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Mathieu Chartier50482232013-11-21 11:48:14 -08001224 // Change the allocator to the post zygote one.
1225 ChangeAllocator(kDefaultPostZygoteAllocator);
1226 // TODO: Delete bump_pointer_space_ and temp_pointer_space_?
Mathieu Chartier590fee92013-09-13 13:46:47 -07001227 if (semi_space_collector_ != nullptr) {
Mathieu Chartier50482232013-11-21 11:48:14 -08001228 // Create a new bump pointer space which we will compact into.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001229 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
1230 non_moving_space_->Limit());
1231 // Compact the bump pointer space to a new zygote bump pointer space.
1232 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1233 Compact(&target_space, bump_pointer_space_);
1234 CHECK_EQ(temp_space_->GetBytesAllocated(), 0U);
1235 total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
1236 total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
1237 // Update the end and write out image.
1238 non_moving_space_->SetEnd(target_space.End());
1239 non_moving_space_->SetLimit(target_space.Limit());
1240 accounting::SpaceBitmap* bitmap = non_moving_space_->GetLiveBitmap();
1241 // Record the allocations in the bitmap.
1242 VLOG(heap) << "Recording zygote allocations";
1243 mirror::Object* obj = reinterpret_cast<mirror::Object*>(target_space.Begin());
1244 const mirror::Object* end = reinterpret_cast<const mirror::Object*>(target_space.End());
1245 while (obj < end) {
1246 bitmap->Set(obj);
1247 obj = space::BumpPointerSpace::GetNextObject(obj);
1248 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001249 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001250 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
1251 // the remaining available heap memory.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001252 space::MallocSpace* zygote_space = non_moving_space_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001253 non_moving_space_ = zygote_space->CreateZygoteSpace("alloc space");
1254 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
Ian Rogers1d54e732013-05-02 21:10:01 -07001255 // Change the GC retention policy of the zygote space to only collect when full.
1256 zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001257 AddSpace(non_moving_space_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001258 have_zygote_space_ = true;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001259 zygote_space->InvalidateAllocator();
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001260 // Create the zygote space mod union table.
1261 accounting::ModUnionTable* mod_union_table =
1262 new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
1263 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
1264 AddModUnionTable(mod_union_table);
Ian Rogers5f5a2c02012-09-17 10:52:08 -07001265 // Reset the cumulative loggers since we now have a few additional timing phases.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001266 for (const auto& collector : garbage_collectors_) {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001267 collector->ResetCumulativeStatistics();
Mathieu Chartier0325e622012-09-05 14:22:51 -07001268 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001269}
1270
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001271void Heap::FlushAllocStack() {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001272 MarkAllocStack(non_moving_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001273 allocation_stack_.get());
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001274 allocation_stack_->Reset();
1275}
1276
Ian Rogers1d54e732013-05-02 21:10:01 -07001277void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
1278 accounting::ObjectStack* stack) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001279 mirror::Object** limit = stack->End();
1280 for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
1281 const mirror::Object* obj = *it;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001282 DCHECK(obj != NULL);
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001283 if (LIKELY(bitmap->HasAddress(obj))) {
1284 bitmap->Set(obj);
1285 } else {
1286 large_objects->Set(obj);
1287 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001288 }
1289}
1290
Mathieu Chartier590fee92013-09-13 13:46:47 -07001291const char* PrettyCause(GcCause cause) {
1292 switch (cause) {
1293 case kGcCauseForAlloc: return "Alloc";
1294 case kGcCauseBackground: return "Background";
1295 case kGcCauseExplicit: return "Explicit";
1296 default:
1297 LOG(FATAL) << "Unreachable";
1298 }
1299 return "";
1300}
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001301
Mathieu Chartier590fee92013-09-13 13:46:47 -07001302void Heap::SwapSemiSpaces() {
1303 // Swap the spaces so we allocate into the space which we just evacuated.
1304 std::swap(bump_pointer_space_, temp_space_);
1305}
1306
1307void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
1308 space::ContinuousMemMapAllocSpace* source_space) {
1309 CHECK(kMovingCollector);
Mathieu Chartier50482232013-11-21 11:48:14 -08001310 CHECK_NE(target_space, source_space) << "In-place compaction currently unsupported";
Mathieu Chartier590fee92013-09-13 13:46:47 -07001311 if (target_space != source_space) {
1312 semi_space_collector_->SetFromSpace(source_space);
1313 semi_space_collector_->SetToSpace(target_space);
1314 semi_space_collector_->Run(false);
1315 }
1316}
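
// A semi-space compaction copies each live object from the source space into a fresh region of
// the target space, leaving a forwarding address behind. A minimal self-contained sketch of the
// copy step (not the ART implementation; assumes <cstring> for memcpy):
namespace {
struct SketchSpace {
  char* pos;  // Bump pointer: next free byte in the to-space.
  char* end;  // Exclusive upper bound of the to-space.
};
char* SemiSpaceCopySketch(SketchSpace* to, const char* obj, size_t size) {
  if (to->pos + size > to->end) {
    return nullptr;  // To-space exhausted; a real collector sizes it to fit all live objects.
  }
  char* dest = to->pos;
  to->pos += size;           // Bump-pointer allocate in the to-space.
  memcpy(dest, obj, size);   // Copy the object's bytes; the caller installs a forwarding address.
  return dest;
}
}  // namespace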
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001317
Ian Rogers1d54e732013-05-02 21:10:01 -07001318collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
1319 bool clear_soft_references) {
Ian Rogers81d425b2012-09-27 16:03:43 -07001320 Thread* self = Thread::Current();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001321 Runtime* runtime = Runtime::Current();
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001322 // If the heap can't run the GC, silently fail and return that no GC was run.
1323 switch (gc_type) {
1324 case collector::kGcTypeSticky: {
1325 const size_t alloc_space_size = non_moving_space_->Size();
1326 if (alloc_space_size < min_alloc_space_size_for_sticky_gc_ ||
1327 non_moving_space_->Capacity() - alloc_space_size < min_remaining_space_for_sticky_gc_) {
1328 return collector::kGcTypeNone;
1329 }
1330 break;
1331 }
1332 case collector::kGcTypePartial: {
1333 if (!have_zygote_space_) {
1334 return collector::kGcTypeNone;
1335 }
1336 break;
1337 }
1338 default: {
1339 // Other GC types don't have any special cases that would make them unrunnable. The main
1340 // case here is full GC.
1341 }
1342 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08001343 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Ian Rogers81d425b2012-09-27 16:03:43 -07001344 Locks::mutator_lock_->AssertNotHeld(self);
Ian Rogers120f1c72012-09-28 17:17:10 -07001345 if (self->IsHandlingStackOverflow()) {
1346 LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
1347 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001348 {
1349 gc_complete_lock_->AssertNotHeld(self);
1350 MutexLock mu(self, *gc_complete_lock_);
1351 // Ensure there is only one GC at a time.
1352 WaitForGcToCompleteLocked(self);
1353 // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
1354 // We don't do so at the moment, to ensure soft references are cleared.
1355 // GC can be disabled if someone has used GetPrimitiveArrayCritical without releasing it yet.
1356 if (gc_disable_count_ != 0) {
1357 LOG(WARNING) << "Skipping GC due to disable count " << gc_disable_count_;
1358 return collector::kGcTypeNone;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001359 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001360 is_gc_running_ = true;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001361 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001362 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
1363 ++runtime->GetStats()->gc_for_alloc_count;
1364 ++self->GetStats()->gc_for_alloc_count;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001365 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001366 uint64_t gc_start_time_ns = NanoTime();
Mathieu Chartier65db8802012-11-20 12:36:46 -08001367 uint64_t gc_start_size = GetBytesAllocated();
1368 // Approximate allocation rate in bytes / second.
Ian Rogers1d54e732013-05-02 21:10:01 -07001369 uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001370 // Back-to-back GCs can result in 0 ms of wait time between GC invocations.
1371 if (LIKELY(ms_delta != 0)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001372 allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
Mathieu Chartier65db8802012-11-20 12:36:46 -08001373 VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
1374 }
1375
Ian Rogers1d54e732013-05-02 21:10:01 -07001376 DCHECK_LT(gc_type, collector::kGcTypeMax);
1377 DCHECK_NE(gc_type, collector::kGcTypeNone);
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001378
Mathieu Chartier590fee92013-09-13 13:46:47 -07001379 collector::GarbageCollector* collector = nullptr;
Mathieu Chartier50482232013-11-21 11:48:14 -08001380 // TODO: Clean this up.
1381 if (current_allocator_ == kAllocatorTypeBumpPointer) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001382 gc_type = semi_space_collector_->GetGcType();
1383 CHECK_EQ(temp_space_->GetObjectsAllocated(), 0U);
1384 semi_space_collector_->SetFromSpace(bump_pointer_space_);
1385 semi_space_collector_->SetToSpace(temp_space_);
1386 mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
Mathieu Chartier50482232013-11-21 11:48:14 -08001387 collector = semi_space_collector_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001388 gc_type = collector::kGcTypeFull;
Mathieu Chartier50482232013-11-21 11:48:14 -08001389 } else if (current_allocator_ == kAllocatorTypeFreeList) {
1390 for (const auto& cur_collector : garbage_collectors_) {
1391 if (cur_collector->IsConcurrent() == concurrent_gc_ &&
1392 cur_collector->GetGcType() == gc_type) {
1393 collector = cur_collector;
1394 break;
1395 }
1396 }
1397 } else {
1398 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001399 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001400 CHECK(collector != NULL)
1401 << "Could not find garbage collector with concurrent=" << concurrent_gc_
1402 << " and type=" << gc_type;
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001403
Mathieu Chartier590fee92013-09-13 13:46:47 -07001404 ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
1405
1406 collector->Run(clear_soft_references);
Ian Rogers1d54e732013-05-02 21:10:01 -07001407 total_objects_freed_ever_ += collector->GetFreedObjects();
1408 total_bytes_freed_ever_ += collector->GetFreedBytes();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001409
Mathieu Chartier39e32612013-11-12 16:28:05 -08001410 // Enqueue cleared references.
1411 EnqueueClearedReferences();
1412
Mathieu Chartier590fee92013-09-13 13:46:47 -07001413 // Grow the heap so that we know when to perform the next GC.
1414 GrowForUtilization(gc_type, collector->GetDurationNs());
1415
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001416 if (care_about_pause_times_) {
1417 const size_t duration = collector->GetDurationNs();
1418 std::vector<uint64_t> pauses = collector->GetPauseTimes();
1419 // GC for alloc pauses the allocating thread, so consider it a pause.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001420 bool was_slow = duration > long_gc_log_threshold_ ||
1421 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001422 if (!was_slow) {
1423 for (uint64_t pause : pauses) {
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001424 was_slow = was_slow || pause > long_pause_log_threshold_;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001425 }
1426 }
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001427 if (was_slow) {
1428 const size_t percent_free = GetPercentFree();
1429 const size_t current_heap_size = GetBytesAllocated();
1430 const size_t total_memory = GetTotalMemory();
1431 std::ostringstream pause_string;
1432 for (size_t i = 0; i < pauses.size(); ++i) {
1433 pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
1434 << ((i != pauses.size() - 1) ? ", " : "");
1435 }
1436 LOG(INFO) << gc_cause << " " << collector->GetName()
1437 << " GC freed " << collector->GetFreedObjects() << "("
1438 << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
1439 << collector->GetFreedLargeObjects() << "("
1440 << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
1441 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
1442 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
1443 << " total " << PrettyDuration((duration / 1000) * 1000);
1444 if (VLOG_IS_ON(heap)) {
Ian Rogers5fe9af72013-11-14 00:17:20 -08001445 LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings());
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001446 }
1447 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001448 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001449
Ian Rogers15bf2d32012-08-28 17:33:04 -07001450 {
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001451 MutexLock mu(self, *gc_complete_lock_);
1452 is_gc_running_ = false;
1453 last_gc_type_ = gc_type;
1454 // Wake anyone who may have been waiting for the GC to complete.
1455 gc_complete_cond_->Broadcast(self);
Ian Rogers15bf2d32012-08-28 17:33:04 -07001456 }
Mathieu Chartier0a9dc052013-07-25 11:01:28 -07001457
Mathieu Chartier752a0e62013-06-27 11:03:27 -07001458 ATRACE_END();
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001459
1460 // Inform DDMS that a GC completed.
Ian Rogers15bf2d32012-08-28 17:33:04 -07001461 Dbg::GcDidFinish();
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001462 return gc_type;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001463}
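
// The allocation rate logged above is simply bytes allocated since the previous GC divided by
// the wall-clock time between the two GCs. A self-contained sketch of that computation (not
// ART code):
namespace {
uint64_t AllocationRateSketch(uint64_t gc_start_size, uint64_t last_gc_size, uint64_t ms_delta) {
  // Bytes per second; callers must skip the update when ms_delta == 0 (back-to-back GCs).
  return ms_delta != 0 ? ((gc_start_size - last_gc_size) * 1000) / ms_delta : 0;
}
}  // namespace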
Mathieu Chartiera6399032012-06-11 18:49:50 -07001464
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001465static mirror::Object* RootMatchesObjectVisitor(mirror::Object* root, void* arg) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001466 mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001467 if (root == obj) {
1468 LOG(INFO) << "Object " << obj << " is a root";
1469 }
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001470 return root;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001471}
1472
1473class ScanVisitor {
1474 public:
Brian Carlstromdf629502013-07-17 22:39:56 -07001475 void operator()(const mirror::Object* obj) const {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001476 LOG(ERROR) << "Would have rescanned object " << obj;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001477 }
1478};
1479
Ian Rogers1d54e732013-05-02 21:10:01 -07001480// Verify a reference from an object.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001481class VerifyReferenceVisitor {
1482 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001483 explicit VerifyReferenceVisitor(Heap* heap)
Ian Rogers1d54e732013-05-02 21:10:01 -07001484 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001485 : heap_(heap), failed_(false) {}
Ian Rogers1d54e732013-05-02 21:10:01 -07001486
1487 bool Failed() const {
1488 return failed_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001489 }
1490
1491 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
Ian Rogers1d54e732013-05-02 21:10:01 -07001492 // analysis on visitors.
Brian Carlstromdf629502013-07-17 22:39:56 -07001493 void operator()(const mirror::Object* obj, const mirror::Object* ref,
1494 const MemberOffset& offset, bool /* is_static */) const
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001495 NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001496 // Verify that the reference is live.
Ian Rogers1d54e732013-05-02 21:10:01 -07001497 if (UNLIKELY(ref != NULL && !IsLive(ref))) {
1498 accounting::CardTable* card_table = heap_->GetCardTable();
1499 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
1500 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001501 if (!failed_) {
1502 // Print the message only on the first failure, to prevent spam.
1503 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
1504 failed_ = true;
1505 }
1506 if (obj != nullptr) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001507 byte* card_addr = card_table->CardFromAddr(obj);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001508 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
1509 << offset << "\n card value = " << static_cast<int>(*card_addr);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001510 if (heap_->IsValidObjectAddress(obj->GetClass())) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001511 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
1512 } else {
1513 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
1514 }
1515
1516 // Attempt to find the class among the recently freed objects.
1517 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001518 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
1519 space::MallocSpace* space = ref_space->AsMallocSpace();
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001520 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
1521 if (ref_class != nullptr) {
1522 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
1523 << PrettyClass(ref_class);
1524 } else {
1525 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
1526 }
1527 }
1528
Mathieu Chartier590fee92013-09-13 13:46:47 -07001529 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001530 ref->GetClass()->IsClass()) {
1531 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
1532 } else {
1533 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
1534 << ") is not a valid heap address";
1535 }
1536
Ian Rogers1d54e732013-05-02 21:10:01 -07001537 card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
1538 void* cover_begin = card_table->AddrFromCard(card_addr);
1539 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
1540 accounting::CardTable::kCardSize);
1541 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
1542 << "-" << cover_end;
1543 accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001544
Ian Rogers1d54e732013-05-02 21:10:01 -07001545 // Print out how the object is live.
1546 if (bitmap != NULL && bitmap->Test(obj)) {
1547 LOG(ERROR) << "Object " << obj << " found in live bitmap";
1548 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001549 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001550 LOG(ERROR) << "Object " << obj << " found in allocation stack";
1551 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001552 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001553 LOG(ERROR) << "Object " << obj << " found in live stack";
1554 }
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001555 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
1556 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
1557 }
1558 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
1559 LOG(ERROR) << "Ref " << ref << " found in live stack";
1560 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001561 // Attempt to see if the card table missed the reference.
1562 ScanVisitor scan_visitor;
1563 byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
1564 card_table->Scan(bitmap, byte_cover_begin,
Mathieu Chartier184e3222013-08-03 14:02:57 -07001565 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
Ian Rogers1d54e732013-05-02 21:10:01 -07001566
1567 // Search to see if any of the roots reference our object.
1568 void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
1569 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1570
1571 // Search to see if any of the roots reference our reference.
1572 arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
1573 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1574 } else {
1575 LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001576 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001577 }
1578 }
1579
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001580 bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001581 return heap_->IsLiveObjectLocked(obj, true, false, true);
Ian Rogers1d54e732013-05-02 21:10:01 -07001582 }
1583
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001584 static mirror::Object* VerifyRoots(mirror::Object* root, void* arg) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001585 VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001586 (*visitor)(nullptr, root, MemberOffset(0), true);
1587 return root;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001588 }
1589
1590 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07001591 Heap* const heap_;
1592 mutable bool failed_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001593};
1594
Ian Rogers1d54e732013-05-02 21:10:01 -07001595// Verify all references within an object, for use with HeapBitmap::Visit.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001596class VerifyObjectVisitor {
1597 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001598 explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001599
Mathieu Chartier590fee92013-09-13 13:46:47 -07001600 void operator()(mirror::Object* obj) const
Ian Rogersb726dcb2012-09-05 08:57:23 -07001601 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001602 // Note: we verify the references in obj but not obj itself; obj must be live, or we would
1603 // not have found it in the live bitmap.
1604 VerifyReferenceVisitor visitor(heap_);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001605 // The class doesn't count as a reference, but we should verify it anyway.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001606 collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
1607 if (obj->GetClass()->IsReferenceClass()) {
1608 visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false);
1609 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001610 failed_ = failed_ || visitor.Failed();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001611 }
1612
Mathieu Chartier590fee92013-09-13 13:46:47 -07001613 static void VisitCallback(mirror::Object* obj, void* arg)
1614 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1615 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
1616 visitor->operator()(obj);
1617 }
1618
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001619 bool Failed() const {
1620 return failed_;
1621 }
1622
1623 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07001624 Heap* const heap_;
1625 mutable bool failed_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001626};
1627
1628// Must do this with mutators suspended since we are directly accessing the allocation stacks.
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001629bool Heap::VerifyHeapReferences() {
Ian Rogers81d425b2012-09-27 16:03:43 -07001630 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001631 // Sort the allocation stacks so that we can binary search them efficiently.
Ian Rogers1d54e732013-05-02 21:10:01 -07001632 allocation_stack_->Sort();
1633 live_stack_->Sort();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001634 VerifyObjectVisitor visitor(this);
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001635 // Verify objects in the allocation stack since these will be objects which were:
1636 // 1. Allocated prior to the GC (pre GC verification).
1637 // 2. Allocated during the GC (pre sweep GC verification).
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001638 // We don't want to verify the objects in the live stack since they themselves may be
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001639 // pointing to dead objects if they are not reachable.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001640 VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
1641 // Verify the roots:
1642 Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001643 if (visitor.Failed()) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001644 // Dump mod-union tables.
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001645 for (const auto& table_pair : mod_union_tables_) {
1646 accounting::ModUnionTable* mod_union_table = table_pair.second;
1647 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
1648 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001649 DumpSpaces();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001650 return false;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001651 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001652 return true;
1653}
1654
1655class VerifyReferenceCardVisitor {
1656 public:
1657 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
1658 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
1659 Locks::heap_bitmap_lock_)
Ian Rogers1d54e732013-05-02 21:10:01 -07001660 : heap_(heap), failed_(failed) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001661 }
1662
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001663 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1664 // annotalysis on visitors.
Brian Carlstromdf629502013-07-17 22:39:56 -07001665 void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
1666 bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001667 // Filter out class references since changing an object's class does not mark the card as dirty.
1668 // Also handles large objects, since the only reference they hold is a class reference.
1669 if (ref != NULL && !ref->IsClass()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001670 accounting::CardTable* card_table = heap_->GetCardTable();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001671 // If the object references something in the live stack (other than a class), it should be
1672 // on a dirty card; if its card is not dirty, a card mark is missing.
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001673 if (!card_table->AddrIsInCardTable(obj)) {
1674 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
1675 *failed_ = true;
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001676 } else if (!card_table->IsDirty(obj)) {
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001677 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
1678 // kCardDirty - 1 if it didn't get touched since we aged it.
Ian Rogers1d54e732013-05-02 21:10:01 -07001679 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
Mathieu Chartierf082d3c2013-07-29 17:04:07 -07001680 if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
1681 if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001682 LOG(ERROR) << "Object " << obj << " found in live stack";
1683 }
1684 if (heap_->GetLiveBitmap()->Test(obj)) {
1685 LOG(ERROR) << "Object " << obj << " found in live bitmap";
1686 }
1687 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
1688 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
1689
1690 // Print which field of the object is dead.
1691 if (!obj->IsObjectArray()) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001692 const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001693 CHECK(klass != NULL);
Brian Carlstromea46f952013-07-30 01:26:50 -07001694 const mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
1695 : klass->GetIFields();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001696 CHECK(fields != NULL);
1697 for (int32_t i = 0; i < fields->GetLength(); ++i) {
Brian Carlstromea46f952013-07-30 01:26:50 -07001698 const mirror::ArtField* cur = fields->Get(i);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001699 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
1700 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
1701 << PrettyField(cur);
1702 break;
1703 }
1704 }
1705 } else {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001706 const mirror::ObjectArray<mirror::Object>* object_array =
1707 obj->AsObjectArray<mirror::Object>();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001708 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
1709 if (object_array->Get(i) == ref) {
1710 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
1711 }
1712 }
1713 }
1714
1715 *failed_ = true;
1716 }
1717 }
1718 }
1719 }
1720
1721 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07001722 Heap* const heap_;
1723 bool* const failed_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001724};
1725
1726class VerifyLiveStackReferences {
1727 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001728 explicit VerifyLiveStackReferences(Heap* heap)
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001729 : heap_(heap),
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001730 failed_(false) {}
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001731
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001732 void operator()(mirror::Object* obj) const
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001733 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1734 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
Mathieu Chartier590fee92013-09-13 13:46:47 -07001735 collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001736 }
1737
1738 bool Failed() const {
1739 return failed_;
1740 }
1741
1742 private:
Ian Rogers1d54e732013-05-02 21:10:01 -07001743 Heap* const heap_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001744 bool failed_;
1745};
1746
1747bool Heap::VerifyMissingCardMarks() {
Ian Rogers81d425b2012-09-27 16:03:43 -07001748 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001749
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001750 // We need to sort the live stack since we binary search it.
Ian Rogers1d54e732013-05-02 21:10:01 -07001751 live_stack_->Sort();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001752 VerifyLiveStackReferences visitor(this);
1753 GetLiveBitmap()->Visit(visitor);
1754
1755 // We can verify objects in the live stack since none of these should reference dead objects.
Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001756 for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001757 visitor(*it);
1758 }
1759
1760 if (visitor.Failed()) {
1761 DumpSpaces();
1762 return false;
1763 }
1764 return true;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001765}
1766
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001767void Heap::SwapStacks() {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001768 allocation_stack_.swap(live_stack_);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001769}
1770
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001771accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
1772 auto it = mod_union_tables_.find(space);
1773 if (it == mod_union_tables_.end()) {
1774 return nullptr;
1775 }
1776 return it->second;
1777}
1778
Ian Rogers5fe9af72013-11-14 00:17:20 -08001779void Heap::ProcessCards(TimingLogger& timings) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001780 // Clear cards and keep track of cards cleared in the mod-union table.
Mathieu Chartier02e25112013-08-14 16:14:24 -07001781 for (const auto& space : continuous_spaces_) {
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001782 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
1783 if (table != nullptr) {
1784 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
1785 "ImageModUnionClearCards";
Ian Rogers5fe9af72013-11-14 00:17:20 -08001786 TimingLogger::ScopedSplit split(name, &timings);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001787 table->ClearCards();
Mathieu Chartier590fee92013-09-13 13:46:47 -07001788 } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
Ian Rogers5fe9af72013-11-14 00:17:20 -08001789 TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001790 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
1791 // were dirty before the GC started.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001792 // TODO: Don't need to use atomic.
1793 // The races mean we either end up with an aged card or an unaged card. Since we process the
1794 // checkpoint roots first and scan / update the mod-union tables afterwards, we will always
1795 // scan one of the two. If we end up with the unaged card, we scan it in the pause.
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001796 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001797 }
1798 }
1799}
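
// Aging maps a dirty card to a distinct "dirty before the GC started" value, so cards dirtied
// during the GC remain distinguishable. A minimal sketch over a raw card array (the constants
// and layout are assumptions, not ART's actual card table):
namespace {
void AgeCardsSketch(unsigned char* cards, size_t num_cards, unsigned char dirty_value) {
  for (size_t i = 0; i < num_cards; ++i) {
    if (cards[i] == dirty_value) {
      cards[i] = dirty_value - 1;  // Aged: dirtied before this GC began.
    } else {
      cards[i] = 0;                // Clean, or a stale age from an earlier GC cycle.
    }
  }
}
}  // namespace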
1800
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001801static mirror::Object* IdentityCallback(mirror::Object* obj, void*) {
1802 return obj;
1803}
1804
Ian Rogers1d54e732013-05-02 21:10:01 -07001805void Heap::PreGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001806 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1807 Thread* self = Thread::Current();
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001808
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001809 if (verify_pre_gc_heap_) {
1810 thread_list->SuspendAll();
1811 {
1812 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1813 if (!VerifyHeapReferences()) {
1814 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed";
1815 }
1816 }
1817 thread_list->ResumeAll();
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001818 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001819
1820 // Check that all objects which reference things in the live stack are on dirty cards.
1821 if (verify_missing_card_marks_) {
1822 thread_list->SuspendAll();
1823 {
1824 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1825 SwapStacks();
1826 // Sort the live stack so that we can quickly binary search it later.
1827 if (!VerifyMissingCardMarks()) {
1828 LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
1829 }
1830 SwapStacks();
1831 }
1832 thread_list->ResumeAll();
1833 }
1834
1835 if (verify_mod_union_table_) {
1836 thread_list->SuspendAll();
1837 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001838 for (const auto& table_pair : mod_union_tables_) {
1839 accounting::ModUnionTable* mod_union_table = table_pair.second;
1840 mod_union_table->UpdateAndMarkReferences(IdentityCallback, nullptr);
1841 mod_union_table->Verify();
1842 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001843 thread_list->ResumeAll();
1844 }
Mathieu Chartier4da7f2f2012-11-13 12:51:01 -08001845}
1846
Ian Rogers1d54e732013-05-02 21:10:01 -07001847void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001848 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
1849 // reachable objects.
1850 if (verify_post_gc_heap_) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001851 Thread* self = Thread::Current();
1852 CHECK_NE(self->GetState(), kRunnable);
Ian Rogers1d54e732013-05-02 21:10:01 -07001853 {
1854 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1855 // Swapping bound bitmaps does nothing.
1856 gc->SwapBitmaps();
1857 if (!VerifyHeapReferences()) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001858 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed";
Ian Rogers1d54e732013-05-02 21:10:01 -07001859 }
1860 gc->SwapBitmaps();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001861 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001862 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001863}
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001864
Ian Rogers1d54e732013-05-02 21:10:01 -07001865void Heap::PostGcVerification(collector::GarbageCollector* gc) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001866 if (verify_system_weaks_) {
Anwar Ghuloum67f99412013-08-12 14:19:48 -07001867 Thread* self = Thread::Current();
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001868 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001869 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001870 mark_sweep->VerifySystemWeaks();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001871 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001872}
1873
Mathieu Chartier590fee92013-09-13 13:46:47 -07001874collector::GcType Heap::WaitForGcToComplete(Thread* self) {
1875 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1876 MutexLock mu(self, *gc_complete_lock_);
1877 return WaitForGcToCompleteLocked(self);
1878}
1879
1880collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001881 collector::GcType last_gc_type = collector::kGcTypeNone;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001882 uint64_t wait_start = NanoTime();
1883 while (is_gc_running_) {
1884 ATRACE_BEGIN("GC: Wait For Completion");
1885 // We must wait: change the thread state, then sleep on gc_complete_cond_.
1886 gc_complete_cond_->Wait(self);
1887 last_gc_type = last_gc_type_;
Mathieu Chartier752a0e62013-06-27 11:03:27 -07001888 ATRACE_END();
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001889 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001890 uint64_t wait_time = NanoTime() - wait_start;
1891 total_wait_time_ += wait_time;
1892 if (wait_time > long_pause_log_threshold_) {
1893 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time);
1894 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001895 return last_gc_type;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001896}
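
// The wait loop above is the classic monitor pattern: hold the lock, re-check the predicate in
// a loop (to tolerate spurious wakeups), and block on the condition variable, which releases
// the lock while sleeping. A sketch with standard primitives (ART uses its own Mutex classes;
// assumes <mutex> and <condition_variable>):
namespace {
void WaitForFlagSketch(std::mutex& mu, std::condition_variable& cond, const bool& running) {
  std::unique_lock<std::mutex> lock(mu);
  while (running) {   // Re-check after every wakeup; wakeups may be spurious.
    cond.wait(lock);  // Atomically releases the lock and blocks; reacquires before returning.
  }
}
}  // namespace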
1897
Elliott Hughesc967f782012-04-16 10:23:15 -07001898void Heap::DumpForSigQuit(std::ostream& os) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001899 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001900 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
Elliott Hughes8b788fe2013-04-17 15:57:01 -07001901 DumpGcPerformanceInfo(os);
Elliott Hughesc967f782012-04-16 10:23:15 -07001902}
1903
1904size_t Heap::GetPercentFree() {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001905 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory());
Elliott Hughesc967f782012-04-16 10:23:15 -07001906}
1907
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08001908void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001909 if (max_allowed_footprint > GetMaxMemory()) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001910 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001911 << PrettySize(GetMaxMemory());
1912 max_allowed_footprint = GetMaxMemory();
1913 }
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -07001914 max_allowed_footprint_ = max_allowed_footprint;
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001915}
1916
Mathieu Chartier590fee92013-09-13 13:46:47 -07001917bool Heap::IsMovableObject(const mirror::Object* obj) const {
1918 if (kMovingCollector) {
1919 DCHECK(!IsInTempSpace(obj));
1920 if (bump_pointer_space_->HasAddress(obj)) {
1921 return true;
1922 }
1923 }
1924 return false;
1925}
1926
1927bool Heap::IsInTempSpace(const mirror::Object* obj) const {
1928 if (temp_space_->HasAddress(obj) && !temp_space_->Contains(obj)) {
1929 return true;
1930 }
1931 return false;
1932}
1933
Mathieu Chartier987ccff2013-07-08 11:05:21 -07001934void Heap::UpdateMaxNativeFootprint() {
1935 size_t native_size = native_bytes_allocated_;
1936 // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
1937 size_t target_size = native_size / GetTargetHeapUtilization();
1938 if (target_size > native_size + max_free_) {
1939 target_size = native_size + max_free_;
1940 } else if (target_size < native_size + min_free_) {
1941 target_size = native_size + min_free_;
1942 }
1943 native_footprint_gc_watermark_ = target_size;
1944 native_footprint_limit_ = 2 * target_size - native_size;
1945}
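
// The native watermark follows the same clamp rule as the Java heap: aim for
// size / utilization, bounded to [size + min_free, size + max_free]. A self-contained sketch
// (not ART code; assumes <algorithm>):
namespace {
size_t ClampedTargetSizeSketch(size_t size, double utilization, size_t min_free, size_t max_free) {
  size_t target = static_cast<size_t>(size / utilization);
  return std::min(std::max(target, size + min_free), size + max_free);
}
}  // namespace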
1946
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07001947void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001948 // We know what our utilization is at this moment.
1949 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
Mathieu Chartier65db8802012-11-20 12:36:46 -08001950 const size_t bytes_allocated = GetBytesAllocated();
1951 last_gc_size_ = bytes_allocated;
Ian Rogers1d54e732013-05-02 21:10:01 -07001952 last_gc_time_ns_ = NanoTime();
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07001953 size_t target_size;
1954 if (gc_type != collector::kGcTypeSticky) {
1955 // Grow the heap for non sticky GC.
1956 target_size = bytes_allocated / GetTargetHeapUtilization();
1957 if (target_size > bytes_allocated + max_free_) {
1958 target_size = bytes_allocated + max_free_;
1959 } else if (target_size < bytes_allocated + min_free_) {
1960 target_size = bytes_allocated + min_free_;
1961 }
Mathieu Chartier590fee92013-09-13 13:46:47 -07001962 native_need_to_run_finalization_ = true;
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07001963 next_gc_type_ = collector::kGcTypeSticky;
1964 } else {
1965 // Based on how close the current heap size is to the target size, decide
1966 // whether or not to do a partial or sticky GC next.
1967 if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
1968 next_gc_type_ = collector::kGcTypeSticky;
1969 } else {
1970 next_gc_type_ = collector::kGcTypePartial;
1971 }
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07001972 // If we have freed enough memory, shrink the heap back down.
1973 if (bytes_allocated + max_free_ < max_allowed_footprint_) {
1974 target_size = bytes_allocated + max_free_;
1975 } else {
1976 target_size = std::max(bytes_allocated, max_allowed_footprint_);
1977 }
1978 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001979 if (!ignore_max_footprint_) {
1980 SetIdealFootprint(target_size);
Mathieu Chartier50482232013-11-21 11:48:14 -08001981 if (concurrent_gc_ && AllocatorHasConcurrentGC(current_allocator_)) {
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001982 // Calculate when to perform the next ConcurrentGC.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001983 // Calculate the estimated GC duration.
1984 double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
1985 // Estimate how many remaining bytes we will have when we need to start the next GC.
1986 size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
1987 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
1988 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
1989 // A should-never-happen situation: the estimated allocation rate implies that we would
1990 // exceed the application's entire footprint before the next GC completes. Schedule
1991 // another GC straight away.
1992 concurrent_start_bytes_ = bytes_allocated;
1993 } else {
1994 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
1995 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
1996 // right away.
Mathieu Chartier50482232013-11-21 11:48:14 -08001997 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
1998 bytes_allocated);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001999 }
2000 DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_);
2001 DCHECK_LE(max_allowed_footprint_, growth_limit_);
Mathieu Chartier65db8802012-11-20 12:36:46 -08002002 }
Mathieu Chartier65db8802012-11-20 12:36:46 -08002003 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07002004}
2005
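// Sketch of the concurrent-start arithmetic above, with assumed example values: if
// max_allowed_footprint_ is 32MB, allocation_rate_ is 10MB/s and the last GC took
// 200ms, then remaining_bytes = 10MB/s * 0.2s = 2MB, so concurrent_start_bytes_
// becomes 30MB. The concurrent GC is thus kicked off early enough that, at the
// observed allocation rate, it should finish just as the footprint limit is reached.
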
void Heap::ClearGrowthLimit() {
  growth_limit_ = capacity_;
  non_moving_space_->ClearGrowthLimit();
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

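// For orientation (an inference from the parameter names, not spelled out here): these
// offsets cache the field positions of java.lang.ref.Reference (referent, queue,
// queueNext, pendingNext) and java.lang.ref.FinalizerReference (zombie), letting the GC
// touch reference fields directly instead of resolving them on every access.
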
void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) {
  DCHECK(reference != nullptr);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, referent, true);
}

mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
  DCHECK(reference != nullptr);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
}

void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
  ScopedObjectAccess soa(self);
  JValue result;
  ArgArray arg_array(nullptr, 0);
  arg_array.Append(reinterpret_cast<uint32_t>(object));
  // Equivalent to calling java.lang.ref.FinalizerReference.add(object) from Java.
  soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self,
      arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
}

void Heap::EnqueueClearedReferences() {
  if (!cleared_references_.IsEmpty()) {
    // When a runtime isn't started there are no reference queues to care about, so ignore.
    if (LIKELY(Runtime::Current()->IsStarted())) {
      ScopedObjectAccess soa(Thread::Current());
      JValue result;
      ArgArray arg_array(nullptr, 0);
      arg_array.Append(reinterpret_cast<uint32_t>(cleared_references_.GetList()));
      soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
          arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
    }
    cleared_references_.Clear();
  }
}

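// Summary of the hand-off above: the GC collects unreachable references into
// cleared_references_; this method then passes the head of that list to
// java.lang.ref.ReferenceQueue.add(), which is expected to walk the list and enqueue
// each reference on its registered queue so Java code can observe the clearing.
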
void Heap::RequestConcurrentGC(Thread* self) {
  // Make sure that we can do a concurrent GC.
  Runtime* runtime = Runtime::Current();
  DCHECK(concurrent_gc_);
  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
      self->IsHandlingStackOverflow()) {
    return;
  }
  // A request is now pending; don't start more until GrowForUtilization() updates
  // concurrent_start_bytes_.
  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  JNIEnv* env = self->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestGC);
  CHECK(!env->ExceptionCheck());
}

void Heap::ConcurrentGC(Thread* self) {
  if (Runtime::Current()->IsShuttingDown(self)) {
    return;
  }
  // Wait for any GCs currently running to finish.
  if (WaitForGcToComplete(self) == collector::kGcTypeNone) {
    // If we can't run the GC type we wanted to run, find the next appropriate one and try that
    // instead, e.g. if we can't do a partial GC, do a full GC instead.
    if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
        collector::kGcTypeNone) {
      for (collector::GcType gc_type : gc_plan_) {
        // Attempt to run the collector; if we succeed, we are done.
        if (gc_type > next_gc_type_ &&
            CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
          break;
        }
      }
    }
  }
}

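// Example of the fallback above: if next_gc_type_ is kGcTypePartial but the partial
// collector cannot run (presumably, for instance, before a Zygote space exists), the
// loop walks gc_plan_ and escalates to the next stronger type such as kGcTypeFull.
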
void Heap::RequestHeapTrim() {
  // GC completed and now we must decide whether to request a heap trim (advising pages back to
  // the kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim
  // scans a space it will hold its lock and can become a cause of jank.
  // Note: the large object space trims itself, and the Zygote space has been trimmed and
  // unchanging since forking.

  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
  // because that only marks object heads, so a large array looks like lots of empty space. We
  // don't just ask dlmalloc to trim all the time, because the cost of an _attempted_ trim is
  // proportional to utilization (which is probably inversely proportional to how much benefit
  // we can expect). We could try mincore(2) but that's only a measure of how many pages we
  // haven't given away, not how much use we're making of those pages.
  uint64_t ms_time = MilliTime();
  // Don't bother trimming the alloc space if a heap trim occurred in the last two seconds.
  if (ms_time - last_trim_time_ms_ < 2 * 1000) {
    return;
  }

  Thread* self = Thread::Current();
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
    // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
    // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
    // as we don't hold the lock while requesting the trim).
    return;
  }

  last_trim_time_ms_ = ms_time;
  ListenForProcessStateChange();

  // Trim only if we do not currently care about pause times.
  if (!care_about_pause_times_) {
    JNIEnv* env = self->GetJniEnv();
    DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
    DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != nullptr);
    env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                              WellKnownClasses::java_lang_Daemons_requestHeapTrim);
    CHECK(!env->ExceptionCheck());
  }
}

void Heap::RevokeThreadLocalBuffers(Thread* thread) {
  non_moving_space_->RevokeThreadLocalBuffers(thread);
}

void Heap::RevokeAllThreadLocalBuffers() {
  non_moving_space_->RevokeAllThreadLocalBuffers();
}

bool Heap::IsGCRequestPending() const {
  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
}

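// Note that concurrent_start_bytes_ doubles as the request flag: RequestConcurrentGC()
// parks it at std::numeric_limits<size_t>::max() so no further requests fire until
// GrowForUtilization() computes a fresh threshold after the GC completes.
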
void Heap::RunFinalization(JNIEnv* env) {
  // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
  if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
    CHECK(WellKnownClasses::java_lang_System != nullptr);
    WellKnownClasses::java_lang_System_runFinalization =
        CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
    CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
  }
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
                            WellKnownClasses::java_lang_System_runFinalization);
}

void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
  Thread* self = ThreadForEnv(env);
  if (native_need_to_run_finalization_) {
    RunFinalization(env);
    UpdateMaxNativeFootprint();
    native_need_to_run_finalization_ = false;
  }
  // Total number of native bytes allocated.
  native_bytes_allocated_.fetch_add(bytes);
  if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
    collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
        collector::kGcTypeFull;

    // The second watermark is higher than the GC watermark. If you hit this it means you are
    // allocating native objects faster than the GC can keep up with.
    if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
      if (WaitForGcToComplete(self) != collector::kGcTypeNone) {
        // Just finished a GC, attempt to run finalizers.
        RunFinalization(env);
        CHECK(!env->ExceptionCheck());
      }
      // If we are still over the watermark, attempt a GC for alloc and run finalizers.
      if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
        RunFinalization(env);
        native_need_to_run_finalization_ = false;
        CHECK(!env->ExceptionCheck());
      }
      // We have just run finalizers, so update the native watermark since it is very likely
      // that finalizers released native allocations.
      UpdateMaxNativeFootprint();
    } else if (!IsGCRequestPending()) {
      if (concurrent_gc_ && AllocatorHasConcurrentGC(current_allocator_)) {
        RequestConcurrentGC(self);
      } else {
        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
      }
    }
  }
}

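// Callers are assumed to reach this through dalvik.system.VMRuntime, e.g. (Java-side
// sketch, not part of this file):
//   VMRuntime.getRuntime().registerNativeAllocation(byteCount);
// paired with a matching registerNativeFree(byteCount) once the native memory is
// released, so the GC can account for memory that Java objects own but malloc provides.
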
void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
  int expected_size, new_size;
  do {
    expected_size = native_bytes_allocated_.load();
    new_size = expected_size - bytes;
    if (UNLIKELY(new_size < 0)) {
      ScopedObjectAccess soa(env);
      env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
                    StringPrintf("Attempted to free %d native bytes with only %d native bytes "
                                 "registered as allocated", bytes, expected_size).c_str());
      break;
    }
  } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size));
}

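// The loop above is the standard lock-free read-modify-write pattern: reload, recompute
// and retry whenever another thread changed native_bytes_allocated_ between the load
// and the swap, so concurrent registrations and frees never lose updates.
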
int64_t Heap::GetTotalMemory() const {
  int64_t ret = 0;
  for (const auto& space : continuous_spaces_) {
    // Currently don't include the image space.
    if (!space->IsImageSpace()) {
      ret += space->Size();
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    if (space->IsLargeObjectSpace()) {
      ret += space->AsLargeObjectSpace()->GetBytesAllocated();
    }
  }
  return ret;
}

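// This total is what is presumably surfaced as java.lang.Runtime.totalMemory(); the
// image space is deliberately excluded so the figure tracks only the spaces the
// application can actually fill.
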
void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
  DCHECK(mod_union_table != nullptr);
  mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
}

}  // namespace gc
}  // namespace art