/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <string>
#include <unordered_set>
#include <vector>

#include "allocator_type.h"
#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/time_utils.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/gc_cause.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "globals.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "process_state.h"
#include "safe_map.h"
#include "verify_object.h"

namespace art {

class ConditionVariable;
class Mutex;
class StackVisitor;
class Thread;
class ThreadPool;
class TimingLogger;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {

class AllocRecordObjectMap;
class ReferenceProcessor;
class TaskProcessor;

namespace accounting {
  class HeapBitmap;
  class ModUnionTable;
  class RememberedSet;
}  // namespace accounting

namespace collector {
  class ConcurrentCopying;
  class GarbageCollector;
  class MarkCompact;
  class MarkSweep;
  class SemiSpace;
}  // namespace collector

namespace allocator {
  class RosAlloc;
}  // namespace allocator

namespace space {
  class AllocSpace;
  class BumpPointerSpace;
  class ContinuousMemMapAllocSpace;
  class DiscontinuousSpace;
  class DlMallocSpace;
  class ImageSpace;
  class LargeObjectSpace;
  class MallocSpace;
  class RegionSpace;
  class RosAllocSpace;
  class Space;
  class ZygoteSpace;
}  // namespace space

class AgeCardVisitor {
 public:
  uint8_t operator()(uint8_t card) const {
    return (card == accounting::CardTable::kCardDirty) ? card - 1 : 0;
  }
};
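
// Illustrative sketch (not part of the original header): AgeCardVisitor "ages"
// a card one step per pass, so a card dirtied since the last GC survives
// exactly one more scan before being cleared:
//   AgeCardVisitor age;
//   uint8_t card = accounting::CardTable::kCardDirty;  // freshly written
//   card = age(card);  // kCardDirty - 1: still rescanned on the next pass
//   card = age(card);  // 0: clean, no longer rescanned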

enum HomogeneousSpaceCompactResult {
  // Success.
  kSuccess,
  // Reject due to disabled moving GC.
  kErrorReject,
  // Unsupported due to the current configuration.
  kErrorUnsupported,
  // System is shutting down.
  kErrorVMShuttingDown,
};

// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace.
static constexpr bool kUseRosAlloc = true;

// If true, use thread-local allocation stack.
static constexpr bool kUseThreadLocalAllocationStack = true;

class Heap {
 public:
  static constexpr size_t kDefaultStartingSize = kPageSize;
  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 256 * MB;
  static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultTLABSize = 32 * KB;
  static constexpr double kDefaultTargetUtilization = 0.5;
  static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
  // Primitive arrays larger than this size are put in the large object space.
  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
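  // E.g. with 4 KB pages the default threshold is 12 KB: an int array with a
  // ~16 KB payload (int[4096]) would go to the large object space, while
  // int[1024] (~4 KB) would stay in the main space. (Illustrative numbers only.)
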
  // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
  static constexpr bool kDefaultEnableParallelGC = false;

  // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
  // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
      USE_ART_LOW_4G_ALLOCATOR ?
          space::LargeObjectSpaceType::kFreeList
        : space::LargeObjectSpaceType::kMap;

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // How often we allow heap trimming to happen (nanoseconds).
  static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
  // How long we wait after a transition request to perform a collector transition (nanoseconds).
  static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);

  // Create a heap with the requested sizes. The optionally empty
  // original_image_file_name specifies the image Spaces to load, based on
  // ImageWriter output.
  Heap(size_t initial_size,
       size_t growth_limit,
       size_t min_free,
       size_t max_free,
       double target_utilization,
       double foreground_heap_growth_multiplier,
       size_t capacity,
       size_t non_moving_space_capacity,
       const std::string& original_image_file_name,
       InstructionSet image_instruction_set,
       CollectorType foreground_collector_type,
       CollectorType background_collector_type,
       space::LargeObjectSpaceType large_object_space_type,
       size_t large_object_threshold,
       size_t parallel_gc_threads,
       size_t conc_gc_threads,
       bool low_memory_mode,
       size_t long_pause_threshold,
       size_t long_gc_threshold,
       bool ignore_max_footprint,
       bool use_tlab,
       bool verify_pre_gc_heap,
       bool verify_pre_sweeping_heap,
       bool verify_post_gc_heap,
       bool verify_pre_gc_rosalloc,
       bool verify_pre_sweeping_rosalloc,
       bool verify_post_gc_rosalloc,
       bool gc_stress_mode,
       bool measure_gc_performance,
       bool use_homogeneous_space_compaction,
       uint64_t min_interval_homogeneous_space_compaction_by_oom);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocObject(Thread* self,
                              mirror::Class* klass,
                              size_t num_bytes,
                              const PreFenceVisitor& pre_fence_visitor)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented, true>(
        self, klass, num_bytes, GetCurrentAllocator(), pre_fence_visitor);
  }
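
  // Usage sketch (hypothetical caller; assumes the no-op VoidFunctor from
  // utils.h as the pre-fence visitor):
  //   mirror::Object* obj = heap->AllocObject</*kInstrumented=*/true>(
  //       self, klass, klass->GetObjectSize(), VoidFunctor());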

  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocNonMovableObject(Thread* self,
                                        mirror::Class* klass,
                                        size_t num_bytes,
                                        const PreFenceVisitor& pre_fence_visitor)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented, true>(
        self, klass, num_bytes, GetCurrentNonMovingAllocator(), pre_fence_visitor);
  }

  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
                                                         mirror::Class* klass,
                                                         size_t byte_count,
                                                         AllocatorType allocator,
                                                         const PreFenceVisitor& pre_fence_visitor)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
               !Roles::uninterruptible_);

  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }

  // Visit all of the live objects in the heap.
  void VisitObjects(ObjectCallback callback, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  void VisitObjectsPaused(ObjectCallback callback, void* arg)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
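
  // Sketch of a conforming callback (assuming ObjectCallback is
  // void(mirror::Object*, void*) as declared in object_callbacks.h):
  //   static void CountObject(mirror::Object* obj ATTRIBUTE_UNUSED, void* arg) {
  //     ++*reinterpret_cast<size_t*>(arg);
  //   }
  //   size_t count = 0;
  //   heap->VisitObjects(CountObject, &count);  // visits every live object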

  void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
  void RegisterNativeFree(JNIEnv* env, size_t bytes)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);

  // Change the allocator; updates the entrypoints accordingly.
  void ChangeAllocator(AllocatorType allocator)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);

  // Transition the garbage collector at runtime; may copy objects from one space to another.
  void TransitionCollector(CollectorType collector_type) REQUIRES(!*gc_complete_lock_);

  // Change the collector to be one of the possible options (MS, CMS, SS).
  void ChangeCollector(CollectorType collector_type)
      REQUIRES(Locks::mutator_lock_);

  // The given reference is believed to be an object in the Java heap; check its soundness.
  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
  // proper lock ordering for it.
  void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;

  // Check sanity of all live references.
  void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
  // Returns how many failures occurred.
  size_t VerifyHeapReferences(bool verify_referents = true)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  bool VerifyMissingCardMarks()
      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
  // and doesn't abort on error, allowing the caller to report more
  // meaningful diagnostics.
  bool IsValidObjectAddress(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_);

  // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
  // very slow.
  bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(mirror::Object* obj,
                          bool search_allocation_stack = true,
                          bool search_live_stack = true,
                          bool sorted = false)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_);

  // Enables us to temporarily disable compacting GC until objects are released.
  void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
  void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
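
  // Expected pairing (illustrative): bracket a region in which raw object
  // addresses must stay stable, e.g. while native code holds direct pointers:
  //   heap->IncrementDisableMovingGC(self);
  //   ...  // objects are not moved here
  //   heap->DecrementDisableMovingGC(self);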

  // Temporarily disable thread flip for JNI critical calls.
  void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);

  // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
  // Mutator lock is required for GetContinuousSpaces.
  void ClearMarkedObjects()
      REQUIRES(Locks::heap_bitmap_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

  // Does a concurrent GC; should only be called by the GC daemon thread
  // through the runtime.
  void ConcurrentGC(Thread* self, bool force_full)
      REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_, !*pending_task_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<mirror::Class*>& classes,
                      bool use_is_assignable_from,
                      uint64_t* counts)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);
  // Implements JDWP RT_Instances.
  void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);
  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(mirror::Object* o,
                           int32_t max_count,
                           std::vector<mirror::Object*>& referring_objects)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Make the current growth limit the new maximum capacity, unmaps pages at the end of spaces
  // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
  void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);

  // Target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }
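
  // Illustrative sizing rule (a sketch, ignoring the min_free/max_free
  // clamps): if a GC leaves live_bytes allocated, the next footprint is chosen
  // so that live_bytes / footprint ~= target_utilization_; e.g. a 0.5 target
  // with 32 MB live yields a footprint of roughly 64 MB.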

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set the heap's private space pointers to be the same as the space based on its type. Public
  // due to usage by tests.
  void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
      REQUIRES(!Locks::heap_bitmap_lock_);
  void AddSpace(space::Space* space)
      REQUIRES(!Locks::heap_bitmap_lock_)
      REQUIRES(Locks::mutator_lock_);
  void RemoveSpace(space::Space* space)
      REQUIRES(!Locks::heap_bitmap_lock_)
      REQUIRES(Locks::mutator_lock_);

  // Set target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);

  // Update the heap's process state to a new value, may cause compaction to occur.
  void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);

  bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
    // No lock since vector empty is thread safe.
    return !continuous_spaces_.empty();
  }

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
    return discontinuous_spaces_;
  }

  const collector::Iteration* GetCurrentGcIteration() const {
    return &current_gc_iteration_;
  }
  collector::Iteration* GetCurrentGcIteration() {
    return &current_gc_iteration_;
  }

  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kVerifyObjectSupport;
    if (verify_object_mode_ > kVerifyObjectModeDisabled) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kVerifyObjectModeDisabled;
  }

  // Other checks may be performed if we know the heap should be in a sane state.
  bool IsObjectValidationEnabled() const {
    return verify_object_mode_ > kVerifyObjectModeDisabled;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Returns the heap growth multiplier; this affects how much we grow the heap after a GC.
  // Scales heap growth, min free, and max free.
  double HeapGrowthMultiplier() const;
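
  // E.g. (illustrative): with a foreground multiplier of 2.0, a 2 MB max-free
  // budget behaves like 4 MB, so an interactive app's heap grows more
  // aggressively, trading footprint for fewer GCs.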
429
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800430 // Freed bytes can be negative in cases where we copy objects from a compacted space to a
431 // free-list backed space.
Mathieu Chartiere76e70f2014-05-02 16:35:37 -0700432 void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
Brian Carlstrom693267a2011-09-06 09:25:34 -0700433
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -0700434 // Record the bytes freed by thread-local buffer revoke.
435 void RecordFreeRevoke();
436
Elliott Hughes5ea047b2011-09-13 14:38:18 -0700437 // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700438 // The call is not needed if null is stored in the field.
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700439 ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst,
440 MemberOffset offset ATTRIBUTE_UNUSED,
441 const mirror::Object* new_value ATTRIBUTE_UNUSED) {
Mathieu Chartiere35517a2012-10-30 18:49:55 -0700442 card_table_->MarkCard(dst);
Ian Rogers5d76c432011-10-31 21:42:49 -0700443 }
444
445 // Write barrier for array operations that update many field positions
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700446 ALWAYS_INLINE void WriteBarrierArray(const mirror::Object* dst,
447 int start_offset ATTRIBUTE_UNUSED,
448 // TODO: element_count or byte_count?
449 size_t length ATTRIBUTE_UNUSED) {
Mathieu Chartiere35517a2012-10-30 18:49:55 -0700450 card_table_->MarkCard(dst);
Ian Rogers5d76c432011-10-31 21:42:49 -0700451 }
452
Mathieu Chartier2d2621a2014-10-23 16:48:06 -0700453 ALWAYS_INLINE void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
Mathieu Chartier0732d592013-11-06 11:02:50 -0800454 card_table_->MarkCard(obj);
455 }
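
  // Conceptual calling convention for the barriers above (sketch only; the
  // mirror object setters normally emit the barrier for you):
  //   dst->SetFieldObject(offset, ref);           // store a reference into dst
  //   heap->WriteBarrierField(dst, offset, ref);  // then dirty dst's card
  // The GC later re-scans dirty cards to find references created since the
  // last collection.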

  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  accounting::ReadBarrierTable* GetReadBarrierTable() const {
    return rb_table_.get();
  }

  void AddFinalizerReference(Thread* self, mirror::Object** object);

  // Returns the number of bytes currently allocated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_.LoadSequentiallyConsistent();
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const
      REQUIRES(!Locks::heap_bitmap_lock_);

  // Returns the total number of objects allocated since the heap was created.
  uint64_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  uint64_t GetBytesAllocatedEver() const;

  // Returns the total number of objects freed since the heap was created.
  uint64_t GetObjectsFreedEver() const {
    return total_objects_freed_ever_;
  }

  // Returns the total number of bytes freed since the heap was created.
  uint64_t GetBytesFreedEver() const {
    return total_bytes_freed_ever_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  size_t GetMaxMemory() const {
    // There are some race conditions in the allocation code that can cause bytes allocated to
    // become larger than growth_limit_ in rare cases.
    return std::max(GetBytesAllocated(), growth_limit_);
  }

  // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
  // consumed by an application.
  size_t GetTotalMemory() const;

  // Returns approximately how much free memory we have until the next GC happens.
  size_t GetFreeMemoryUntilGC() const {
    return max_allowed_footprint_ - GetBytesAllocated();
  }

  // Returns approximately how much free memory we have until the next OOME happens.
  size_t GetFreeMemoryUntilOOME() const {
    return growth_limit_ - GetBytesAllocated();
  }

  // Returns how much free memory we have until we need to grow the heap to perform an allocation.
  // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
  size_t GetFreeMemory() const {
    size_t byte_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
    size_t total_memory = GetTotalMemory();
    // Make sure we don't get a negative number.
    return total_memory - std::min(total_memory, byte_allocated);
  }
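
  // Worked example (illustrative numbers only): with growth_limit_ = 256 MB,
  // GetTotalMemory() = 64 MB and 40 MB allocated, maxMemory() reports 256 MB,
  // totalMemory() 64 MB, and freeMemory() 64 - 40 = 24 MB.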

  // Get the space that corresponds to an object's address. Current implementation searches all
  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using faster data structure like binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
                                                              bool fail_ok) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);

  // Do a pending collector transition.
  void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_);

  // Deflate monitors, ... and trim the spaces.
  void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeRosAllocThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();
  void AssertThreadLocalBuffersAreRevoked(Thread* thread);
  void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  void RosAllocVerification(TimingLogger* timings, const char* name)
      REQUIRES(Locks::mutator_lock_);

  accounting::HeapBitmap* GetLiveBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;

  // Mark the objects in the allocation stack as live and empty it.
  void FlushAllocStack()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Revoke all the thread-local allocation stacks.
  void RevokeAllThreadLocalAllocationStacks(Thread* self)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  // TODO: Refactor?
  void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
                      accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
                      accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
                      accounting::ObjectStack* stack)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Mark the specified allocation stack as live.
  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Unbind any bound bitmaps.
  void UnBindBitmaps()
      REQUIRES(Locks::heap_bitmap_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Returns the boot image spaces. There may be multiple boot image spaces.
  const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
    return boot_image_spaces_;
  }

  bool ObjectIsInBootImageSpace(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  bool IsInBootImageOatFile(const void* p) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  void GetBootImagesSize(uint32_t* boot_image_begin,
                         uint32_t* boot_image_end,
                         uint32_t* boot_oat_begin,
                         uint32_t* boot_oat_end);

  // Permanently disable moving garbage collection.
  void DisableMovingGc() REQUIRES(!*gc_complete_lock_);

  space::DlMallocSpace* GetDlMallocSpace() const {
    return dlmalloc_space_;
  }

  space::RosAllocSpace* GetRosAllocSpace() const {
    return rosalloc_space_;
  }

  // Return the corresponding rosalloc space.
  space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  space::MallocSpace* GetNonMovingSpace() const {
    return non_moving_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  // Returns the free list space that may contain movable objects (the
  // one that's not the non-moving space), either rosalloc_space_ or
  // dlmalloc_space_.
  space::MallocSpace* GetPrimaryFreeListSpace() {
    if (kUseRosAlloc) {
      DCHECK(rosalloc_space_ != nullptr);
      // reinterpret_cast is necessary as the space class hierarchy
      // isn't known (#included) yet here.
      return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
    } else {
      DCHECK(dlmalloc_space_ != nullptr);
      return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
    }
  }

  void DumpSpaces(std::ostream& stream) const SHARED_REQUIRES(Locks::mutator_lock_);
  std::string DumpSpaces() const SHARED_REQUIRES(Locks::mutator_lock_);

  // DumpObject should only be used by the signal handler.
  void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
  // Safe versions of PrettyTypeOf and class descriptor retrieval that check that objects are
  // valid heap addresses.
  std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
  std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  // GC performance measuring.
  void DumpGcPerformanceInfo(std::ostream& os)
      REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
  void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }
  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);

  accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
  void AddRememberedSet(accounting::RememberedSet* remembered_set);
  // Also deletes the remembered set.
  void RemoveRememberedSet(space::Space* space);

  bool IsCompilingBoot() const;
  bool HasBootImageSpace() const {
    return !boot_image_spaces_.empty();
  }

  ReferenceProcessor* GetReferenceProcessor() {
    return reference_processor_.get();
  }
  TaskProcessor* GetTaskProcessor() {
    return task_processor_.get();
  }

  bool HasZygoteSpace() const {
    return zygote_space_ != nullptr;
  }

  collector::ConcurrentCopying* ConcurrentCopyingCollector() {
    return concurrent_copying_collector_;
  }

  CollectorType CurrentCollectorType() {
    return collector_type_;
  }

  bool IsGcConcurrentAndMoving() const {
    if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
      // Assume no transition when a concurrent moving collector is used.
      DCHECK_EQ(collector_type_, foreground_collector_type_);
      DCHECK_EQ(foreground_collector_type_, background_collector_type_)
          << "Assume no transition such that collector_type_ won't change";
      return true;
    }
    return false;
  }

  bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
    MutexLock mu(self, *gc_complete_lock_);
    return disable_moving_gc_count_ > 0;
  }

  // Request an asynchronous trim.
  void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);

  // Request asynchronous GC.
  void RequestConcurrentGC(Thread* self, bool force_full) REQUIRES(!*pending_task_lock_);

  // Whether or not we may use a garbage collector, used so that we only create collectors we need.
  bool MayUseCollector(CollectorType type) const;

  // Used by tests to reduce timing-dependent flakiness in OOME behavior.
  void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
    min_interval_homogeneous_space_compaction_by_oom_ = interval;
  }

  // Helpers for android.os.Debug.getRuntimeStat().
  uint64_t GetGcCount() const;
  uint64_t GetGcTime() const;
  uint64_t GetBlockingGcCount() const;
  uint64_t GetBlockingGcTime() const;
  void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
  void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);

  // Allocation tracking support.
  // Callers to this function use double-checked locking to ensure safety on allocation_records_.
  bool IsAllocTrackingEnabled() const {
    return alloc_tracking_enabled_.LoadRelaxed();
  }
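
  // Double-checked locking sketch (hypothetical reader):
  //   if (heap->IsAllocTrackingEnabled()) {             // relaxed fast check
  //     MutexLock mu(self, *Locks::alloc_tracker_lock_);
  //     if (heap->IsAllocTrackingEnabled()) {           // re-check under lock
  //       AllocRecordObjectMap* records = heap->GetAllocationRecords();
  //       /* use records */
  //     }
  //   }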

  void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
    alloc_tracking_enabled_.StoreRelaxed(enabled);
  }

  AllocRecordObjectMap* GetAllocationRecords() const
      REQUIRES(Locks::alloc_tracker_lock_) {
    return allocation_records_.get();
  }

  void SetAllocationRecords(AllocRecordObjectMap* records)
      REQUIRES(Locks::alloc_tracker_lock_);

  void VisitAllocationRecords(RootVisitor* visitor) const
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void SweepAllocationRecords(IsMarkedVisitor* visitor) const
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void DisallowNewAllocationRecords() const
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void AllowNewAllocationRecords() const
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void BroadcastForNewAllocationRecords() const
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);

  // Create a new alloc space and compact default alloc space to it.
  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
  bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;

 private:
  class ConcurrentGCTask;
  class CollectorTransitionTask;
  class HeapTrimTask;

  // Compact source space to target space. Returns the collector used.
  collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
                                       space::ContinuousMemMapAllocSpace* source_space,
                                       GcCause gc_cause)
      REQUIRES(Locks::mutator_lock_);

  void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
  void StartGC(Thread* self, GcCause cause, CollectorType collector_type)
      REQUIRES(!*gc_complete_lock_);
  void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);

  // Create a mem map with a preferred base address.
  static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
                                              size_t capacity, std::string* out_error_str);

  bool SupportHSpaceCompaction() const {
    // Returns true if we can do hspace compaction.
    return main_space_backup_ != nullptr;
  }

Mathieu Chartier692fafd2013-11-29 17:24:40 -0800813 static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
814 return
815 allocator_type != kAllocatorTypeBumpPointer &&
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800816 allocator_type != kAllocatorTypeTLAB &&
817 allocator_type != kAllocatorTypeRegion &&
818 allocator_type != kAllocatorTypeRegionTLAB;
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800819 }
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800820 static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800821 return
822 allocator_type != kAllocatorTypeBumpPointer &&
823 allocator_type != kAllocatorTypeTLAB;
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800824 }
Mathieu Chartier31f44142014-04-08 14:40:03 -0700825 static bool IsMovingGc(CollectorType collector_type) {
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700826 return
827 collector_type == kCollectorTypeSS ||
828 collector_type == kCollectorTypeGSS ||
829 collector_type == kCollectorTypeCC ||
830 collector_type == kCollectorTypeMC ||
Zuo Wangf37a88b2014-07-10 04:26:41 -0700831 collector_type == kCollectorTypeHomogeneousSpaceCompact;
Mathieu Chartier9be9a7a2014-01-24 14:07:33 -0800832 }
Ian Rogersef7d42f2014-01-06 12:55:46 -0800833 bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
Mathieu Chartier90443472015-07-16 20:32:27 -0700834 SHARED_REQUIRES(Locks::mutator_lock_);
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700835 ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
836 size_t new_num_bytes_allocated,
Mathieu Chartierf517f1a2014-03-06 15:52:27 -0800837 mirror::Object** obj)
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700838 SHARED_REQUIRES(Locks::mutator_lock_)
839 REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700840
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700841 accounting::ObjectStack* GetMarkStack() {
842 return mark_stack_.get();
843 }
844
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800845 // We don't force this to be inlined since it is a slow path.
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800846 template <bool kInstrumented, typename PreFenceVisitor>
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700847 mirror::Object* AllocLargeObject(Thread* self,
848 mirror::Class** klass,
849 size_t byte_count,
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800850 const PreFenceVisitor& pre_fence_visitor)
Mathieu Chartier90443472015-07-16 20:32:27 -0700851 SHARED_REQUIRES(Locks::mutator_lock_)
852 REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800853
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700854 // Handles Allocate()'s slow allocation path with GC involved after
855 // an initial allocation attempt failed.
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700856 mirror::Object* AllocateInternalWithGc(Thread* self,
857 AllocatorType allocator,
Mathieu Chartiereebc3af2016-02-29 18:13:38 -0800858 bool instrumented,
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700859 size_t num_bytes,
860 size_t* bytes_allocated,
861 size_t* usable_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -0700862 size_t* bytes_tl_bulk_allocated,
Ian Rogers6fac4472014-02-25 17:01:10 -0800863 mirror::Class** klass)
Mathieu Chartier90443472015-07-16 20:32:27 -0700864 REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
865 SHARED_REQUIRES(Locks::mutator_lock_);

  // Allocate into a specific space.
  mirror::Object* AllocateInto(Thread* self,
                               space::AllocSpace* space,
                               mirror::Class* c,
                               size_t bytes)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);

  // Try to allocate a number of bytes; this function never does any GCs. Needs to be inlined so
  // that the switch statement is constant-optimized in the entrypoints.
  template <const bool kInstrumented, const bool kGrow>
  ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self,
                                              AllocatorType allocator_type,
                                              size_t alloc_size,
                                              size_t* bytes_allocated,
                                              size_t* usable_size,
                                              size_t* bytes_tl_bulk_allocated)
      SHARED_REQUIRES(Locks::mutator_lock_);
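
  // Why ALWAYS_INLINE matters here: each allocation entrypoint instantiates its caller with a
  // compile-time constant allocator type, so a dispatch of the shape below collapses to the one
  // matching case after inlining (an illustrative outline, not the exact case list):
  //
  //   switch (allocator_type) {
  //     case kAllocatorTypeTLAB:         // bump the thread-local buffer pointer
  //     case kAllocatorTypeBumpPointer:  // bump the shared space pointer
  //     case kAllocatorTypeRosAlloc:     // rosalloc_space_->Alloc(...)
  //     case kAllocatorTypeDlMalloc:     // dlmalloc_space_->Alloc(...)
  //     ...
  //   }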

  void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
      SHARED_REQUIRES(Locks::mutator_lock_);

  template <bool kGrow>
  ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);

  // Returns true if the address passed in is within the address range of a continuous space.
  bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Run the finalizers. If timeout is non-zero, then we use the VMRuntime version.
  void RunFinalization(JNIEnv* env, uint64_t timeout);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
      REQUIRES(gc_complete_lock_);

  void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
      REQUIRES(!*pending_task_lock_);

  void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*pending_task_lock_);
  bool IsGCRequestPending() const;

  // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
  // the type of GC that was actually run.
  collector::GcType CollectGarbageInternal(collector::GcType gc_plan,
                                           GcCause gc_cause,
                                           bool clear_soft_references)
      REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
               !*pending_task_lock_);

  void PreGcVerification(collector::GarbageCollector* gc)
      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
  void PreGcVerificationPaused(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_);
  void PreSweepingGcVerification(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  void PostGcVerification(collector::GarbageCollector* gc)
      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
  void PostGcVerificationPaused(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);

  // Update the watermark for the native allocated bytes based on the current number of native
  // bytes allocated and the target utilization ratio.
  void UpdateMaxNativeFootprint();
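
  // A sketch of the watermark computation, on the assumption that it mirrors the managed heap's
  // utilization arithmetic (illustrative only):
  //
  //   size_t native_size = native_bytes_allocated_.LoadRelaxed();
  //   size_t target_size = static_cast<size_t>(native_size / target_utilization_);
  //   native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);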

  // Find a collector based on GC type.
  collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);

  // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
  void CreateMainMallocSpace(MemMap* mem_map,
                             size_t initial_size,
                             size_t growth_limit,
                             size_t capacity);

  // Create a malloc space based on a mem map. Does not set the space as default.
  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map,
                                                  size_t initial_size,
                                                  size_t growth_limit,
                                                  size_t capacity,
                                                  const char* name,
                                                  bool can_move_objects);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection. bytes_allocated_before_gc is used to measure bytes / second for the period during
  // which the GC ran.
  void GrowForUtilization(collector::GarbageCollector* collector_ran,
                          uint64_t bytes_allocated_before_gc = 0);
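
  // The core arithmetic, sketched for a full (non-sticky) collection with the foreground growth
  // multiplier ignored (the real code also special-cases sticky GCs; min_free_, max_free_, and
  // target_utilization_ are declared further below):
  //
  //   const uint64_t bytes_allocated = GetBytesAllocated();
  //   uint64_t target_size = static_cast<uint64_t>(bytes_allocated / target_utilization_);
  //   target_size = std::min(target_size, bytes_allocated + max_free_);
  //   target_size = std::max(target_size, bytes_allocated + min_free_);
  //   max_allowed_footprint_ = target_size;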

  size_t GetPercentFree();

  static void VerificationCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_);

  // Swap the allocation stack with the live stack.
  void SwapStacks() SHARED_REQUIRES(Locks::mutator_lock_);

  // Clear cards and update the mod-union table. If process_alloc_space_cards is false, the alloc
  // space is not processed at all; if it is true, then clear_alloc_space_cards selects whether we
  // clear the alloc space cards or merely age them.
  void ProcessCards(TimingLogger* timings,
                    bool use_rem_sets,
                    bool process_alloc_space_cards,
                    bool clear_alloc_space_cards)
      SHARED_REQUIRES(Locks::mutator_lock_);
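
  // Clearing vs. ageing, sketched per alloc space (an outline of the card table walk; the real
  // visitor plumbing lives in heap.cc):
  //
  //   if (clear_alloc_space_cards) {
  //     card_table_->ClearCardRange(space->Begin(), space->End());
  //   } else {
  //     // Age the cards so a later sticky GC can still find recently dirtied objects.
  //     card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
  //                                    VoidFunctor());
  //   }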

  // Push an object onto the allocation stack.
  void PushOnAllocationStack(Thread* self, mirror::Object** obj)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
  void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

  void ClearConcurrentGCRequest();
  void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
  void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);

  // What kind of concurrency behavior is the runtime after? Currently true for the concurrent
  // mark-sweep and concurrent copying collectors, false for other GC types.
  bool IsGcConcurrent() const ALWAYS_INLINE {
    return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
  }

  // Trim the managed and native spaces by releasing unused memory back to the OS.
  void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);

  // Trim unused pages at the end of reference tables.
  void TrimIndirectReferenceTables(Thread* self);

  void VisitObjectsInternal(ObjectCallback callback, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

  void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
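
  // A sketch of the windowing scheme, assuming whole windows of
  // kGcCountRateHistogramWindowDuration (declared further below) and eliding the catch-up logic
  // for windows in which no GC ran:
  //
  //   uint64_t now = NanoTime();
  //   if (now - last_update_time_gc_count_rate_histograms_ >=
  //       kGcCountRateHistogramWindowDuration) {
  //     gc_count_rate_histogram_.AddValue(gc_count_last_window_);
  //     blocking_gc_count_rate_histogram_.AddValue(blocking_gc_count_last_window_);
  //     gc_count_last_window_ = 0;
  //     blocking_gc_count_last_window_ = 0;
  //     last_update_time_gc_count_rate_histograms_ += kGcCountRateHistogramWindowDuration;
  //   }
  //   ++gc_count_last_window_;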

  // GC stress mode attempts to do one GC per unique backtrace.
  void CheckGcStressMode(Thread* self, mirror::Object** obj)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
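
  // One GC per unique backtrace, sketched; the hashing helper below is hypothetical and the GC
  // cause is an assumption (see the backtrace fields near the end of this class):
  //
  //   uint64_t hash = HashCurrentBacktrace(self);  // hypothetical helper
  //   bool new_backtrace;
  //   {
  //     MutexLock mu(self, *backtrace_lock_);
  //     new_backtrace = seen_backtraces_.insert(hash).second;
  //   }
  //   if (new_backtrace) {
  //     unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
  //     CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, false);
  //   } else {
  //     seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
  //   }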

  // All-known continuous spaces, where objects lie within fixed bounds.
  std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);

  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_ GUARDED_BY(Locks::mutator_lock_);

  // All-known alloc spaces, where objects may be or have been allocated.
  std::vector<space::AllocSpace*> alloc_spaces_;

  // A space where non-movable objects are allocated; when compaction is enabled it contains
  // Classes, ArtMethods, ArtFields, and non-moving objects.
  space::MallocSpace* non_moving_space_;

  // Space which we use for the kAllocatorTypeROSAlloc.
  space::RosAllocSpace* rosalloc_space_;

  // Space which we use for the kAllocatorTypeDlMalloc.
  space::DlMallocSpace* dlmalloc_space_;

  // The main space is the space which the GC copies to and from on process state updates. This
  // space is typically either the dlmalloc_space_ or the rosalloc_space_.
  space::MallocSpace* main_space_;

  // The large object space we are currently allocating into.
  space::LargeObjectSpace* large_object_space_;

  // The card table, dirtied by the write barrier.
  std::unique_ptr<accounting::CardTable> card_table_;

  std::unique_ptr<accounting::ReadBarrierTable> rb_table_;

  // A mod-union table remembers all of the references from its space to other spaces.
  AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
      mod_union_tables_;

  // A remembered set remembers all of the references from its space to the target space.
  AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
      remembered_sets_;

  // The current collector type.
  CollectorType collector_type_;
  // Which collector we use when the app is in the foreground.
  CollectorType foreground_collector_type_;
  // Which collector we will use when the app is notified of a transition to background.
  CollectorType background_collector_type_;
  // Desired collector type; the heap trimming daemon transitions the heap if it is !=
  // collector_type_.
  CollectorType desired_collector_type_;

  // Lock which guards pending tasks.
  Mutex* pending_task_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // How many GC threads we may use for paused parts of garbage collection.
  const size_t parallel_gc_threads_;

  // How many GC threads we may use for unpaused parts of garbage collection.
  const size_t conc_gc_threads_;

  // Whether we are in low memory mode.
  const bool low_memory_mode_;

  // If we get a pause longer than long pause log threshold, then we print out the GC after it
  // finishes.
  const size_t long_pause_log_threshold_;

  // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
  const size_t long_gc_log_threshold_;

  // If we ignore the max footprint it lets the heap grow until it hits the heap capacity; this is
  // useful for benchmarking since it reduces time spent in GC to a low %.
  const bool ignore_max_footprint_;

  // Lock which guards zygote space creation.
  Mutex zygote_creation_lock_;

  // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
  // zygote space creation.
  space::ZygoteSpace* zygote_space_;

  // Minimum allocation size of large object.
  size_t large_object_threshold_;

  // Guards access to the state of GC; the associated condition variable is used to signal when a
  // GC completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
  Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
  // This counter keeps track of how many threads are currently in a JNI critical section. This is
  // incremented once per thread even with nested enters.
  size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
  bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);

  // Reference processor.
  std::unique_ptr<ReferenceProcessor> reference_processor_;

  // Task processor, proxies heap trim requests to the daemon threads.
  std::unique_ptr<TaskProcessor> task_processor_;

  // True while the garbage collector is running.
  volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);

  // Last GC type we ran. Used by WaitForConcurrentGc to know which GC was waited on.
  volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
  collector::GcType next_gc_type_;

  // Maximum size that the heap can reach.
  size_t capacity_;

  // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
  // programs it is "cleared", making it the same as capacity.
  size_t growth_limit_;

  // When the number of bytes allocated exceeds the footprint TryAllocate returns null indicating
  // a GC should be triggered.
  size_t max_allowed_footprint_;

  // The watermark at which a concurrent GC is requested by registerNativeAllocation.
  size_t native_footprint_gc_watermark_;

  // Whether or not we need to run finalizers in the next native allocation.
  bool native_need_to_run_finalization_;

  // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
  // it completes ahead of an allocation failing.
  size_t concurrent_start_bytes_;

  // Since the heap was created, how many bytes have been freed.
  uint64_t total_bytes_freed_ever_;

  // Since the heap was created, how many objects have been freed.
  uint64_t total_objects_freed_ever_;

  // Number of bytes allocated. Adjusted after each allocation and free.
  Atomic<size_t> num_bytes_allocated_;

  // Bytes which are allocated and managed by native code but still need to be accounted for.
  Atomic<size_t> native_bytes_allocated_;

  // Native allocation stats.
  Mutex native_histogram_lock_;
  Histogram<uint64_t> native_allocation_histogram_;
  Histogram<uint64_t> native_free_histogram_;

  // Number of bytes freed by thread local buffer revokes. This will cancel out the ahead-of-time
  // bulk counting of bytes allocated in rosalloc thread-local buffers. It is temporarily
  // accumulated here to be subtracted from num_bytes_allocated_ later at the next GC.
  Atomic<size_t> num_bytes_freed_revoke_;
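
  // Accounting sketch: at the next GC the accumulated revoke bytes are folded back into the main
  // counter (the ordering of the atomics here is an assumption, not the verbatim code):
  //
  //   size_t freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
  //   num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(freed);
  //   num_bytes_allocated_.FetchAndSubSequentiallyConsistent(freed);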

  // Info related to the current or previous GC iteration.
  collector::Iteration current_gc_iteration_;

  // Heap verification flags.
  const bool verify_missing_card_marks_;
  const bool verify_system_weaks_;
  const bool verify_pre_gc_heap_;
  const bool verify_pre_sweeping_heap_;
  const bool verify_post_gc_heap_;
  const bool verify_mod_union_table_;
  bool verify_pre_gc_rosalloc_;
  bool verify_pre_sweeping_rosalloc_;
  bool verify_post_gc_rosalloc_;
  const bool gc_stress_mode_;

  // RAII that temporarily disables the rosalloc verification during
  // the zygote fork.
  class ScopedDisableRosAllocVerification {
   private:
    Heap* const heap_;
    const bool orig_verify_pre_gc_;
    const bool orig_verify_pre_sweeping_;
    const bool orig_verify_post_gc_;

   public:
    explicit ScopedDisableRosAllocVerification(Heap* heap)
        : heap_(heap),
          orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
          orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
          orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
      heap_->verify_pre_gc_rosalloc_ = false;
      heap_->verify_pre_sweeping_rosalloc_ = false;
      heap_->verify_post_gc_rosalloc_ = false;
    }
    ~ScopedDisableRosAllocVerification() {
      heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
      heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
      heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
    }
  };

  // Parallel GC data structures.
  std::unique_ptr<ThreadPool> thread_pool_;

  // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
  // and the start of the current one.
  uint64_t allocation_rate_;
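
  // Rate arithmetic, sketched with illustrative local names:
  //
  //   uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns);
  //   allocation_rate_ = (bytes_allocated_since_last_gc * 1000) / ms_delta;  // bytes per second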

  // For a GC cycle, a bitmap that is set corresponding to the objects known to be live (the live
  // bitmap) and to the objects marked by the current cycle (the mark bitmap).
  std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);

  // Mark stack that we reuse to avoid re-allocating the mark stack.
  std::unique_ptr<accounting::ObjectStack> mark_stack_;

  // Allocation stack; new allocations go here so that we can do sticky mark bits. This enables us
  // to use the live bitmap as the old mark bitmap.
  const size_t max_allocation_stack_size_;
  std::unique_ptr<accounting::ObjectStack> allocation_stack_;

  // Second allocation stack so that we can process allocation with the heap unlocked.
  std::unique_ptr<accounting::ObjectStack> live_stack_;

  // Allocator type.
  AllocatorType current_allocator_;
  const AllocatorType current_non_moving_allocator_;

  // Which GCs we run in order when an allocation fails.
  std::vector<collector::GcType> gc_plan_;

  // Bump pointer spaces.
  space::BumpPointerSpace* bump_pointer_space_;
  // Temp space is the space which the semispace collector copies to.
  space::BumpPointerSpace* temp_space_;

  space::RegionSpace* region_space_;

  // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
  // utilization, regardless of target utilization ratio.
  size_t min_free_;

  // The ideal maximum free size, when we grow the heap for utilization.
  size_t max_free_;

  // Target ideal heap utilization ratio.
  double target_utilization_;

  // How much more we grow the heap when we are a foreground app instead of background.
  double foreground_heap_growth_multiplier_;

  // Total time which mutators are paused or waiting for GC to complete.
  uint64_t total_wait_time_;

  // The current state of heap verification, may be enabled or disabled.
  VerifyObjectMode verify_object_mode_;

  // Compacting GC disable count; prevents compacting GC from running iff > 0.
  size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);

  std::vector<collector::GarbageCollector*> garbage_collectors_;
  collector::SemiSpace* semi_space_collector_;
  collector::MarkCompact* mark_compact_collector_;
  collector::ConcurrentCopying* concurrent_copying_collector_;

  const bool is_running_on_memory_tool_;
  const bool use_tlab_;

  // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
  // Use unique_ptr since the space is only added during the homogeneous compaction phase.
  std::unique_ptr<space::MallocSpace> main_space_backup_;

  // Minimal interval allowed between two homogeneous space compactions caused by OOM.
  uint64_t min_interval_homogeneous_space_compaction_by_oom_;

  // Time of the last homogeneous space compaction caused by OOM.
  uint64_t last_time_homogeneous_space_compaction_by_oom_;

  // Saved OOMs by homogeneous space compaction.
  Atomic<size_t> count_delayed_oom_;

  // Count for requested homogeneous space compaction.
  Atomic<size_t> count_requested_homogeneous_space_compaction_;

  // Count for ignored homogeneous space compaction.
  Atomic<size_t> count_ignored_homogeneous_space_compaction_;

  // Count for performed homogeneous space compaction.
  Atomic<size_t> count_performed_homogeneous_space_compaction_;

  // Whether or not a concurrent GC is pending.
  Atomic<bool> concurrent_gc_pending_;

  // Active tasks which we can modify (change target time, desired collector type, etc.).
  CollectorTransitionTask* pending_collector_transition_ GUARDED_BY(pending_task_lock_);
  HeapTrimTask* pending_heap_trim_ GUARDED_BY(pending_task_lock_);

  // Whether or not we use homogeneous space compaction to avoid OOM errors.
  bool use_homogeneous_space_compaction_for_oom_;

  // True if the currently running collection has made some thread wait.
  bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
  // The number of blocking GC runs.
  uint64_t blocking_gc_count_;
  // The total duration of blocking GC runs.
  uint64_t blocking_gc_time_;
  // The duration of the window for the GC count rate histograms.
  static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
  // The last time when the GC count rate histograms were updated.
  // This is rounded by kGcCountRateHistogramWindowDuration (a multiple of 10s).
  uint64_t last_update_time_gc_count_rate_histograms_;
  // The running count of GC runs in the last window.
  uint64_t gc_count_last_window_;
  // The running count of blocking GC runs in the last window.
  uint64_t blocking_gc_count_last_window_;
  // The maximum number of buckets in the GC count rate histograms.
  static constexpr size_t kGcCountRateMaxBucketCount = 200;
  // The histogram of the number of GC invocations per window duration.
  Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
  // The histogram of the number of blocking GC invocations per window duration.
  Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);

  // Allocation tracking support.
  Atomic<bool> alloc_tracking_enabled_;
  std::unique_ptr<AllocRecordObjectMap> allocation_records_;

  // GC stress related data structures.
  Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // Debugging counters: seen backtraces vs. unique backtraces.
  Atomic<uint64_t> seen_backtrace_count_;
  Atomic<uint64_t> unique_backtrace_count_;
  // Stack trace hashes that we already saw.
  std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);

  // We disable GC when we are shutting down the runtime in case there are daemon threads still
  // allocating.
  bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);

  // Boot image spaces.
  std::vector<space::ImageSpace*> boot_image_spaces_;

  friend class CollectorTransitionTask;
  friend class collector::GarbageCollector;
  friend class collector::MarkCompact;
  friend class collector::ConcurrentCopying;
  friend class collector::MarkSweep;
  friend class collector::SemiSpace;
  friend class ReferenceQueue;
  friend class ScopedGCCriticalSection;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_H_