/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "allocator_type.h"
#include "atomic.h"
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
#include "gc/gc_cause.h"
#include "gc/collector/garbage_collector.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "globals.h"
#include "gtest/gtest.h"
#include "instruction_set.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "reference_processor.h"
#include "safe_map.h"
#include "thread_pool.h"
#include "verify_object.h"

namespace art {

class ConditionVariable;
class Mutex;
class StackVisitor;
class Thread;
class TimingLogger;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {

class ReferenceProcessor;

namespace accounting {
  class HeapBitmap;
  class ModUnionTable;
  class RememberedSet;
}  // namespace accounting

namespace collector {
  class ConcurrentCopying;
  class GarbageCollector;
  class MarkCompact;
  class MarkSweep;
  class SemiSpace;
}  // namespace collector

namespace space {
  class AllocSpace;
  class BumpPointerSpace;
  class DiscontinuousSpace;
  class DlMallocSpace;
  class ImageSpace;
  class LargeObjectSpace;
  class MallocSpace;
  class RosAllocSpace;
  class Space;
  class SpaceTest;
  class ContinuousMemMapAllocSpace;
}  // namespace space

class AgeCardVisitor {
 public:
  byte operator()(byte card) const {
    if (card == accounting::CardTable::kCardDirty) {
      return card - 1;
    } else {
      return 0;
    }
  }
};
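
// Illustrative sketch (not part of this header's API): aging maps a dirty card
// to kCardDirty - 1 and clears every other value, so a card survives exactly
// one aging pass after its last dirtying write.
//
//   AgeCardVisitor age;
//   byte aged = age(accounting::CardTable::kCardDirty);         // kCardDirty - 1
//   byte dropped = age(accounting::CardTable::kCardDirty - 1);  // 0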

// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace.
static constexpr bool kUseRosAlloc = true;

// If true, use a thread-local allocation stack.
static constexpr bool kUseThreadLocalAllocationStack = true;

// The process state passed in from the activity manager, used to determine when to do trimming
// and compaction.
enum ProcessState {
  kProcessStateJankPerceptible = 0,
  kProcessStateJankImperceptible = 1,
};
std::ostream& operator<<(std::ostream& os, const ProcessState& process_state);

class Heap {
 public:
  // If true, measure the total allocation time.
  static constexpr bool kMeasureAllocationTime = false;
  // Primitive arrays larger than this size are put in the large object space.
  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;

  static constexpr size_t kDefaultStartingSize = kPageSize;
  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 256 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultTLABSize = 256 * KB;
  static constexpr double kDefaultTargetUtilization = 0.5;
  static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
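
  // Worked example (illustrative numbers, not normative): with the default
  // target utilization of 0.5, a GC that leaves 32 MB live suggests a raw
  // footprint of 32 MB / 0.5 = 64 MB, but the free headroom is clamped to at
  // most kDefaultMaxFree (2 MB), so the footprint lands at about 34 MB; the
  // growth multiplier scales these bounds for foreground apps.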

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // How often we allow heap trimming to happen (nanoseconds).
  static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
  // How long we wait after a transition request to perform a collector transition (nanoseconds).
  static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);

  // Create a heap with the requested sizes. The possibly empty image_file_name specifies the
  // image Space to load based on ImageWriter output.
  explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
                size_t max_free, double target_utilization,
                double foreground_heap_growth_multiplier, size_t capacity,
                const std::string& original_image_file_name,
                InstructionSet image_instruction_set,
                CollectorType foreground_collector_type, CollectorType background_collector_type,
                size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
                size_t long_pause_threshold, size_t long_gc_threshold,
                bool ignore_max_footprint, bool use_tlab,
                bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
                bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
                bool verify_post_gc_rosalloc);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
                              const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                         GetCurrentAllocator(),
                                                         pre_fence_visitor);
  }

  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
                                        const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                         GetCurrentNonMovingAllocator(),
                                                         pre_fence_visitor);
  }

  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
      Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
      const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
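
  // Illustrative sketch (assumed calling context, not a prescribed pattern):
  // both helpers above funnel into AllocObjectWithAllocator, so a plain
  // instrumented allocation with no pre-fence work might look like:
  //
  //   mirror::Object* obj = heap->AllocObject<true>(
  //       self, klass, klass->GetObjectSize(), VoidFunctor());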

  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }

  // Visit all of the live objects in the heap.
  void VisitObjects(ObjectCallback callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void RegisterNativeAllocation(JNIEnv* env, int bytes);
  void RegisterNativeFree(JNIEnv* env, int bytes);
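
  // Illustrative call pattern (a sketch; this is what
  // dalvik.system.VMRuntime.registerNativeAllocation drives): tell the heap
  // about native memory tied to a Java object's lifetime, and balance it on
  // release so the native watermarks stay accurate.
  //
  //   heap->RegisterNativeAllocation(env, buffer_bytes);
  //   // ... native buffer lives for as long as its Java owner ...
  //   heap->RegisterNativeFree(env, buffer_bytes);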

  // Change the allocator and update the entrypoints.
  void ChangeAllocator(AllocatorType allocator)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Transition the garbage collector during runtime, may copy objects from one space to another.
  void TransitionCollector(CollectorType collector_type);

  // Change the collector to be one of the possible options (MS, CMS, SS).
  void ChangeCollector(CollectorType collector_type)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // The given reference is believed to be to an object in the Java heap; check its soundness.
  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
  // proper lock ordering for it.
  void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;

  // Check sanity of all live references.
  void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  // Returns how many failures occurred.
  size_t VerifyHeapReferences(bool verify_referents = true)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
  bool VerifyMissingCardMarks()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock and
  // doesn't abort on error, allowing the caller to report more meaningful diagnostics.
  bool IsValidObjectAddress(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
  // very slow.
  bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack = true,
                          bool search_live_stack = true, bool sorted = false)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(const mirror::Object* obj) const;

  // Temporarily disable moving (compacting) GC; it stays disabled until the matching decrement,
  // so object addresses are stable in between.
  void IncrementDisableMovingGC(Thread* self);
  void DecrementDisableMovingGC(Thread* self);
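
  // Illustrative pairing (a sketch, not a required idiom): bracket a region
  // where raw object pointers must not be relocated by a moving collector.
  //
  //   heap->IncrementDisableMovingGC(self);
  //   // ... safely use direct object addresses ...
  //   heap->DecrementDisableMovingGC(self);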

  // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
  void ClearMarkedObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references);

  // Does a concurrent GC; should only be called by the GC daemon thread through runtime.
  void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                      uint64_t* counts)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP RT_Instances.
  void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(mirror::Object* o, int32_t max_count,
                           std::vector<mirror::Object*>& referring_objects)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set the heap's private space pointers to be the same as the space based on its type. Public
  // due to usage by tests.
  void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void AddSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700295
Ian Rogers30fab402012-01-23 15:43:46 -0800296 // Set target ideal heap utilization ratio, implements
297 // dalvik.system.VMRuntime.setTargetHeapUtilization.
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700298 void SetTargetHeapUtilization(float target);
Ian Rogers3bb17a62012-01-27 23:56:44 -0800299
300 // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
301 // from the system. Doesn't allow the space to exceed its growth limit.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -0800302 void SetIdealFootprint(size_t max_allowed_footprint);
Elliott Hughes7ede61e2011-09-14 18:18:06 -0700303
Mathieu Chartier590fee92013-09-13 13:46:47 -0700304 // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
305 // waited for.
Mathieu Chartier89a201e2014-05-02 10:27:26 -0700306 collector::GcType WaitForGcToComplete(GcCause cause, Thread* self)
307 LOCKS_EXCLUDED(gc_complete_lock_);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700308
Mathieu Chartierca2a24d2013-11-25 15:12:12 -0800309 // Update the heap's process state to a new value, may cause compaction to occur.
310 void UpdateProcessState(ProcessState process_state);
311
Ian Rogers1d54e732013-05-02 21:10:01 -0700312 const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
313 return continuous_spaces_;
Mathieu Chartier2b82db42012-11-14 17:29:05 -0800314 }
315
Ian Rogers1d54e732013-05-02 21:10:01 -0700316 const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
317 return discontinuous_spaces_;
Carl Shapiro58551df2011-07-24 03:09:51 -0700318 }
Carl Shapiro61e019d2011-07-14 16:53:09 -0700319
Mathieu Chartier10fb83a2014-06-15 15:15:43 -0700320 const collector::Iteration* GetCurrentGcIteration() const {
321 return &current_gc_iteration_;
322 }
323 collector::Iteration* GetCurrentGcIteration() {
324 return &current_gc_iteration_;
325 }
326
Ian Rogers04d7aa92013-03-16 14:29:17 -0700327 // Enable verification of object references when the runtime is sufficiently initialized.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -0800328 void EnableObjectValidation() {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800329 verify_object_mode_ = kVerifyObjectSupport;
330 if (verify_object_mode_ > kVerifyObjectModeDisabled) {
Ian Rogers04d7aa92013-03-16 14:29:17 -0700331 VerifyHeap();
332 }
Elliott Hughes85d15452011-09-16 17:33:01 -0700333 }
334
Ian Rogers04d7aa92013-03-16 14:29:17 -0700335 // Disable object reference verification for image writing.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -0800336 void DisableObjectValidation() {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800337 verify_object_mode_ = kVerifyObjectModeDisabled;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -0700338 }
339
Ian Rogers04d7aa92013-03-16 14:29:17 -0700340 // Other checks may be performed if we know the heap should be in a sane state.
Ian Rogers23435d02012-09-24 11:23:12 -0700341 bool IsObjectValidationEnabled() const {
Mathieu Chartier4e305412014-02-19 10:54:44 -0800342 return verify_object_mode_ > kVerifyObjectModeDisabled;
Ian Rogers23435d02012-09-24 11:23:12 -0700343 }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Returns the heap growth multiplier, this affects how much we grow the heap after a GC.
  // Scales heap growth, min free, and max free.
  double HeapGrowthMultiplier() const;

  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
  // free-list backed space.
  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if NULL is stored in the field.
  void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
                         const mirror::Object* /*new_value*/) {
    card_table_->MarkCard(dst);
  }

  // Write barrier for array operations that update many field positions.
  void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
                         size_t /*length TODO: element_count or byte_count?*/) {
    card_table_->MarkCard(dst);
  }

  void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
    card_table_->MarkCard(obj);
  }
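
  // Illustrative ordering (a sketch; the store call shown is assumed for
  // context, not defined in this header): dirty the card after the reference
  // store so a concurrent card scan cannot miss the update.
  //
  //   dst->SetFieldObject</*kTransactionActive=*/false>(offset, new_value);
  //   heap->WriteBarrierField(dst, offset, new_value);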

  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  void AddFinalizerReference(Thread* self, mirror::Object** object);

  // Returns the number of bytes currently allocated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_.LoadSequentiallyConsistent();
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // Returns the total number of objects allocated since the heap was created.
  size_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  size_t GetBytesAllocatedEver() const;

  // Returns the total number of objects freed since the heap was created.
  size_t GetObjectsFreedEver() const {
    return total_objects_freed_ever_;
  }

  // Returns the total number of bytes freed since the heap was created.
  size_t GetBytesFreedEver() const {
    return total_bytes_freed_ever_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  size_t GetMaxMemory() const {
    return growth_limit_;
  }

  // Implements java.lang.Runtime.totalMemory, returning the amount of memory consumed by an
  // application.
  size_t GetTotalMemory() const;

  // Implements java.lang.Runtime.freeMemory.
  size_t GetFreeMemory() const {
    size_t bytes_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
    // Make sure we don't get a negative number since the max allowed footprint is only updated
    // after the GC. But we can still allocate even if bytes_allocated > max_allowed_footprint_.
    return std::max(max_allowed_footprint_, bytes_allocated) - bytes_allocated;
  }
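
  // Worked example (illustrative numbers): with max_allowed_footprint_ = 64 MB
  // and 24 MB allocated, this returns 40 MB; if allocation has raced past the
  // footprint to 70 MB, the max() clamps the result to 0 instead of
  // underflowing.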

  // Get the space that corresponds to an object's address. Current implementation searches all
  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using faster data structure like binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
                                                              bool fail_ok) const;
  space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;

  void DumpForSigQuit(std::ostream& os);

  // Do a pending heap transition or trim.
  void DoPendingTransitionOrTrim() LOCKS_EXCLUDED(heap_trim_request_lock_);

  // Trim the managed and native heaps by releasing unused memory back to the OS.
  void Trim() LOCKS_EXCLUDED(heap_trim_request_lock_);

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeRosAllocThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();
  void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  void RosAllocVerification(TimingLogger* timings, const char* name)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;

  // Mark and empty the allocation stack.
  void FlushAllocStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Revoke all the thread-local allocation stacks.
  void RevokeAllThreadLocalAllocationStacks(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  // TODO: Refactor?
  void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
                      accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
                      accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
                      accounting::ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark the specified allocation stack as live.
  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Unbind any bound bitmaps.
  void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
  // Assumes there is only one image space.
  space::ImageSpace* GetImageSpace() const;

  // Permanently disable compaction.
  void DisableCompaction();

  space::DlMallocSpace* GetDlMallocSpace() const {
    return dlmalloc_space_;
  }

  space::RosAllocSpace* GetRosAllocSpace() const {
    return rosalloc_space_;
  }

  space::MallocSpace* GetNonMovingSpace() const {
    return non_moving_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  // Returns the free list space that may contain movable objects (the one that's not the
  // non-moving space), either rosalloc_space_ or dlmalloc_space_.
  space::MallocSpace* GetPrimaryFreeListSpace() {
    if (kUseRosAlloc) {
      DCHECK(rosalloc_space_ != nullptr);
      // reinterpret_cast is necessary as the space class hierarchy
      // isn't known (#included) yet here.
      return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
    } else {
      DCHECK(dlmalloc_space_ != nullptr);
      return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
    }
  }

  void DumpSpaces(std::ostream& stream = LOG(INFO));

  // Dump object should only be used by the signal handler.
  void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
  // Safe variants of PrettyTypeOf and class descriptor lookup which check that the objects are
  // valid heap addresses.
  std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
  std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  // GC performance measuring.
  void DumpGcPerformanceInfo(std::ostream& os);

  // Returns true if we currently care about pause times.
  bool CareAboutPauseTimes() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }
  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);

  accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
  void AddRememberedSet(accounting::RememberedSet* remembered_set);
  void RemoveRememberedSet(space::Space* space);

  bool IsCompilingBoot() const;
  bool RunningOnValgrind() const {
    return running_on_valgrind_;
  }
  bool HasImageSpace() const;

  ReferenceProcessor* GetReferenceProcessor() {
    return &reference_processor_;
  }

 private:
  void Compact(space::ContinuousMemMapAllocSpace* target_space,
               space::ContinuousMemMapAllocSpace* source_space)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);

  static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
    return
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB;
  }
  static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
    return AllocatorHasAllocationStack(allocator_type);
  }
  static bool IsMovingGc(CollectorType collector_type) {
    return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
        collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC;
  }
  bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                       mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  accounting::ObjectStack* GetMarkStack() {
    return mark_stack_.get();
  }

  // We don't force this to be inlined since it is a slow path.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
                                   const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Handles Allocate()'s slow allocation path with GC involved after
  // an initial allocation attempt failed.
  mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
                                         size_t* bytes_allocated, size_t* usable_size,
                                         mirror::Class** klass)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Allocate into a specific space.
  mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c,
                               size_t bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
  // that the switch statement is constant optimized in the entrypoints.
  template <const bool kInstrumented, const bool kGrow>
  ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
                                              size_t alloc_size, size_t* bytes_allocated,
                                              size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template <bool kGrow>
  bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);

  // Returns true if the address passed in is within the address range of a continuous space.
  bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Run the finalizers.
  void RunFinalization(JNIEnv* env);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);

  void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
      LOCKS_EXCLUDED(heap_trim_request_lock_);
  void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  void RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RequestConcurrentGC(Thread* self)
      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  bool IsGCRequestPending() const;

  // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
  // which type of GC was actually run.
  collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
                                           bool clear_soft_references)
      LOCKS_EXCLUDED(gc_complete_lock_,
                     Locks::heap_bitmap_lock_,
                     Locks::thread_suspend_count_lock_);

  void PreGcVerification(collector::GarbageCollector* gc)
      LOCKS_EXCLUDED(Locks::mutator_lock_);
  void PreGcVerificationPaused(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PreSweepingGcVerification(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PostGcVerification(collector::GarbageCollector* gc)
      LOCKS_EXCLUDED(Locks::mutator_lock_);
  void PostGcVerificationPaused(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Update the watermark for the native allocated bytes based on the current number of native
  // bytes allocated and the target utilization ratio.
  void UpdateMaxNativeFootprint();

  // Find a collector based on GC type.
  collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);

  // Create the main free list space, typically either a RosAlloc space or DlMalloc space.
  void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                             size_t capacity);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection.
  void GrowForUtilization(collector::GarbageCollector* collector_ran);
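
  // Sketch of the intended math (illustrative numbers; ignores the foreground
  // growth multiplier): target = bytes_allocated / target_utilization, with
  // the free headroom clamped to [min_free, max_free]. E.g. 32 MB live at a
  // 0.5 target suggests 64 MB, but a 2 MB max_free clamps the footprint to
  // 34 MB.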

  size_t GetPercentFree();

  static void VerificationCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Swap the allocation stack with the live stack.
  void SwapStacks(Thread* self);

  // Clear cards and update the mod union table.
  void ProcessCards(TimingLogger* timings, bool use_rem_sets);

  // Signal the heap trim daemon that there is something to do, either a heap transition or heap
  // trim.
  void SignalHeapTrimDaemon(Thread* self);

  // Push an object onto the allocation stack.
  void PushOnAllocationStack(Thread* self, mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns true if the collector runs concurrently with mutator threads: currently true for the
  // concurrent mark sweep and concurrent copying collectors, false for other GC types.
  bool IsGcConcurrent() const ALWAYS_INLINE {
    return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
  }

  // All-known continuous spaces, where objects lie within fixed bounds.
  std::vector<space::ContinuousSpace*> continuous_spaces_;

  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;

  // All-known alloc spaces, where objects may be or have been allocated.
  std::vector<space::AllocSpace*> alloc_spaces_;

  // A space where non-movable objects are allocated; when compaction is enabled it contains
  // Classes, ArtMethods, ArtFields, and non-moving objects.
  space::MallocSpace* non_moving_space_;

  // Space which we use for the kAllocatorTypeROSAlloc.
  space::RosAllocSpace* rosalloc_space_;

  // Space which we use for the kAllocatorTypeDlMalloc.
  space::DlMallocSpace* dlmalloc_space_;

  // The main space is the space which the GC copies to and from on process state updates. This
  // space is typically either the dlmalloc_space_ or the rosalloc_space_.
  space::MallocSpace* main_space_;

  // The large object space we are currently allocating into.
  space::LargeObjectSpace* large_object_space_;

  // The card table, dirtied by the write barrier.
  std::unique_ptr<accounting::CardTable> card_table_;

  // A mod-union table remembers all of the references from its space to other spaces.
  SafeMap<space::Space*, accounting::ModUnionTable*> mod_union_tables_;

  // A remembered set remembers all of the references from its space to the target space.
  SafeMap<space::Space*, accounting::RememberedSet*> remembered_sets_;

  // The current collector type.
  CollectorType collector_type_;
  // Which collector we use when the app is in the foreground.
  CollectorType foreground_collector_type_;
  // Which collector we will use when the app is notified of a transition to background.
  CollectorType background_collector_type_;
  // Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.
  CollectorType desired_collector_type_;

  // Lock which guards heap trim requests.
  Mutex* heap_trim_request_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // When we want to perform the next heap trim (nanoseconds).
  uint64_t last_trim_time_ GUARDED_BY(heap_trim_request_lock_);
  // When we want to perform the next heap transition (nanoseconds) or heap trim.
  uint64_t heap_transition_or_trim_target_time_ GUARDED_BY(heap_trim_request_lock_);
  // If we have a heap trim request pending.
  bool heap_trim_request_pending_ GUARDED_BY(heap_trim_request_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700775
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700776 // How many GC threads we may use for paused parts of garbage collection.
777 const size_t parallel_gc_threads_;
778
779 // How many GC threads we may use for unpaused parts of garbage collection.
780 const size_t conc_gc_threads_;
Mathieu Chartier63a54342013-07-23 13:17:59 -0700781
Mathieu Chartiere0a53e92013-08-05 10:17:40 -0700782 // Boolean for if we are in low memory mode.
783 const bool low_memory_mode_;
784
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700785 // If we get a pause longer than long pause log threshold, then we print out the GC after it
786 // finishes.
787 const size_t long_pause_log_threshold_;
788
789 // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
790 const size_t long_gc_log_threshold_;
791
792 // If we ignore the max footprint it lets the heap grow until it hits the heap capacity, this is
793 // useful for benchmarking since it reduces time spent in GC to a low %.
794 const bool ignore_max_footprint_;
795
Mathieu Chartier8e4a96d2014-05-21 10:44:32 -0700796 // Lock which guards zygote space creation.
797 Mutex zygote_creation_lock_;
798
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700799 // If we have a zygote space.
800 bool have_zygote_space_;
801
Mathieu Chartierbd0a6532014-02-27 11:14:21 -0800802 // Minimum allocation size of large object.
803 size_t large_object_threshold_;
804
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700805 // Guards access to the state of GC, associated conditional variable is used to signal when a GC
806 // completes.
807 Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Ian Rogers700a4022014-05-19 16:49:03 -0700808 std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700809
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -0700810 // Reference processor;
811 ReferenceProcessor reference_processor_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -0700812
Carl Shapiro58551df2011-07-24 03:09:51 -0700813 // True while the garbage collector is running.
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -0800814 volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
Mathieu Chartier866fb2a2012-09-10 10:47:49 -0700815
816 // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
Ian Rogers1d54e732013-05-02 21:10:01 -0700817 volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -0700818 collector::GcType next_gc_type_;
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -0700819
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700820 // Maximum size that the heap can reach.
Ian Rogers1d54e732013-05-02 21:10:01 -0700821 const size_t capacity_;
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700822
Ian Rogers1d54e732013-05-02 21:10:01 -0700823 // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
824 // programs it is "cleared" making it the same as capacity.
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700825 size_t growth_limit_;
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700826
Ian Rogers1d54e732013-05-02 21:10:01 -0700827 // When the number of bytes allocated exceeds the footprint TryAllocate returns NULL indicating
828 // a GC should be triggered.
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -0700829 size_t max_allowed_footprint_;
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700830
Mathieu Chartier987ccff2013-07-08 11:05:21 -0700831 // The watermark at which a concurrent GC is requested by registerNativeAllocation.
832 size_t native_footprint_gc_watermark_;
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700833
Mathieu Chartier987ccff2013-07-08 11:05:21 -0700834 // The watermark at which a GC is performed inside of registerNativeAllocation.
835 size_t native_footprint_limit_;
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700836
Mathieu Chartier590fee92013-09-13 13:46:47 -0700837 // Whether or not we need to run finalizers in the next native allocation.
838 bool native_need_to_run_finalization_;
839
Mathieu Chartierc39e3422013-08-07 16:41:36 -0700840 // Whether or not we currently care about pause times.
Mathieu Chartierca2a24d2013-11-25 15:12:12 -0800841 ProcessState process_state_;
Mathieu Chartierc39e3422013-08-07 16:41:36 -0700842
Ian Rogers1d54e732013-05-02 21:10:01 -0700843 // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
844 // it completes ahead of an allocation failing.
Mathieu Chartier0051be62012-10-12 17:47:11 -0700845 size_t concurrent_start_bytes_;
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -0700846
Ian Rogers1d54e732013-05-02 21:10:01 -0700847 // Since the heap was created, how many bytes have been freed.
848 size_t total_bytes_freed_ever_;
849
850 // Since the heap was created, how many objects have been freed.
851 size_t total_objects_freed_ever_;
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700852
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700853 // Number of bytes allocated. Adjusted after each allocation and free.
Ian Rogersef7d42f2014-01-06 12:55:46 -0800854 Atomic<size_t> num_bytes_allocated_;
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700855
Mathieu Chartier987ccff2013-07-08 11:05:21 -0700856 // Bytes which are allocated and managed by native code but still need to be accounted for.
Ian Rogersef7d42f2014-01-06 12:55:46 -0800857 Atomic<size_t> native_bytes_allocated_;
Mathieu Chartier987ccff2013-07-08 11:05:21 -0700858
Mathieu Chartier0a9dc052013-07-25 11:01:28 -0700859  // Memory overhead of GC data structures.
Ian Rogersef7d42f2014-01-06 12:55:46 -0800860 Atomic<size_t> gc_memory_overhead_;
Mathieu Chartier0a9dc052013-07-25 11:01:28 -0700861
Mathieu Chartier10fb83a2014-06-15 15:15:43 -0700862 // Info related to the current or previous GC iteration.
863 collector::Iteration current_gc_iteration_;
864
Mathieu Chartierfd678be2012-08-30 14:50:54 -0700865 // Heap verification flags.
Mathieu Chartierc7b83a02012-09-11 18:07:39 -0700866 const bool verify_missing_card_marks_;
867 const bool verify_system_weaks_;
868 const bool verify_pre_gc_heap_;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -0700869 const bool verify_pre_sweeping_heap_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -0700870 const bool verify_post_gc_heap_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -0700871 const bool verify_mod_union_table_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -0800872 bool verify_pre_gc_rosalloc_;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -0700873 bool verify_pre_sweeping_rosalloc_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -0800874 bool verify_post_gc_rosalloc_;
875
876 // RAII that temporarily disables the rosalloc verification during
877 // the zygote fork.
878 class ScopedDisableRosAllocVerification {
879 private:
Mathieu Chartier6f365cc2014-04-23 12:42:27 -0700880 Heap* const heap_;
881 const bool orig_verify_pre_gc_;
882 const bool orig_verify_pre_sweeping_;
883 const bool orig_verify_post_gc_;
884
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -0800885 public:
886 explicit ScopedDisableRosAllocVerification(Heap* heap)
887 : heap_(heap),
888 orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
Mathieu Chartier6f365cc2014-04-23 12:42:27 -0700889 orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -0800890 orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
891 heap_->verify_pre_gc_rosalloc_ = false;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -0700892 heap_->verify_pre_sweeping_rosalloc_ = false;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -0800893 heap_->verify_post_gc_rosalloc_ = false;
894 }
895 ~ScopedDisableRosAllocVerification() {
896 heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -0700897 heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -0800898 heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
899 }
900 };
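 // The same save-clear-restore idiom in a self-contained form, for reference
 // (illustrative only, not part of this header):
 //
 //   // Generic RAII guard: clears a flag on construction, restores the
 //   // original value on destruction, even if the scope exits early.
 //   class ScopedFlagClear {
 //    public:
 //     explicit ScopedFlagClear(bool* flag) : flag_(flag), original_(*flag) {
 //       *flag_ = false;
 //     }
 //     ~ScopedFlagClear() { *flag_ = original_; }
 //
 //    private:
 //     bool* const flag_;
 //     const bool original_;
 //   };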
Mathieu Chartierfd678be2012-08-30 14:50:54 -0700901
Mathieu Chartier02b6a782012-10-26 13:51:26 -0700902 // Parallel GC data structures.
Ian Rogers700a4022014-05-19 16:49:03 -0700903 std::unique_ptr<ThreadPool> thread_pool_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -0700904
Ian Rogers1d54e732013-05-02 21:10:01 -0700905 // The nanosecond time at which the last GC ended.
906 uint64_t last_gc_time_ns_;
Mathieu Chartier65db8802012-11-20 12:36:46 -0800907
908 // How many bytes were allocated at the end of the last GC.
909 uint64_t last_gc_size_;
910
Ian Rogers1d54e732013-05-02 21:10:01 -0700911  // Estimated allocation rate (bytes / second), computed over the interval between the end of the
912  // last GC and the start of the current one.
Mathieu Chartier65db8802012-11-20 12:36:46 -0800913 uint64_t allocation_rate_;
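 // A sketch of how a rate in bytes per second can be derived from these fields
 // (assumed helper; ART's exact computation may differ, and overflow for very
 // large byte counts is ignored here):
 //
 //   #include <cstdint>
 //
 //   inline uint64_t EstimateAllocationRate(uint64_t bytes_allocated_since_last_gc,
 //                                          uint64_t ns_since_last_gc) {
 //     if (ns_since_last_gc == 0) {
 //       return 0;  // Avoid dividing by zero immediately after a GC.
 //     }
 //     // bytes / ns, scaled to bytes / s.
 //     return bytes_allocated_since_last_gc * UINT64_C(1000000000) / ns_since_last_gc;
 //   }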
914
Ian Rogers1d54e732013-05-02 21:10:01 -0700915  // For a GC cycle, bitmaps recording which objects are live and which are marked.
Ian Rogers700a4022014-05-19 16:49:03 -0700916 std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
917 std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
Mathieu Chartierb062fdd2012-07-03 09:51:48 -0700918
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -0700919  // Mark stack that we reuse across GCs to avoid re-allocating it.
Ian Rogers700a4022014-05-19 16:49:03 -0700920 std::unique_ptr<accounting::ObjectStack> mark_stack_;
Mathieu Chartier5301cd22012-05-31 12:11:36 -0700921
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700922  // Allocation stack; new allocations go here so that we can do sticky mark bits. This enables us
923 // to use the live bitmap as the old mark bitmap.
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700924 const size_t max_allocation_stack_size_;
Ian Rogers700a4022014-05-19 16:49:03 -0700925 std::unique_ptr<accounting::ObjectStack> allocation_stack_;
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700926
927  // Second allocation stack so that we can process allocations with the heap unlocked.
Ian Rogers700a4022014-05-19 16:49:03 -0700928 std::unique_ptr<accounting::ObjectStack> live_stack_;
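 // Conceptually, the two stacks are swapped under the heap lock so the GC can
 // drain one while mutators keep pushing into the other. A toy model with
 // illustrative types (ART uses atomic stacks, not std::vector):
 //
 //   #include <utility>
 //   #include <vector>
 //
 //   struct TwoStacksSketch {
 //     std::vector<void*> allocation_stack;  // Mutators push new objects here.
 //     std::vector<void*> live_stack;        // The GC drains this one.
 //
 //     // Called with the heap lock held; afterwards the collector can process
 //     // live_stack without blocking new allocations.
 //     void SwapStacks() { std::swap(allocation_stack, live_stack); }
 //   };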
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700929
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800930 // Allocator type.
Mathieu Chartier50482232013-11-21 11:48:14 -0800931 AllocatorType current_allocator_;
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800932 const AllocatorType current_non_moving_allocator_;
933
934  // Which GCs we run, in order, when an allocation fails.
935 std::vector<collector::GcType> gc_plan_;
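 // A sketch of the escalation loop such a plan enables; the real allocator
 // interleaves more steps (growing the heap, clearing soft references) than
 // shown in this illustrative helper:
 //
 //   #include <cstddef>
 //   #include <vector>
 //
 //   enum class SketchGcType { kSticky, kPartial, kFull };
 //
 //   // Try progressively more thorough collections until the allocation fits.
 //   template <typename RunGc, typename TryAlloc>
 //   void* AllocateWithPlan(const std::vector<SketchGcType>& plan,
 //                          RunGc run_gc, TryAlloc try_alloc, size_t byte_count) {
 //     for (SketchGcType gc_type : plan) {
 //       run_gc(gc_type);
 //       if (void* obj = try_alloc(byte_count)) {
 //         return obj;
 //       }
 //     }
 //     return nullptr;  // Caller would throw OutOfMemoryError.
 //   }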
936
Mathieu Chartier590fee92013-09-13 13:46:47 -0700937 // Bump pointer spaces.
938 space::BumpPointerSpace* bump_pointer_space_;
939 // Temp space is the space which the semispace collector copies to.
940 space::BumpPointerSpace* temp_space_;
941
Mathieu Chartier0051be62012-10-12 17:47:11 -0700942 // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
943 // utilization, regardless of target utilization ratio.
944 size_t min_free_;
945
946 // The ideal maximum free size, when we grow the heap for utilization.
947 size_t max_free_;
948
Brian Carlstrom395520e2011-09-25 19:35:00 -0700949  // Target ideal heap utilization ratio.
Mathieu Chartier0051be62012-10-12 17:47:11 -0700950 double target_utilization_;
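 // How these three knobs interact when the heap grows after a GC, as a
 // simplified sketch (the clamping order is an assumption):
 //
 //   #include <algorithm>
 //   #include <cstddef>
 //
 //   // Size the footprint so that live data is target_utilization of it,
 //   // while keeping between min_free and max_free bytes of headroom.
 //   inline size_t ComputeTargetFootprint(size_t bytes_allocated,
 //                                        double target_utilization,
 //                                        size_t min_free, size_t max_free) {
 //     size_t target = static_cast<size_t>(bytes_allocated / target_utilization);
 //     target = std::min(target, bytes_allocated + max_free);
 //     target = std::max(target, bytes_allocated + min_free);
 //     return target;
 //   }
 //
 // For example, 40 MB allocated with a target utilization of 0.5 suggests an
 // 80 MB footprint; if max_free were 32 MB, the footprint would be capped at
 // 72 MB so headroom never exceeds the maximum.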
Brian Carlstrom395520e2011-09-25 19:35:00 -0700951
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -0700952  // How much more we grow the heap when the app is in the foreground instead of the background.
953 double foreground_heap_growth_multiplier_;
954
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700955  // Total time for which mutators are paused or waiting for GC to complete.
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700956 uint64_t total_wait_time_;
957
958  // Total time spent allocating objects, in microseconds.
Mathieu Chartier155dfe92012-10-09 14:24:49 -0700959 AtomicInteger total_allocation_time_;
960
Ian Rogers04d7aa92013-03-16 14:29:17 -0700961  // The current state of heap verification; it may be enabled or disabled.
Mathieu Chartier4e305412014-02-19 10:54:44 -0800962 VerifyObjectMode verify_object_mode_;
Ian Rogers04d7aa92013-03-16 14:29:17 -0700963
Mathieu Chartier1d27b342014-01-28 12:51:09 -0800964  // Compacting GC disable count; compacting GC is prevented from running while this is > 0.
965 size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700966
967 std::vector<collector::GarbageCollector*> garbage_collectors_;
968 collector::SemiSpace* semi_space_collector_;
Mathieu Chartier52e4b432014-06-10 11:22:31 -0700969 collector::MarkCompact* mark_compact_collector_;
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -0700970 collector::ConcurrentCopying* concurrent_copying_collector_;
Brian Carlstrom1f870082011-08-23 16:02:11 -0700971
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700972 const bool running_on_valgrind_;
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800973 const bool use_tlab_;
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700974
Mathieu Chartier6f365cc2014-04-23 12:42:27 -0700975 friend class collector::GarbageCollector;
Mathieu Chartier52e4b432014-06-10 11:22:31 -0700976 friend class collector::MarkCompact;
Ian Rogers1d54e732013-05-02 21:10:01 -0700977 friend class collector::MarkSweep;
Mathieu Chartier590fee92013-09-13 13:46:47 -0700978 friend class collector::SemiSpace;
Mathieu Chartier39e32612013-11-12 16:28:05 -0800979 friend class ReferenceQueue;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -0700980 friend class VerifyReferenceCardVisitor;
Mathieu Chartierfd678be2012-08-30 14:50:54 -0700981 friend class VerifyReferenceVisitor;
982 friend class VerifyObjectVisitor;
Mathieu Chartier7c88c602014-07-08 17:46:19 -0700983 friend class ScopedHeapFill;
Mathieu Chartierb43b7d42012-06-19 13:15:09 -0700984 friend class ScopedHeapLock;
Ian Rogers1d54e732013-05-02 21:10:01 -0700985 friend class space::SpaceTest;
Ian Rogers30fab402012-01-23 15:43:46 -0800986
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700987 class AllocationTimer {
988 private:
989 Heap* heap_;
990 mirror::Object** allocated_obj_ptr_;
991 uint64_t allocation_start_time_;
992 public:
993 AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
994 ~AllocationTimer();
995 };
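 // The timer's out-of-line definitions live elsewhere. As a self-contained
 // illustration of the same RAII timing idiom, with std::chrono and
 // std::atomic as stand-ins and the success check as an assumption:
 //
 //   #include <atomic>
 //   #include <chrono>
 //   #include <cstdint>
 //
 //   class ScopedAllocTimerSketch {
 //    public:
 //     ScopedAllocTimerSketch(std::atomic<uint64_t>* total_us, void** result_slot)
 //         : total_us_(total_us), result_slot_(result_slot),
 //           start_(std::chrono::steady_clock::now()) {}
 //     ~ScopedAllocTimerSketch() {
 //       if (*result_slot_ != nullptr) {  // Only time successful allocations.
 //         auto elapsed = std::chrono::steady_clock::now() - start_;
 //         total_us_->fetch_add(static_cast<uint64_t>(
 //             std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count()));
 //       }
 //     }
 //
 //    private:
 //     std::atomic<uint64_t>* const total_us_;
 //     void** const result_slot_;
 //     const std::chrono::steady_clock::time_point start_;
 //   };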
996
Carl Shapiro69759ea2011-07-21 18:13:35 -0700997 DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
998};
999
Mathieu Chartier7c88c602014-07-08 17:46:19 -07001000// ScopedHeapFill changes the bytes allocated counter to be equal to the growth limit. This
1001// causes the next allocation to perform a GC and possibly an OOM. It can be used to ensure that a
1002// GC happens in specific methods such as ThrowIllegalMonitorStateExceptionF in Monitor::Wait.
1003class ScopedHeapFill {
1004 public:
1005 explicit ScopedHeapFill(Heap* heap)
1006 : heap_(heap),
1007 delta_(heap_->GetMaxMemory() - heap_->GetBytesAllocated()) {
1008 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(delta_);
1009 }
1010 ~ScopedHeapFill() {
1011 heap_->num_bytes_allocated_.FetchAndSubSequentiallyConsistent(delta_);
1012 }
1013
1014 private:
1015 Heap* const heap_;
1016 const int64_t delta_;
1017};
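 // Hypothetical usage, assuming a valid gc::Heap* named heap; any allocation
 // inside the scope sees the counter at the growth limit and takes the GC path:
 //
 //   {
 //     gc::ScopedHeapFill heap_fill(heap);
 //     // Allocations here behave as if the heap were full: the allocator runs
 //     // a GC and may throw OutOfMemoryError before succeeding.
 //   }
 //   // On scope exit the delta is subtracted back, restoring the true count.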
1018
Ian Rogers1d54e732013-05-02 21:10:01 -07001019} // namespace gc
Carl Shapiro1fb86202011-06-27 17:43:13 -07001020} // namespace art
1021
Brian Carlstromfc0e3212013-07-17 14:40:12 -07001022#endif // ART_RUNTIME_GC_HEAP_H_