Clean up heap spaces.
We now use the CMS collector instead of the semispace collector when
the phone is booting. We still perform compaction during zygote
space creation. This reduces time spent in GC by ~2s during boot
and doesn't affect zygote space size.
Changed the space creation logic to create the temp space when a
background transition occurs.
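
Rough sketch of that idea (IllustrativeHeap, BackgroundTransition and
temp_space_ are made-up names, not the real Heap members): the temp
space is only set up when a compacting background transition actually
happens, instead of being kept around from startup.

    #include <memory>
    #include <string>
    #include <iostream>

    // Stand-in for a heap space; not the real ART space classes.
    struct IllustrativeSpace {
      std::string name;
      explicit IllustrativeSpace(std::string n) : name(std::move(n)) {}
    };

    struct IllustrativeHeap {
      std::unique_ptr<IllustrativeSpace> temp_space_;  // not created at startup

      // Hypothetical hook invoked when the app transitions to the background.
      void BackgroundTransition() {
        if (temp_space_ == nullptr) {
          // Pay the mem-map / address-space cost only when we actually compact.
          temp_space_.reset(new IllustrativeSpace("temp space"));
        }
        // ... compact the main space into temp_space_, then swap and release ...
      }
    };

    int main() {
      IllustrativeHeap heap;
      heap.BackgroundTransition();
      std::cout << "created " << heap.temp_space_->name << "\n";
      return 0;
    }
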
Added a flag to each space which is set if objects within that space
are allowed to be moved.
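
Minimal sketch of what such a flag looks like; the accessor name
CanMoveObjects() and the class here are assumptions for illustration,
not the exact declarations:

    #include <cassert>

    // Stand-in for a space carrying a "may move objects" flag.
    class IllustrativeSpace {
     public:
      explicit IllustrativeSpace(bool can_move_objects)
          : can_move_objects_(can_move_objects) {}
      // True if the collector is allowed to move objects inside this space.
      bool CanMoveObjects() const { return can_move_objects_; }
     private:
      const bool can_move_objects_;
    };

    int main() {
      IllustrativeSpace image_space(/*can_move_objects=*/false);  // e.g. image space
      IllustrativeSpace bump_space(/*can_move_objects=*/true);    // e.g. bump pointer space
      assert(!image_space.CanMoveObjects());
      assert(bump_space.CanMoveObjects());
      return 0;
    }
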
Removed the SwapSemiSpaces call from the semi-space collector; it is
now the caller's job to do this with threads suspended. This
simplifies the logic in the zygote compaction / heap transition code,
since these do not copy from one semispace to another.
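
The caller-side pattern now looks roughly like the sketch below;
SuspendAll/ResumeAll and the two structs are simplified stand-ins, not
the real runtime API:

    #include <iostream>
    #include <utility>

    struct IllustrativeSpace { const char* name; };

    struct IllustrativeHeap {
      IllustrativeSpace* from_space_;
      IllustrativeSpace* to_space_;

      // Must only be called with mutator threads suspended, otherwise a thread
      // could allocate into the space that is about to become the from-space.
      void SwapSemiSpaces() { std::swap(from_space_, to_space_); }
    };

    // Hypothetical helpers standing in for the runtime's thread suspension.
    void SuspendAll() { std::cout << "threads suspended\n"; }
    void ResumeAll() { std::cout << "threads resumed\n"; }

    int main() {
      IllustrativeSpace a{"space A"}, b{"space B"};
      IllustrativeHeap heap{&a, &b};
      SuspendAll();
      // ... run the semi-space collector: copy live objects from A into B ...
      heap.SwapSemiSpaces();  // now the caller's responsibility, not the collector's
      ResumeAll();
      std::cout << "allocating from " << heap.from_space_->name << "\n";
      return 0;
    }
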
Added Space::Clear to RosAllocSpace and DlMallocSpace. This greatly
simplifies the code used for collector transitions.
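
Sketch of why a Clear() method simplifies transitions: the transition
can reset the existing space in place instead of stashing a spare mem
map and rebuilding a space later. The class below is a simplified
stand-in for the real RosAllocSpace/DlMallocSpace implementations:

    #include <cstddef>
    #include <cstring>
    #include <iostream>
    #include <vector>

    // Illustrative malloc-space stand-in backed by a fixed buffer.
    class IllustrativeMallocSpace {
     public:
      explicit IllustrativeMallocSpace(std::size_t capacity)
          : memory_(capacity, 0), bytes_allocated_(0) {}

      void* Alloc(std::size_t bytes) {
        if (bytes_allocated_ + bytes > memory_.size()) return nullptr;
        void* result = &memory_[bytes_allocated_];
        bytes_allocated_ += bytes;
        return result;
      }

      // Reset the space for reuse: wipe the backing memory and allocator state.
      // The real versions would also recreate the underlying allocator and
      // madvise the pages, but the effect is the same: the space can be reused
      // for the next collector without keeping a spare mem map around.
      void Clear() {
        std::memset(memory_.data(), 0, memory_.size());
        bytes_allocated_ = 0;
      }

     private:
      std::vector<unsigned char> memory_;
      std::size_t bytes_allocated_;
    };

    int main() {
      IllustrativeMallocSpace space(1024);
      space.Alloc(128);
      space.Clear();  // transition can reuse the same space and mem map
      std::cout << "space cleared and ready for reuse\n";
      return 0;
    }

In the real code this is what lets allocator_mem_map_ and
post_zygote_non_moving_space_mem_map_ go away in the diff below.
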
Time spent in GC creating zygote space:
Before: 3.4s, After: 1.28s
No change in zygote space size.
Bug: 13878055
Change-Id: I700348ab7d5bf3aa537c0cd70c0fed09aa4b0623
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index a8989ec..912cf7d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -150,7 +150,7 @@
explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
size_t max_free, double target_utilization, size_t capacity,
const std::string& original_image_file_name,
- CollectorType post_zygote_collector_type, CollectorType background_collector_type,
+ CollectorType foreground_collector_type, CollectorType background_collector_type,
size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
size_t long_pause_threshold, size_t long_gc_threshold,
bool ignore_max_footprint, bool use_tlab, bool verify_pre_gc_heap,
@@ -196,8 +196,6 @@
void VisitObjects(ObjectCallback callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
-
void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ThrowOutOfMemoryError(size_t byte_count, bool large_object_allocation);
@@ -249,10 +247,6 @@
// Returns true if there is any chance that the object (obj) will move.
bool IsMovableObject(const mirror::Object* obj) const;
- // Returns true if an object is in the temp space, if this happens its usually indicative of
- // compaction related errors.
- bool IsInTempSpace(const mirror::Object* obj) const;
-
// Enables us to temporarily disable compacting GC until objects are released.
void IncrementDisableMovingGC(Thread* self);
void DecrementDisableMovingGC(Thread* self);
@@ -568,7 +562,8 @@
private:
void Compact(space::ContinuousMemMapAllocSpace* target_space,
- space::ContinuousMemMapAllocSpace* source_space);
+ space::ContinuousMemMapAllocSpace* source_space)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
@@ -580,7 +575,7 @@
static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
return AllocatorHasAllocationStack(allocator_type);
}
- static bool IsCompactingGC(CollectorType collector_type) {
+ static bool IsMovingGc(CollectorType collector_type) {
return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
collector_type == kCollectorTypeCC;
}
@@ -609,6 +604,10 @@
size_t bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
+ // wrong space.
+ void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
// that the switch statement is constant optimized in the entrypoints.
template <const bool kInstrumented, const bool kGrow>
@@ -668,6 +667,10 @@
// Find a collector based on GC type.
collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
+ // Create the main free list space, typically either a RosAlloc space or DlMalloc space.
+ void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
+ size_t capacity);
+
// Given the current contents of the alloc space, increase the allowed heap footprint to match
// the target utilization ratio. This should only be called immediately after a full garbage
// collection.
@@ -737,17 +740,10 @@
// A remembered set remembers all of the references from its space to the target space.
SafeMap<space::Space*, accounting::RememberedSet*> remembered_sets_;
- // Keep the free list allocator mem map lying around when we transition to background so that we
- // don't have to worry about virtual address space fragmentation.
- UniquePtr<MemMap> allocator_mem_map_;
-
- // The mem-map which we will use for the non-moving space after the zygote is done forking:
- UniquePtr<MemMap> post_zygote_non_moving_space_mem_map_;
-
// The current collector type.
CollectorType collector_type_;
- // Which collector we will switch to after zygote fork.
- CollectorType post_zygote_collector_type_;
+ // Which collector we use when the app is in the foreground.
+ CollectorType foreground_collector_type_;
// Which collector we will use when the app is notified of a transition to background.
CollectorType background_collector_type_;
// Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.