/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "garbage_collector.h"
#include "immune_region.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/space_bitmap.h"
#include "mirror/object.h"
#include "mirror/object_reference.h"
#include "safe_map.h"

#include <unordered_map>
#include <vector>

namespace art {
class RootInfo;

namespace gc {

namespace accounting {
  typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
  class HeapBitmap;
}  // namespace accounting

namespace space {
  class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // TODO: disable these flags for production use.
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = true;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = true;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = true;

  ConcurrentCopying(Heap* heap, const std::string& name_prefix = "");
  ~ConcurrentCopying();

  virtual void RunPhases() OVERRIDE;
  void InitializePhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void FinishPhase();

  void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return kCollectorTypeCC;
  }
  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  mirror::Object* Mark(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() {
    return weak_ref_access_enabled_.LoadRelaxed();
  }
  void RevokeThreadLocalMarkStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  void PushOntoMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* Copy(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void Scan(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void Process(mirror::Object* obj, MemberOffset offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void VerifyNoFromSpaceReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  void ProcessMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool ProcessMarkStackOnce() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void SwitchToGcExclusiveMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* IsMarked(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static mirror::Object* MarkCallback(mirror::Object* from_ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static mirror::Object* IsMarkedCallback(mirror::Object* from_ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static bool IsHeapReferenceMarkedCallback(
      mirror::HeapReference<mirror::Object>* field, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static void ProcessMarkStackCallback(void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void Sweep(bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void ClearBlackPtrs()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void IssueEmptyCheckpoint() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_);
  void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  space::RegionSpace* region_space_;  // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneRegion immune_region_;
  std::unique_ptr<accounting::HeapBitmap> cc_heap_bitmap_;
  std::vector<accounting::SpaceBitmap<kObjectAlignment>*> cc_bitmaps_;
  accounting::SpaceBitmap<kObjectAlignment>* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
  Atomic<MarkStackMode> mark_stack_mode_;
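  // Illustrative sketch only: one plausible way PushOntoMarkStack() could route a
  // reference based on mark_stack_mode_ (the names self/to_ref, MutexLock, and
  // ObjectStack::PushBack are assumptions here, not a claim about the actual
  // implementation in concurrent_copying.cc):
  //
  //   MarkStackMode mode = mark_stack_mode_.LoadRelaxed();
  //   if (mode == kMarkStackModeThreadLocal && self != thread_running_gc_) {
  //     // Push onto this thread's thread-local mark stack; the GC later drains
  //     // these via RevokeThreadLocalMarkStacks().
  //   } else if (mode == kMarkStackModeShared) {
  //     MutexLock mu(self, mark_stack_lock_);  // Shared stack: every pusher takes the lock.
  //     gc_mark_stack_->PushBack(to_ref);
  //   } else if (mode == kMarkStackModeGcExclusive) {
  //     gc_mark_stack_->PushBack(to_ref);  // Only the GC-running thread; no lock needed.
  //   }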
  Atomic<bool> weak_ref_access_enabled_;

  // How many objects and bytes we moved. Used for accounting.
  Atomic<size_t> bytes_moved_;
  Atomic<size_t> objects_moved_;

  // The skipped blocks are memory blocks/chunks that held object copies
  // which went unused because of lost races (CAS failures) when installing
  // the forwarding pointer during object copy. They are reused.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;
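  // Hypothetical sketch of how AllocateInSkippedBlock() might reuse one of these
  // blocks (the variable self is assumed, and the real code's size rounding and
  // remainder re-insertion are omitted): pick the smallest recorded block that
  // fits, detach it under skipped_blocks_lock_, and pad any unused tail with a
  // dummy object so the to-space stays walkable.
  //
  //   MutexLock mu(self, skipped_blocks_lock_);
  //   auto it = skipped_blocks_map_.lower_bound(alloc_size);
  //   if (it == skipped_blocks_map_.end()) {
  //     return nullptr;  // No skipped block is large enough.
  //   }
  //   uint8_t* addr = it->second;
  //   size_t block_size = it->first;
  //   skipped_blocks_map_.erase(it);
  //   if (block_size > alloc_size) {
  //     FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
  //                         block_size - alloc_size);
  //   }
  //   return reinterpret_cast<mirror::Object*>(addr);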

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.

  friend class ConcurrentCopyingRefFieldsVisitor;
  friend class ConcurrentCopyingImmuneSpaceObjVisitor;
  friend class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor;
  friend class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor;
  friend class ConcurrentCopyingClearBlackPtrsVisitor;
  friend class ConcurrentCopyingLostCopyVisitor;
  friend class ThreadFlipVisitor;
  friend class FlipCallback;
  friend class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor;
  friend class RevokeThreadLocalMarkStackCheckpoint;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_