/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_

#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
  class Class;
  class Object;
  template<class T> class ObjectArray;
}  // namespace mirror

class StackVisitor;
class Thread;

namespace gc {

namespace accounting {
  template <typename T> class AtomicStack;
  class MarkIfReachesAllocspaceVisitor;
  class ModUnionClearCardVisitor;
  class ModUnionVisitor;
  class ModUnionTableBitmap;
  class MarkStackChunk;
  typedef AtomicStack<mirror::Object*> ObjectStack;
  class SpaceBitmap;
}  // namespace accounting

namespace space {
  class BumpPointerSpace;
  class ContinuousMemMapAllocSpace;
  class ContinuousSpace;
  class MallocSpace;
}  // namespace space

class Heap;

namespace collector {

class SemiSpace : public GarbageCollector {
 public:
  // If true, use remembered sets in the generational mode.
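  // A remembered set records references from the non-moving spaces into the bump pointer space,
  // so a bump-pointer-space-only collection can scan just those recorded slots instead of the
  // whole of those spaces (a summary of the intent; the details live in the accounting code).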
  static constexpr bool kUseRememberedSet = true;

  explicit SemiSpace(Heap* heap, bool generational = false,
                     const std::string& name_prefix = "");

  ~SemiSpace() {}

  virtual void InitializePhase();
  virtual bool IsConcurrent() const {
    return false;
  }
  virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkReachableObjects()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const {
    return kGcTypePartial;
  }

  // Sets the space that we will be copying objects to.
  void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);

  // Sets the space that we copy objects from.
  void SetFromSpace(space::ContinuousMemMapAllocSpace* from_space);

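  // A minimal sketch of one collection cycle as a driver might run it (in practice Heap drives
  // this; the space variables and GC cause below are illustrative, not prescriptive):
  //
  //   SemiSpace collector(heap, /* generational */ false);
  //   collector.SetFromSpace(bump_pointer_space);
  //   collector.SetToSpace(temp_space);
  //   collector.Run(kGcCauseCollectorTransition, /* clear_soft_references */ false);
  //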
  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultMarkBitmap();

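  // Marking an object conceptually works as follows (a sketch of the contract, not the exact
  // implementation): if the object is in the from-space and has already been copied, return the
  // stored forwarding address; otherwise copy it to the to-space, install the forwarding address
  // in the from-space object, and return the new copy.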
  // Returns the new address of the object.
  mirror::Object* MarkObject(mirror::Object* object)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void ScanObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void VerifyNoFromSpaceReferences(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Marks the root set at the start of a garbage collection.
  void MarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, e.g.
  // the image. Mark that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void UnBindBitmaps()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void ProcessReferences(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked large objects to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // TODO: enable thread safety analysis when in use by multiple worker threads.
  template <typename MarkVisitor>
  void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
      NO_THREAD_SAFETY_ANALYSIS;

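  // A MarkVisitor passed to ScanObjectVisit might look like the following (a hypothetical shape,
  // assuming the visitor-call convention used by the Visit*References helpers below; the real
  // visitors are internal to the collector):
  //
  //   struct ForwardReferencesVisitor {
  //     void operator()(mirror::Object* obj, mirror::Object* ref, const MemberOffset& offset,
  //                     bool is_static) const {
  //       // Replace the reference stored at offset in obj with ref's forwarding address.
  //     }
  //   };
  //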
  void SweepSystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitObjectReferencesAndClass(mirror::Object* obj, const Visitor& visitor)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t /*tid*/,
                               RootType /*root_type*/)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static void ProcessMarkStackCallback(void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

 protected:
  // Returns null if the object is not marked, otherwise returns the forwarding address (same as
  // the object itself for non-movable things).
  mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const;

  static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Marks a large object, returning true if it was not already marked.
  bool MarkLargeObject(const mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Expands the mark stack to new_size (callers typically pass 2x the current size on overflow).
  void ResizeMarkStack(size_t new_size);

  // Returns true if we should sweep the space.
  virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;

  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                 const StackVisitor* visitor);

  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  template <typename Visitor>
  static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
                                            const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit the header, static field references, and interface pointers of a class object.
  template <typename Visitor>
  static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
                                    const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit all of the references in an object array.
  template <typename Visitor>
  static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
                                         const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visits the header and field references of a data object.
  template <typename Visitor>
  static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    return VisitInstanceFieldsReferences(klass, obj, visitor);
  }

  // Push an object onto the mark stack.
  inline void MarkStackPush(mirror::Object* obj);

  void UpdateAndMarkModUnion()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void EnqueueFinalizerReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void PreserveSomeSoftReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void ClearWhiteReferences(mirror::Object** list)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

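  // Processes the discovered reference lists in the usual Java order: soft references
  // (optionally preserved), then weak, then finalizer, then phantom references.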
  void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
                         mirror::Object** weak_references,
                         mirror::Object** finalizer_references,
                         mirror::Object** phantom_references)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

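  // Returns the forwarding address installed in a from-space object, or null if the object has
  // not been forwarded yet (a sketch of the contract; the exact lock-word encoding is an
  // implementation detail).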
  inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;

  // Stack of objects that have been marked but whose fields have not yet been scanned.
  accounting::ObjectStack* mark_stack_;

  // Immune region; every object inside the immune region is assumed to be marked.
  ImmuneRegion immune_region_;

  // If true, the large object space is immune.
  bool is_large_object_space_immune_;

  // Destination and source spaces (can be any type of ContinuousMemMapAllocSpace which either has
  // a live bitmap or doesn't).
  space::ContinuousMemMapAllocSpace* to_space_;
  accounting::SpaceBitmap* to_space_live_bitmap_;  // Cached live bitmap as an optimization.
  space::ContinuousMemMapAllocSpace* from_space_;

  Thread* self_;

  // When true, the generational mode (promotion and the bump-pointer-
  // space-only collection) is enabled. TODO: move these to a new file
  // as a new garbage collector?
  const bool generational_;

  // Used for the generational mode. The end/top of the bump
  // pointer space at the end of the last collection.
  byte* last_gc_to_space_end_;

  // Used for the generational mode. During a collection, keeps track
  // of how many bytes of objects have been copied so far from the
  // bump pointer space to the non-moving space.
  uint64_t bytes_promoted_;

  // Used for the generational mode. When true, collect the whole
  // heap. When false, collect only the bump pointer spaces.
  bool whole_heap_collection_;

  // Used for the generational mode. A counter used to enable
  // whole_heap_collection_ once per interval.
  int whole_heap_collection_interval_counter_;

  // How many bytes we avoided dirtying.
  size_t saved_bytes_;

  // Used for the generational mode. The default interval of the whole
  // heap collection. If N, the whole heap collection occurs every N
  // collections.
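  // For example, with the default of 5, four bump-pointer-space-only collections are followed
  // by one whole-heap collection, after which the counter resets.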
  static constexpr int kDefaultWholeHeapCollectionInterval = 5;

 private:
  DISALLOW_COPY_AND_ASSIGN(SemiSpace);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_