/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static const bool kParallelMarkStack = true;
static const bool kDisableFinger = true;  // TODO: Fix, bit rotten.
static const bool kUseMarkStackPrefetch = true;
static const size_t kSweepArrayChunkFreeSize = 1024;

// Profiling and information flags.
static const bool kCountClassesMarked = false;
static const bool kProfileLargeObjects = false;
static const bool kMeasureOverhead = false;
static const bool kCountTasks = false;
static const bool kCountJavaLangRefs = false;

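// Makes the given space immune for this GC: binds its live bitmap to its mark bitmap if they
// differ, and extends the contiguous [immune_begin_, immune_end_) region to cover the space
// when it adjoins an already-immune space.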
void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }

    // If the previous space was immune, then extend the immune region. Relies on continuous
    // spaces being sorted by Heap::AddContinuousSpace.
    if (prev_space != NULL &&
        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

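// Marks all spaces which we never collect as immune, binding their live bitmaps to their mark
// bitmaps in the process.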
void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);

  // Mark all of the spaces we never collect as immune.
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      java_lang_Class_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_expand_lock_("mark sweep mark stack expand lock"),
      is_concurrent_(is_concurrent),
      clear_soft_references_(false) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = GetHeap()->mark_stack_.get();
  DCHECK(mark_stack_ != NULL);
  SetImmuneRange(NULL, NULL);
  soft_reference_list_ = NULL;
  weak_reference_list_ = NULL;
  finalizer_reference_list_ = NULL;
  phantom_reference_list_ = NULL;
  cleared_reference_list_ = NULL;
  freed_bytes_ = 0;
  freed_objects_ = 0;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  java_lang_Class_ = Class::GetJavaLangClass();
  CHECK(java_lang_Class_ != NULL);

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

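// Runs with the mutator lock held exclusively (all threads suspended): re-marks the root set,
// recursively marks objects on dirty cards, and then processes references.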
bool MarkSweep::HandleDirtyObjectsPhase() {
  base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point
    // to freed objects, since such stale references cause problems.
    SweepArray(allocation_stack, false);
  }
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Heap* heap = GetHeap();
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
  } else {
    MarkRootsCheckpoint(self);
    MarkNonThreadRoots();
  }
  MarkConcurrentRoots();

  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
  MarkReachableObjects();
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(),
                        live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
    ProcessReferences(self);
  } else {
    base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // The allocation stack contains things allocated since the start of the GC. These may have
    // been marked during this GC, meaning they won't be eligible for reclaiming in the next
    // sticky GC. Remove these objects from the mark bitmaps so that they will be eligible for
    // sticky collection.
    // There is a race here which is safely handled. Another thread such as the hprof could
    // have flushed the alloc stack after we resumed the threads. This is safe, however, since
    // resetting the allocation stack zeros it out with madvise. This means that we will either
    // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
    // first place.
    mirror::Object** end = allocation_stack->End();
    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
      Object* obj = *it;
      if (obj != NULL) {
        UnMarkObjectNonNull(obj);
      }
    }
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = space->GetMarkBitmap();
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

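// Doubles the capacity of the mark stack. The existing entries are copied out and pushed back
// afterwards, since Resize() does not preserve the stack contents.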
void MarkSweep::ExpandMarkStack() {
  // Rare case, no need to have Thread::Current be a parameter.
  MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  mark_stack_->Resize(mark_stack_->Capacity() * 2);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
      // The only reason a push can fail is that the mark stack is full.
      ExpandMarkStack();
    }
  }
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    // Do we need to expand the mark stack?
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

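// Marks (set == true) or unmarks (set == false) a large object in the discontinuous large
// object space. Aborts with a fatal error if the object is not contained in any space.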
// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

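// Thread-safe marking variant: uses an atomic test-and-set on the space bitmap so that multiple
// workers can mark concurrently; large objects fall back to a mutex.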
inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
  timings_.EndSplit();
}

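// Visitor that forwards each reference held by an object to MarkSweep::CheckReference for
// validation.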
class CheckObjectVisitor {
 public:
  explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    mark_sweep_->CheckReference(obj, ref, offset, is_static);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  CheckObjectVisitor visitor(this);
  VisitObjectReferences(obj, visitor);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}

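// Visitor that forwards each object encountered during a bitmap or card scan to
// MarkSweep::ScanObject, which marks the objects it references.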
class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::ScanGrayObjects(byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ScanObjectVisitor visitor(this);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    switch (space->GetGcRetentionPolicy()) {
      case space::kGcRetentionPolicyNeverCollect:
        timings_.StartSplit("ScanGrayImageSpaceObjects");
        break;
      case space::kGcRetentionPolicyFullCollect:
        timings_.StartSplit("ScanGrayZygoteSpaceObjects");
        break;
      case space::kGcRetentionPolicyAlwaysCollect:
        timings_.StartSplit("ScanGrayAllocSpaceObjects");
        break;
    }
    byte* begin = space->Begin();
    byte* end = space->End();
    // Image spaces are handled properly since live == marked for them.
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    card_table->Scan(mark_bitmap, begin, end, visitor, minimum_age);
    timings_.EndSplit();
  }
}

class CheckBitmapVisitor {
 public:
  explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    DCHECK(obj != NULL);
    mark_sweep_->CheckObject(obj);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::VerifyImageRoots() {
  // Verify roots ensures that all the references inside the image space point to objects which
  // are either in the image space or marked objects in the alloc space.
  timings_.StartSplit("VerifyImageRoots");
  CheckBitmapVisitor visitor(this);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsImageSpace()) {
      space::ImageSpace* image_space = space->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End());
      accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, visitor);
    }
  }
  timings_.EndSplit();
}

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  const bool partial = GetGcType() == kGcTypePartial;
  ScanObjectVisitor scan_visitor(this);
  if (!kDisableFinger) {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == NULL) {
          GetHeap()->DumpSpaces();
          LOG(FATAL) << "invalid bitmap";
        }
        // This function does not handle heap end increasing, so we must use the space end.
        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
      }
    }
  }
  ProcessMarkStack();
}

bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return
      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
  ScanGrayObjects(minimum_age);
  ProcessMarkStack();
}

void MarkSweep::ReMarkRoots() {
  timings_.StartSplit("ReMarkRoots");
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
  timings_.EndSplit();
}

void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  for (const Object** entry : vm->weak_globals) {
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

struct ArrayMarkedCheck {
  accounting::ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  accounting::ObjectStack* live_stack = array_check->live_stack;
  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}

void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The callbacks check !is_marked, where is_marked is the callback, but we want
  // !IsMarked && IsLive. So compute !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive). Or, for swapped: (IsLive || !IsMarked).

  timings_.StartSplit("SweepSystemWeaksArray");
  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
  timings_.EndSplit();
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // The callbacks check !is_marked, where is_marked is the callback, but we want
  // !IsMarked && IsLive. So compute !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive). Or, for swapped: (IsLive || !IsMarked).
  timings_.StartSplit("SweepSystemWeaks");
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
  timings_.EndSplit();
}

bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return true;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // Verify system weaks, uses a special IsMarked callback which always returns true.
  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

  JavaVMExt* vm = runtime->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  for (const Object** entry : vm->weak_globals) {
    VerifyIsLive(*entry);
  }
}

struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

Mathieu Chartier | 0e4627e | 2012-10-23 16:13:36 -0700 | [diff] [blame] | 807 | class CheckpointMarkThreadRoots : public Closure { |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 808 | public: |
Brian Carlstrom | 93ba893 | 2013-07-17 21:31:49 -0700 | [diff] [blame] | 809 | explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {} |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 810 | |
| 811 | virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS { |
| 812 | // Note: self is not necessarily equal to thread since thread may be suspended. |
| 813 | Thread* self = Thread::Current(); |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 814 | CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) |
| 815 | << thread->GetState() << " thread " << thread << " self " << self; |
Mathieu Chartier | ac86a7c | 2012-11-12 15:03:16 -0800 | [diff] [blame] | 816 | thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_); |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 817 | mark_sweep_->GetBarrier().Pass(self); |
| 818 | } |
| 819 | |
| 820 | private: |
| 821 | MarkSweep* mark_sweep_; |
| 822 | }; |
| 823 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 824 | void MarkSweep::MarkRootsCheckpoint(Thread* self) { |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 825 | CheckpointMarkThreadRoots check_point(this); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 826 | timings_.StartSplit("MarkRootsCheckpoint"); |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 827 | ThreadList* thread_list = Runtime::Current()->GetThreadList(); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 828 | // Request that the checkpoint be run on all threads, returning a count of the threads that |
| 829 | // must run through the barrier, including self. |
| 830 | size_t barrier_count = thread_list->RunCheckpoint(&check_point); |
| 831 | // Release locks then wait for all mutator threads to pass the barrier. |
| 832 | // TODO: optimize to not release locks when there are no threads to wait for. |
| 833 | Locks::heap_bitmap_lock_->ExclusiveUnlock(self); |
| 834 | Locks::mutator_lock_->SharedUnlock(self); |
| 835 | ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun); |
| 836 | CHECK_EQ(old_state, kWaitingPerformingGc); |
| 837 | gc_barrier_->Increment(self, barrier_count); |
| 838 | self->SetState(kWaitingPerformingGc); |
| 839 | Locks::mutator_lock_->SharedLock(self); |
| 840 | Locks::heap_bitmap_lock_->ExclusiveLock(self); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 841 | timings_.EndSplit(); |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 842 | } |
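// A minimal sketch of the checkpoint protocol above, written against standard
// threads rather than ART's ThreadList/Barrier (every name below is a
// hypothetical stand-in and the semantics are simplified): each worker runs
// the checkpoint and passes the barrier, while the requesting thread blocks
// until the expected number of passes has arrived.

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

namespace checkpoint_sketch {

class CountBarrier {
 public:
  // Called by each thread once it has finished its checkpoint work.
  void Pass() {
    std::lock_guard<std::mutex> lock(mu_);
    ++count_;
    cv_.notify_all();
  }
  // Called by the requesting thread; blocks until `expected` passes arrive.
  void WaitFor(int expected) {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [&] { return count_ >= expected; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int count_ = 0;
};

void RunCheckpointOnAll(int num_threads) {
  CountBarrier barrier;
  std::vector<std::thread> workers;
  for (int i = 0; i < num_threads; ++i) {
    workers.emplace_back([&barrier] {
      // ... visit this thread's roots here ...
      barrier.Pass();
    });
  }
  // Analogous to gc_barrier_->Increment(self, barrier_count) above.
  barrier.WaitFor(num_threads);
  for (std::thread& t : workers) {
    t.join();
  }
}

}  // namespace checkpoint_sketch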
| 843 | |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame] | 844 | void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 845 | SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 846 | MarkSweep* mark_sweep = context->mark_sweep; |
| 847 | Heap* heap = mark_sweep->GetHeap(); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 848 | space::AllocSpace* space = context->space; |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 849 | Thread* self = context->self; |
| 850 | Locks::heap_bitmap_lock_->AssertExclusiveHeld(self); |
Ian Rogers | 5d76c43 | 2011-10-31 21:42:49 -0700 | [diff] [blame] | 851 | // Use a bulk free that merges consecutive objects before freeing, or free per object? |
| 852 | // Documentation suggests better free performance with merging, but this may come at the |
| 853 | // expense of allocation performance. |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 854 | size_t freed_objects = num_ptrs; |
| 855 | // AllocSpace::FreeList clears the values in ptrs, so perform this after clearing the live bits. |
| 856 | size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 857 | heap->RecordFree(freed_objects, freed_bytes); |
Mathieu Chartier | 4b95e8f | 2013-07-15 16:32:50 -0700 | [diff] [blame] | 858 | mark_sweep->freed_objects_.fetch_add(freed_objects); |
| 859 | mark_sweep->freed_bytes_.fetch_add(freed_bytes); |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 860 | } |
| 861 | |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 862 | void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 863 | SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg); |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 864 | Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 865 | Heap* heap = context->mark_sweep->GetHeap(); |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 866 | // We don't free any actual memory to avoid dirtying the shared zygote pages. |
| 867 | for (size_t i = 0; i < num_ptrs; ++i) { |
| 868 | Object* obj = static_cast<Object*>(ptrs[i]); |
| 869 | heap->GetLiveBitmap()->Clear(obj); |
| 870 | heap->GetCardTable()->MarkCard(obj); |
| 871 | } |
| 872 | } |
| 873 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 874 | void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) { |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 875 | size_t freed_bytes = 0; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 876 | space::DlMallocSpace* space = heap_->GetAllocSpace(); |
Elliott Hughes | 2da5036 | 2011-10-10 16:57:08 -0700 | [diff] [blame] | 877 | |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 878 | // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not the mark |
| 879 | // bitmap, resulting in occasional frees of Weaks which are still in use. |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 880 | SweepSystemWeaksArray(allocations); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 881 | |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 882 | timings_.StartSplit("Process allocation stack"); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 883 | // Newly allocated objects MUST be in the alloc space and those are the only objects which we are |
| 884 | // going to free. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 885 | accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); |
| 886 | accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap(); |
| 887 | space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); |
| 888 | accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects(); |
| 889 | accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects(); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 890 | if (swap_bitmaps) { |
| 891 | std::swap(live_bitmap, mark_bitmap); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 892 | std::swap(large_live_objects, large_mark_objects); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 893 | } |
| 894 | |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 895 | size_t freed_objects = 0; |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 896 | size_t freed_large_objects = 0; |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 897 | size_t count = allocations->Size(); |
Mathieu Chartier | d8195f1 | 2012-10-05 12:21:28 -0700 | [diff] [blame] | 898 | Object** objects = const_cast<Object**>(allocations->Begin()); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 899 | Object** out = objects; |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 900 | Object** objects_to_chunk_free = out; |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 901 | |
| 902 | // Empty the allocation stack. |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 903 | Thread* self = Thread::Current(); |
Brian Carlstrom | 02c8cc6 | 2013-07-18 15:54:44 -0700 | [diff] [blame] | 904 | for (size_t i = 0; i < count; ++i) { |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 905 | Object* obj = objects[i]; |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 906 | // Only objects in the AllocSpace/LargeObjectSpace should appear in the allocation stack. |
| 907 | if (LIKELY(mark_bitmap->HasAddress(obj))) { |
| 908 | if (!mark_bitmap->Test(obj)) { |
| 909 | // Don't bother un-marking since we clear the mark bitmap anyway. |
| 910 | *(out++) = obj; |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 911 | // Free objects in chunks. |
| 912 | DCHECK_GE(out, objects_to_chunk_free); |
| 913 | DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize); |
| 914 | if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) { |
| 915 | timings_.StartSplit("FreeList"); |
| 916 | size_t chunk_freed_objects = out - objects_to_chunk_free; |
| 917 | freed_objects += chunk_freed_objects; |
| 918 | freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free); |
| 919 | objects_to_chunk_free = out; |
| 920 | timings_.EndSplit(); |
| 921 | } |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 922 | } |
| 923 | } else if (!large_mark_objects->Test(obj)) { |
| 924 | ++freed_large_objects; |
Mathieu Chartier | 1c23e1e | 2012-10-12 14:14:11 -0700 | [diff] [blame] | 925 | freed_bytes += large_object_space->Free(self, obj); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 926 | } |
| 927 | } |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 928 | // Free the remaining objects in chunks. |
| 929 | DCHECK_GE(out, objects_to_chunk_free); |
| 930 | DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize); |
| 931 | if (out - objects_to_chunk_free > 0) { |
| 932 | timings_.StartSplit("FreeList"); |
| 933 | size_t chunk_freed_objects = out - objects_to_chunk_free; |
| 934 | freed_objects += chunk_freed_objects; |
| 935 | freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free); |
| 936 | timings_.EndSplit(); |
| 937 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 938 | CHECK_EQ(count, allocations->Size()); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 939 | timings_.EndSplit(); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 940 | |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 941 | timings_.StartSplit("RecordFree"); |
Mathieu Chartier | 40e978b | 2012-09-07 11:38:36 -0700 | [diff] [blame] | 942 | VLOG(heap) << "Freed " << freed_objects << "/" << count |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 943 | << " objects with size " << PrettySize(freed_bytes); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 944 | heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes); |
Mathieu Chartier | 4b95e8f | 2013-07-15 16:32:50 -0700 | [diff] [blame] | 945 | freed_objects_.fetch_add(freed_objects); |
| 946 | freed_bytes_.fetch_add(freed_bytes); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 947 | timings_.EndSplit(); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 948 | |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 949 | timings_.StartSplit("ResetStack"); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 950 | allocations->Reset(); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 951 | timings_.EndSplit(); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 952 | } |
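// A minimal standalone sketch of the chunked freeing pattern used above
// (kChunkSize and FreeBatch are hypothetical stand-ins for
// kSweepArrayChunkFreeSize and space->FreeList): dead pointers are compacted
// into a buffer and handed to the allocator in fixed-size batches, so each
// bulk free stays bounded while still amortizing per-call overhead.

#include <cstddef>
#include <vector>

namespace chunked_free_sketch {

constexpr size_t kChunkSize = 1024;

void FreeBatch(void** /* ptrs */, size_t /* n */) {
  // Stand-in for space->FreeList(self, n, ptrs).
}

void SweepDead(const std::vector<void*>& candidates, bool (*is_marked)(void*)) {
  std::vector<void*> out;
  out.reserve(kChunkSize);
  for (void* obj : candidates) {
    if (!is_marked(obj)) {
      out.push_back(obj);
      if (out.size() == kChunkSize) {
        FreeBatch(out.data(), out.size());  // Flush a full chunk.
        out.clear();
      }
    }
  }
  if (!out.empty()) {
    FreeBatch(out.data(), out.size());  // Flush the remainder.
  }
}

}  // namespace chunked_free_sketch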
| 953 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 954 | void MarkSweep::Sweep(bool swap_bitmaps) { |
Mathieu Chartier | b43b7d4 | 2012-06-19 13:15:09 -0700 | [diff] [blame] | 955 | DCHECK(mark_stack_->IsEmpty()); |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 956 | base::TimingLogger::ScopedSplit split("Sweep", &timings_); |
Mathieu Chartier | b43b7d4 | 2012-06-19 13:15:09 -0700 | [diff] [blame] | 957 | |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 958 | // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not the mark |
| 959 | // bitmap, resulting in occasional frees of Weaks which are still in use. |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 960 | SweepSystemWeaks(); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 961 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 962 | const bool partial = (GetGcType() == kGcTypePartial); |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 963 | SweepCallbackContext scc; |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 964 | scc.mark_sweep = this; |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 965 | scc.self = Thread::Current(); |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame^] | 966 | for (const auto& space : GetHeap()->GetContinuousSpaces()) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 967 | // We always sweep spaces whose retention policy is always-collect. |
| 968 | bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect); |
| 969 | if (!partial && !sweep_space) { |
| 970 | // We sweep full-collect spaces when the GC isn't a partial GC (i.e. it is a full GC). |
| 971 | sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect); |
| 972 | } |
| 973 | if (sweep_space) { |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 974 | uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); |
| 975 | uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 976 | scc.space = space->AsDlMallocSpace(); |
| 977 | accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); |
| 978 | accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap(); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 979 | if (swap_bitmaps) { |
| 980 | std::swap(live_bitmap, mark_bitmap); |
| 981 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 982 | if (!space->IsZygoteSpace()) { |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 983 | base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_); |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 984 | // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 985 | accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end, |
| 986 | &SweepCallback, reinterpret_cast<void*>(&scc)); |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 987 | } else { |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 988 | base::TimingLogger::ScopedSplit split("SweepZygote", &timings_); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 989 | // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free actual |
| 990 | // memory. |
| 991 | accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end, |
| 992 | &ZygoteSweepCallback, reinterpret_cast<void*>(&scc)); |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 993 | } |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 994 | } |
| 995 | } |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 996 | |
| 997 | SweepLargeObjects(swap_bitmaps); |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 998 | } |
| 999 | |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1000 | void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 1001 | base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1002 | // Sweep large objects |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1003 | space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); |
| 1004 | accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects(); |
| 1005 | accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects(); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1006 | if (swap_bitmaps) { |
| 1007 | std::swap(large_live_objects, large_mark_objects); |
| 1008 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1009 | accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects(); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1010 | // O(n*log(n)) but hopefully there are not too many large objects. |
| 1011 | size_t freed_objects = 0; |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 1012 | size_t freed_bytes = 0; |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 1013 | Thread* self = Thread::Current(); |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame^] | 1014 | for (const Object* obj : live_objects) { |
| 1015 | if (!large_mark_objects->Test(obj)) { |
| 1016 | freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj)); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1017 | ++freed_objects; |
| 1018 | } |
| 1019 | } |
Mathieu Chartier | 4b95e8f | 2013-07-15 16:32:50 -0700 | [diff] [blame] | 1020 | freed_objects_.fetch_add(freed_objects); |
| 1021 | freed_bytes_.fetch_add(freed_bytes); |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 1022 | GetHeap()->RecordFree(freed_objects, freed_bytes); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1023 | } |
| 1024 | |
Mathieu Chartier | b43b7d4 | 2012-06-19 13:15:09 -0700 | [diff] [blame] | 1025 | void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) { |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame^] | 1026 | for (const auto& space : GetHeap()->GetContinuousSpaces()) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1027 | if (space->IsDlMallocSpace() && space->Contains(ref)) { |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1028 | DCHECK(IsMarked(obj)); |
Mathieu Chartier | b43b7d4 | 2012-06-19 13:15:09 -0700 | [diff] [blame] | 1029 | |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1030 | bool is_marked = IsMarked(ref); |
| 1031 | if (!is_marked) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1032 | LOG(INFO) << *space; |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1033 | LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref) |
| 1034 | << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj) |
| 1035 | << "' (" << reinterpret_cast<const void*>(obj) << ") at offset " |
| 1036 | << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked"; |
Mathieu Chartier | 262e5ff | 2012-06-01 17:35:38 -0700 | [diff] [blame] | 1037 | |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1038 | const Class* klass = is_static ? obj->AsClass() : obj->GetClass(); |
| 1039 | DCHECK(klass != NULL); |
Brian Carlstrom | ea46f95 | 2013-07-30 01:26:50 -0700 | [diff] [blame] | 1040 | const ObjectArray<ArtField>* fields = is_static ? klass->GetSFields() : klass->GetIFields(); |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1041 | DCHECK(fields != NULL); |
| 1042 | bool found = false; |
| 1043 | for (int32_t i = 0; i < fields->GetLength(); ++i) { |
Brian Carlstrom | ea46f95 | 2013-07-30 01:26:50 -0700 | [diff] [blame] | 1044 | const ArtField* cur = fields->Get(i); |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1045 | if (cur->GetOffset().Int32Value() == offset.Int32Value()) { |
| 1046 | LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur); |
| 1047 | found = true; |
| 1048 | break; |
| 1049 | } |
| 1050 | } |
| 1051 | if (!found) { |
| 1052 | LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value(); |
| 1053 | } |
Mathieu Chartier | 262e5ff | 2012-06-01 17:35:38 -0700 | [diff] [blame] | 1054 | |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1055 | bool obj_marked = heap_->GetCardTable()->IsDirty(obj); |
| 1056 | if (!obj_marked) { |
| 1057 | LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' " |
| 1058 | << "(" << reinterpret_cast<const void*>(obj) << ") contains references to " |
| 1059 | << "the alloc space, but wasn't card marked"; |
Mathieu Chartier | 262e5ff | 2012-06-01 17:35:38 -0700 | [diff] [blame] | 1060 | } |
| 1061 | } |
Ian Rogers | 5d76c43 | 2011-10-31 21:42:49 -0700 | [diff] [blame] | 1062 | } |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1063 | break; |
Ian Rogers | 5d76c43 | 2011-10-31 21:42:49 -0700 | [diff] [blame] | 1064 | } |
| 1065 | } |
| 1066 | |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1067 | // Process the "referent" field in a java.lang.ref.Reference. If the |
| 1068 | // referent has not yet been marked, put it on the appropriate list in |
| 1069 | // the heap for later processing. |
| 1070 | void MarkSweep::DelayReferenceReferent(Object* obj) { |
| 1071 | DCHECK(obj != NULL); |
Brian Carlstrom | 1f87008 | 2011-08-23 16:02:11 -0700 | [diff] [blame] | 1072 | Class* klass = obj->GetClass(); |
| 1073 | DCHECK(klass != NULL); |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 1074 | DCHECK(klass->IsReferenceClass()); |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1075 | Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false); |
| 1076 | Object* referent = heap_->GetReferenceReferent(obj); |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 1077 | if (kCountJavaLangRefs) { |
| 1078 | ++reference_count_; |
| 1079 | } |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1080 | if (pending == NULL && referent != NULL && !IsMarked(referent)) { |
Brian Carlstrom | 4873d46 | 2011-08-21 15:23:39 -0700 | [diff] [blame] | 1081 | Object** list = NULL; |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 1082 | if (klass->IsSoftReferenceClass()) { |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1083 | list = &soft_reference_list_; |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 1084 | } else if (klass->IsWeakReferenceClass()) { |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1085 | list = &weak_reference_list_; |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 1086 | } else if (klass->IsFinalizerReferenceClass()) { |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1087 | list = &finalizer_reference_list_; |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 1088 | } else if (klass->IsPhantomReferenceClass()) { |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1089 | list = &phantom_reference_list_; |
| 1090 | } |
Brian Carlstrom | 0796af0 | 2011-10-12 14:31:45 -0700 | [diff] [blame] | 1091 | DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags(); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1092 | // TODO: One lock per list? |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1093 | heap_->EnqueuePendingReference(obj, list); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1094 | } |
| 1095 | } |
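// A minimal standalone sketch of the list selection above (all types are
// hypothetical; the real code threads the pending list through the Reference
// object itself via Heap::EnqueuePendingReference): each java.lang.ref
// reference strength gets its own intrusive singly linked pending list.

namespace ref_dispatch_sketch {

enum class RefKind { kSoft, kWeak, kFinalizer, kPhantom };

struct Ref {
  RefKind kind;
  Ref* pending = nullptr;  // Intrusive "next" link, like the pending-next field.
};

struct PendingLists {
  Ref* soft = nullptr;
  Ref* weak = nullptr;
  Ref* finalizer = nullptr;
  Ref* phantom = nullptr;
};

// Prepend ref to the list matching its strength, mirroring DelayReferenceReferent.
void Delay(PendingLists& lists, Ref* ref) {
  Ref** list = nullptr;
  switch (ref->kind) {
    case RefKind::kSoft:      list = &lists.soft;      break;
    case RefKind::kWeak:      list = &lists.weak;      break;
    case RefKind::kFinalizer: list = &lists.finalizer; break;
    case RefKind::kPhantom:   list = &lists.phantom;   break;
  }
  ref->pending = *list;
  *list = ref;
}

}  // namespace ref_dispatch_sketch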
| 1096 | |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1097 | void MarkSweep::ScanRoot(const Object* obj) { |
| 1098 | ScanObject(obj); |
| 1099 | } |
| 1100 | |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1101 | class MarkObjectVisitor { |
| 1102 | public: |
Brian Carlstrom | 93ba893 | 2013-07-17 21:31:49 -0700 | [diff] [blame] | 1103 | explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {} |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1104 | |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1105 | // TODO: Fix this when annotalysis works with visitors. |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 1106 | void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */, |
| 1107 | bool /* is_static */) const |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1108 | NO_THREAD_SAFETY_ANALYSIS { |
| 1109 | if (kDebugLocking) { |
| 1110 | Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); |
| 1111 | Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); |
| 1112 | } |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1113 | mark_sweep_->MarkObject(ref); |
| 1114 | } |
| 1115 | |
| 1116 | private: |
| 1117 | MarkSweep* const mark_sweep_; |
| 1118 | }; |
| 1119 | |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1120 | // Scans an object reference. Determines the type of the reference |
| 1121 | // and dispatches to a specialized scanning routine. |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 1122 | void MarkSweep::ScanObject(const Object* obj) { |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1123 | MarkObjectVisitor visitor(this); |
| 1124 | ScanObjectVisit(obj, visitor); |
| 1125 | } |
| 1126 | |
| 1127 | class MarkStackChunk : public Task { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1128 | public: |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1129 | MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end) |
| 1130 | : mark_sweep_(mark_sweep), |
| 1131 | thread_pool_(thread_pool), |
| 1132 | index_(0), |
| 1133 | length_(0), |
| 1134 | output_(NULL) { |
| 1135 | length_ = end - begin; |
| 1136 | if (begin != end) { |
| 1137 | // Cost not significant since we only do this for the initial set of mark stack chunks. |
| 1138 | memcpy(data_, begin, length_ * sizeof(*begin)); |
| 1139 | } |
| 1140 | if (kCountTasks) { |
| 1141 | ++mark_sweep_->work_chunks_created_; |
| 1142 | } |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1143 | } |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1144 | |
| 1145 | ~MarkStackChunk() { |
| 1146 | DCHECK(output_ == NULL || output_->length_ == 0); |
| 1147 | DCHECK_GE(index_, length_); |
| 1148 | delete output_; |
| 1149 | if (kCountTasks) { |
| 1150 | ++mark_sweep_->work_chunks_deleted_; |
| 1151 | } |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1152 | } |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1153 | |
| 1154 | MarkSweep* const mark_sweep_; |
| 1155 | ThreadPool* const thread_pool_; |
| 1156 | static const size_t max_size = 1 * KB; |
| 1157 | // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing. |
| 1158 | size_t index_; |
| 1159 | // Input / output mark stack. We add newly marked references to data_ until length reaches |
| 1160 | // max_size. This is an optimization so that fewer tasks are created. |
| 1161 | // TODO: Investigate using a bounded buffer FIFO. |
| 1162 | Object* data_[max_size]; |
| 1163 | // How many elements in data_ we need to scan. |
| 1164 | size_t length_; |
| 1165 | // Output block; newly marked references get added to the output block so that another thread can |
| 1166 | // scan them. |
| 1167 | MarkStackChunk* output_; |
| 1168 | |
| 1169 | class MarkObjectParallelVisitor { |
| 1170 | public: |
Brian Carlstrom | 93ba893 | 2013-07-17 21:31:49 -0700 | [diff] [blame] | 1171 | explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {} |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1172 | |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 1173 | void operator()(const Object* /* obj */, const Object* ref, |
| 1174 | const MemberOffset& /* offset */, bool /* is_static */) const { |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1175 | if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) { |
| 1176 | chunk_task_->MarkStackPush(ref); |
| 1177 | } |
| 1178 | } |
| 1179 | |
| 1180 | private: |
| 1181 | MarkStackChunk* const chunk_task_; |
| 1182 | }; |
| 1183 | |
| 1184 | // Push an object into the block. |
| 1185 | // No need for an atomic ++ since only one thread writes to an output block at any |
| 1186 | // given time. |
| 1187 | void Push(Object* obj) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1188 | CHECK(obj != NULL); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1189 | data_[length_++] = obj; |
| 1190 | } |
| 1191 | |
| 1192 | void MarkStackPush(const Object* obj) { |
| 1193 | if (static_cast<size_t>(length_) < max_size) { |
| 1194 | Push(const_cast<Object*>(obj)); |
| 1195 | } else { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1196 | // Internal (thread-local) buffer is full, push to a new buffer instead. |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1197 | if (UNLIKELY(output_ == NULL)) { |
| 1198 | AllocateOutputChunk(); |
| 1199 | } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) { |
| 1200 | // Output block is full, queue it up for processing and obtain a new block. |
| 1201 | EnqueueOutput(); |
| 1202 | AllocateOutputChunk(); |
| 1203 | } |
| 1204 | output_->Push(const_cast<Object*>(obj)); |
| 1205 | } |
| 1206 | } |
| 1207 | |
| 1208 | void ScanObject(Object* obj) { |
| 1209 | mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this)); |
| 1210 | } |
| 1211 | |
| 1212 | void EnqueueOutput() { |
| 1213 | if (output_ != NULL) { |
| 1214 | uint64_t start = 0; |
| 1215 | if (kMeasureOverhead) { |
| 1216 | start = NanoTime(); |
| 1217 | } |
| 1218 | thread_pool_->AddTask(Thread::Current(), output_); |
| 1219 | output_ = NULL; |
| 1220 | if (kMeasureOverhead) { |
Mathieu Chartier | 4b95e8f | 2013-07-15 16:32:50 -0700 | [diff] [blame] | 1221 | mark_sweep_->overhead_time_.fetch_add(NanoTime() - start); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1222 | } |
| 1223 | } |
| 1224 | } |
| 1225 | |
| 1226 | void AllocateOutputChunk() { |
| 1227 | uint64_t start = 0; |
| 1228 | if (kMeasureOverhead) { |
| 1229 | start = NanoTime(); |
| 1230 | } |
| 1231 | output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL); |
| 1232 | if (kMeasureOverhead) { |
Mathieu Chartier | 4b95e8f | 2013-07-15 16:32:50 -0700 | [diff] [blame] | 1233 | mark_sweep_->overhead_time_.fetch_add(NanoTime() - start); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1234 | } |
| 1235 | } |
| 1236 | |
| 1237 | void Finalize() { |
| 1238 | EnqueueOutput(); |
| 1239 | delete this; |
| 1240 | } |
| 1241 | |
| 1242 | // Scans all of the objects in the chunk. |
| 1243 | virtual void Run(Thread* self) { |
Brian Carlstrom | d74e41b | 2013-03-24 23:47:01 -0700 | [diff] [blame] | 1244 | size_t index; |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1245 | while ((index = index_++) < length_) { |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1246 | if (kUseMarkStackPrefetch) { |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 1247 | static const size_t prefetch_look_ahead = 1; |
| 1248 | __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1249 | } |
| 1250 | Object* obj = data_[index]; |
| 1251 | DCHECK(obj != NULL); |
| 1252 | ScanObject(obj); |
| 1253 | } |
| 1254 | } |
| 1255 | }; |
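// A minimal standalone sketch of the local-buffer/output-block pattern that
// MarkStackPush implements above (Pool, Chunk, and the names below are
// hypothetical simplifications): marked references fill a thread-local buffer
// first, spill into an output chunk once it is full, and full output chunks
// are handed to the pool as fresh tasks for other workers.

#include <cstddef>
#include <deque>
#include <vector>

namespace chunk_task_sketch {

constexpr size_t kMaxChunk = 256;

struct Chunk {
  std::vector<const void*> data;
};

struct Pool {
  std::deque<Chunk*> queue;  // Stand-in for ThreadPool::AddTask; takes ownership.
  void AddTask(Chunk* chunk) { queue.push_back(chunk); }
};

class ChunkTask {
 public:
  explicit ChunkTask(Pool* pool) : pool_(pool) {}

  void PushRef(const void* ref) {
    if (local_.size() < kMaxChunk) {
      local_.push_back(ref);  // Fast path: no synchronization needed.
      return;
    }
    if (output_ == nullptr) {
      output_ = new Chunk;
    }
    output_->data.push_back(ref);
    if (output_->data.size() == kMaxChunk) {
      pool_->AddTask(output_);  // Hand the full chunk to another worker.
      output_ = nullptr;
    }
  }

 private:
  Pool* const pool_;
  std::vector<const void*> local_;
  Chunk* output_ = nullptr;  // Only the owning thread writes here, so no atomics.
};

}  // namespace chunk_task_sketch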
| 1256 | |
| 1257 | void MarkSweep::ProcessMarkStackParallel() { |
| 1258 | CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled"; |
| 1259 | Thread* self = Thread::Current(); |
| 1260 | ThreadPool* thread_pool = GetHeap()->GetThreadPool(); |
| 1261 | // Split the current mark stack up into work tasks. |
| 1262 | const size_t num_threads = thread_pool->GetThreadCount(); |
| 1263 | const size_t stack_size = mark_stack_->Size(); |
| 1264 | const size_t chunk_size = |
| 1265 | std::min((stack_size + num_threads - 1) / num_threads, |
| 1266 | static_cast<size_t>(MarkStackChunk::max_size)); |
| 1267 | size_t index = 0; |
| 1268 | for (size_t i = 0; i < num_threads || index < stack_size; ++i) { |
| 1269 | Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)]; |
| 1270 | Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)]; |
| 1271 | index += chunk_size; |
| 1272 | thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end)); |
| 1273 | } |
| 1274 | thread_pool->StartWorkers(self); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1275 | thread_pool->Wait(self, true, true); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1276 | mark_stack_->Reset(); |
Brian Carlstrom | 7934ac2 | 2013-07-26 10:54:15 -0700 | [diff] [blame] | 1277 | // LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime()); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1278 | CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked"; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1279 | } |
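// A small standalone check of the chunk-size arithmetic above (kMaxChunk is a
// hypothetical stand-in for MarkStackChunk::max_size): the stack is split into
// ceil(stack_size / num_threads) pieces, capped at the task buffer size, so
// every worker gets a task and no task overflows its fixed-size buffer.

#include <algorithm>
#include <cassert>
#include <cstddef>

namespace chunking_sketch {

constexpr size_t kMaxChunk = 1024;

size_t ChunkSize(size_t stack_size, size_t num_threads) {
  return std::min((stack_size + num_threads - 1) / num_threads, kMaxChunk);
}

void Check() {
  assert(ChunkSize(10, 4) == 3);              // ceil(10 / 4): small stacks still spread out.
  assert(ChunkSize(100000, 4) == kMaxChunk);  // Capped; the loop above creates extra chunks.
}

}  // namespace chunking_sketch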
| 1280 | |
Ian Rogers | 5d76c43 | 2011-10-31 21:42:49 -0700 | [diff] [blame] | 1281 | // Scan anything that's on the mark stack. |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1282 | void MarkSweep::ProcessMarkStack() { |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1283 | ThreadPool* thread_pool = GetHeap()->GetThreadPool(); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1284 | timings_.StartSplit("ProcessMarkStack"); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1285 | if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) { |
| 1286 | ProcessMarkStackParallel(); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1287 | timings_.EndSplit(); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1288 | return; |
| 1289 | } |
| 1290 | |
Mathieu Chartier | d8195f1 | 2012-10-05 12:21:28 -0700 | [diff] [blame] | 1291 | if (kUseMarkStackPrefetch) { |
| 1292 | const size_t fifo_size = 4; |
| 1293 | const size_t fifo_mask = fifo_size - 1; |
| 1294 | const Object* fifo[fifo_size]; |
Brian Carlstrom | 02c8cc6 | 2013-07-18 15:54:44 -0700 | [diff] [blame] | 1295 | for (size_t i = 0; i < fifo_size; ++i) { |
Mathieu Chartier | d8195f1 | 2012-10-05 12:21:28 -0700 | [diff] [blame] | 1296 | fifo[i] = NULL; |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1297 | } |
Mathieu Chartier | d8195f1 | 2012-10-05 12:21:28 -0700 | [diff] [blame] | 1298 | size_t fifo_pos = 0; |
| 1299 | size_t fifo_count = 0; |
| 1300 | for (;;) { |
| 1301 | const Object* obj = fifo[fifo_pos & fifo_mask]; |
| 1302 | if (obj != NULL) { |
| 1303 | ScanObject(obj); |
| 1304 | fifo[fifo_pos & fifo_mask] = NULL; |
| 1305 | --fifo_count; |
| 1306 | } |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1307 | |
Mathieu Chartier | d8195f1 | 2012-10-05 12:21:28 -0700 | [diff] [blame] | 1308 | if (!mark_stack_->IsEmpty()) { |
| 1309 | const Object* obj = mark_stack_->PopBack(); |
| 1310 | DCHECK(obj != NULL); |
| 1311 | fifo[fifo_pos & fifo_mask] = obj; |
| 1312 | __builtin_prefetch(obj); |
| 1313 | fifo_count++; |
| 1314 | } |
| 1315 | fifo_pos++; |
| 1316 | |
| 1317 | if (!fifo_count) { |
| 1318 | CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size(); |
| 1319 | break; |
| 1320 | } |
| 1321 | } |
| 1322 | } else { |
| 1323 | while (!mark_stack_->IsEmpty()) { |
| 1324 | const Object* obj = mark_stack_->PopBack(); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1325 | DCHECK(obj != NULL); |
Mathieu Chartier | d8195f1 | 2012-10-05 12:21:28 -0700 | [diff] [blame] | 1326 | ScanObject(obj); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1327 | } |
| 1328 | } |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1329 | timings_.EndSplit(); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1330 | } |
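// A minimal standalone sketch of the prefetch ring used above (Process is a
// hypothetical stand-in for ScanObject): because the ring size is a power of
// two, `pos & (size - 1)` is a cheap modulo, and each object is prefetched
// when it enters the ring and scanned a few iterations later, once its cache
// lines have (hopefully) arrived.

#include <cstddef>
#include <vector>

namespace prefetch_ring_sketch {

void Process(const void* /* obj */) { /* stand-in for ScanObject(obj) */ }

void DrainWithPrefetch(std::vector<const void*>* stack) {
  constexpr size_t kRingSize = 4;  // Must be a power of two.
  constexpr size_t kRingMask = kRingSize - 1;
  const void* ring[kRingSize] = {nullptr, nullptr, nullptr, nullptr};
  size_t pos = 0;
  size_t in_flight = 0;
  for (;;) {
    const void* obj = ring[pos & kRingMask];
    if (obj != nullptr) {
      Process(obj);  // This slot was prefetched kRingSize iterations ago.
      ring[pos & kRingMask] = nullptr;
      --in_flight;
    }
    if (!stack->empty()) {
      const void* next = stack->back();
      stack->pop_back();
      ring[pos & kRingMask] = next;  // Park it; scan it on the next wrap-around.
      __builtin_prefetch(next);
      ++in_flight;
    }
    ++pos;
    if (in_flight == 0) {
      break;  // Ring drained and stack empty.
    }
  }
}

}  // namespace prefetch_ring_sketch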
| 1331 | |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1332 | // Walks the reference list marking any references subject to the |
| 1333 | // reference clearing policy. References with a black referent are |
| 1334 | // removed from the list. References with white referents biased |
| 1335 | // toward saving are blackened and also removed from the list. |
| 1336 | void MarkSweep::PreserveSomeSoftReferences(Object** list) { |
| 1337 | DCHECK(list != NULL); |
| 1338 | Object* clear = NULL; |
| 1339 | size_t counter = 0; |
Mathieu Chartier | b43b7d4 | 2012-06-19 13:15:09 -0700 | [diff] [blame] | 1340 | |
| 1341 | DCHECK(mark_stack_->IsEmpty()); |
| 1342 | |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1343 | timings_.StartSplit("PreserveSomeSoftReferences"); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1344 | while (*list != NULL) { |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1345 | Object* ref = heap_->DequeuePendingReference(list); |
| 1346 | Object* referent = heap_->GetReferenceReferent(ref); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1347 | if (referent == NULL) { |
| 1348 | // Referent was cleared by the user during marking. |
| 1349 | continue; |
| 1350 | } |
| 1351 | bool is_marked = IsMarked(referent); |
| 1352 | if (!is_marked && ((++counter) & 1)) { |
| 1353 | // Referent is white and biased toward saving, mark it. |
| 1354 | MarkObject(referent); |
| 1355 | is_marked = true; |
| 1356 | } |
| 1357 | if (!is_marked) { |
| 1358 | // Referent is white, queue it for clearing. |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1359 | heap_->EnqueuePendingReference(ref, &clear); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1360 | } |
| 1361 | } |
| 1362 | *list = clear; |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1363 | timings_.EndSplit(); |
| 1364 | |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1365 | // Restart the mark with the newly black references added to the |
| 1366 | // root set. |
| 1367 | ProcessMarkStack(); |
| 1368 | } |
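// A tiny standalone illustration of the "biased toward saving" policy above
// (hypothetical types): (++counter) & 1 alternates between 1 and 0, so every
// other white soft referent is marked and survives, preserving roughly half
// of the softly reachable objects instead of clearing all or none.

#include <cstddef>
#include <vector>

namespace soft_ref_sketch {

struct Referent {
  bool marked = false;
};

// Returns how many white referents were preserved; a stand-in for the
// MarkObject(referent) calls in PreserveSomeSoftReferences.
size_t PreserveAlternating(const std::vector<Referent*>& white_referents) {
  size_t counter = 0;
  size_t saved = 0;
  for (Referent* referent : white_referents) {
    if (!referent->marked && ((++counter) & 1)) {
      referent->marked = true;  // Odd-numbered candidates are saved.
      ++saved;
    }
  }
  return saved;
}

}  // namespace soft_ref_sketch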
| 1369 | |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1370 | inline bool MarkSweep::IsMarked(const Object* object) const |
| 1371 | SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { |
Mathieu Chartier | 9642c96 | 2013-08-05 17:40:36 -0700 | [diff] [blame] | 1372 | if (IsImmune(object)) { |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1373 | return true; |
| 1374 | } |
| 1375 | DCHECK(current_mark_bitmap_ != NULL); |
| 1376 | if (current_mark_bitmap_->HasAddress(object)) { |
| 1377 | return current_mark_bitmap_->Test(object); |
| 1378 | } |
| 1379 | return heap_->GetMarkBitmap()->Test(object); |
| 1380 | } |
| 1381 | |
| 1382 | |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1383 | // Unlink the reference list, clearing reference objects with white |
| 1384 | // referents. Cleared references registered to a reference queue are |
| 1385 | // scheduled for appending by the heap worker thread. |
| 1386 | void MarkSweep::ClearWhiteReferences(Object** list) { |
| 1387 | DCHECK(list != NULL); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1388 | while (*list != NULL) { |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1389 | Object* ref = heap_->DequeuePendingReference(list); |
| 1390 | Object* referent = heap_->GetReferenceReferent(ref); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1391 | if (referent != NULL && !IsMarked(referent)) { |
| 1392 | // Referent is white, clear it. |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1393 | heap_->ClearReferenceReferent(ref); |
| 1394 | if (heap_->IsEnqueuable(ref)) { |
| 1395 | heap_->EnqueueReference(ref, &cleared_reference_list_); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1396 | } |
| 1397 | } |
| 1398 | } |
| 1399 | DCHECK(*list == NULL); |
| 1400 | } |
| 1401 | |
| 1402 | // Enqueues finalizer references with white referents. White |
| 1403 | // referents are blackened, moved to the zombie field, and the |
| 1404 | // referent field is cleared. |
| 1405 | void MarkSweep::EnqueueFinalizerReferences(Object** list) { |
| 1406 | DCHECK(list != NULL); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1407 | timings_.StartSplit("EnqueueFinalizerReferences"); |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1408 | MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset(); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1409 | bool has_enqueued = false; |
| 1410 | while (*list != NULL) { |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1411 | Object* ref = heap_->DequeuePendingReference(list); |
| 1412 | Object* referent = heap_->GetReferenceReferent(ref); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1413 | if (referent != NULL && !IsMarked(referent)) { |
| 1414 | MarkObject(referent); |
| 1415 | // If the referent is non-null the reference must be enqueuable. |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1416 | DCHECK(heap_->IsEnqueuable(ref)); |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 1417 | ref->SetFieldObject(zombie_offset, referent, false); |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1418 | heap_->ClearReferenceReferent(ref); |
| 1419 | heap_->EnqueueReference(ref, &cleared_reference_list_); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1420 | has_enqueued = true; |
| 1421 | } |
| 1422 | } |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1423 | timings_.EndSplit(); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1424 | if (has_enqueued) { |
| 1425 | ProcessMarkStack(); |
| 1426 | } |
| 1427 | DCHECK(*list == NULL); |
| 1428 | } |
| 1429 | |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 1430 | // Process reference class instances and schedule finalizations. |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1431 | void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft, |
| 1432 | Object** weak_references, |
| 1433 | Object** finalizer_references, |
| 1434 | Object** phantom_references) { |
| 1435 | DCHECK(soft_references != NULL); |
| 1436 | DCHECK(weak_references != NULL); |
| 1437 | DCHECK(finalizer_references != NULL); |
| 1438 | DCHECK(phantom_references != NULL); |
| 1439 | |
| 1440 | // Unless we are in the zygote or required to clear soft references |
| 1441 | // with white referents, preserve some white referents. |
Ian Rogers | 2945e24 | 2012-06-03 14:45:16 -0700 | [diff] [blame] | 1442 | if (!clear_soft && !Runtime::Current()->IsZygote()) { |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1443 | PreserveSomeSoftReferences(soft_references); |
| 1444 | } |
| 1445 | |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1446 | timings_.StartSplit("ProcessReferences"); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1447 | // Clear all remaining soft and weak references with white |
| 1448 | // referents. |
| 1449 | ClearWhiteReferences(soft_references); |
| 1450 | ClearWhiteReferences(weak_references); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1451 | timings_.EndSplit(); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1452 | |
| 1453 | // Preserve all white objects with finalize methods and schedule |
| 1454 | // them for finalization. |
| 1455 | EnqueueFinalizerReferences(finalizer_references); |
| 1456 | |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1457 | timings_.StartSplit("ProcessReferences"); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1458 | // Clear all f-reachable soft and weak references with white |
| 1459 | // referents. |
| 1460 | ClearWhiteReferences(soft_references); |
| 1461 | ClearWhiteReferences(weak_references); |
| 1462 | |
| 1463 | // Clear all phantom references with white referents. |
| 1464 | ClearWhiteReferences(phantom_references); |
| 1465 | |
| 1466 | // At this point all reference lists should be empty. |
| 1467 | DCHECK(*soft_references == NULL); |
| 1468 | DCHECK(*weak_references == NULL); |
| 1469 | DCHECK(*finalizer_references == NULL); |
| 1470 | DCHECK(*phantom_references == NULL); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1471 | timings_.EndSplit(); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1472 | } |
| 1473 | |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1474 | void MarkSweep::UnBindBitmaps() { |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 1475 | base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_); |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame^] | 1476 | for (const auto& space : GetHeap()->GetContinuousSpaces()) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1477 | if (space->IsDlMallocSpace()) { |
| 1478 | space::DlMallocSpace* alloc_space = space->AsDlMallocSpace(); |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1479 | if (alloc_space->temp_bitmap_.get() != NULL) { |
| 1480 | // At this point, the temp_bitmap holds our old mark bitmap. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1481 | accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release(); |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1482 | GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap); |
| 1483 | CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get()); |
| 1484 | alloc_space->mark_bitmap_.reset(new_bitmap); |
| 1485 | DCHECK(alloc_space->temp_bitmap_.get() == NULL); |
| 1486 | } |
| 1487 | } |
| 1488 | } |
| 1489 | } |
| 1490 | |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1491 | void MarkSweep::FinishPhase() { |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 1492 | base::TimingLogger::ScopedSplit split("FinishPhase", &timings_); |
| 1493 | // Can't enqueue references if we hold the mutator lock. |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1494 | Object* cleared_references = GetClearedReferences(); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1495 | Heap* heap = GetHeap(); |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 1496 | timings_.NewSplit("EnqueueClearedReferences"); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1497 | heap->EnqueueClearedReferences(&cleared_references); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1498 | |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 1499 | timings_.NewSplit("PostGcVerification"); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1500 | heap->PostGcVerification(this); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1501 | |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 1502 | timings_.NewSplit("GrowForUtilization"); |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 1503 | heap->GrowForUtilization(GetGcType(), GetDurationNs()); |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 1504 | |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 1505 | timings_.NewSplit("RequestHeapTrim"); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1506 | heap->RequestHeapTrim(); |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 1507 | |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1508 | // Update the cumulative statistics |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1509 | total_time_ns_ += GetDurationNs(); |
| 1510 | total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0, |
| 1511 | std::plus<uint64_t>()); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1512 | total_freed_objects_ += GetFreedObjects(); |
| 1513 | total_freed_bytes_ += GetFreedBytes(); |
| 1514 | |
| 1515 | // Ensure that the mark stack is empty. |
| 1516 | CHECK(mark_stack_->IsEmpty()); |
| 1517 | |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 1518 | if (kCountScannedTypes) { |
| 1519 | VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_ |
| 1520 | << " other=" << other_count_; |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1521 | } |
| 1522 | |
| 1523 | if (kCountTasks) { |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 1524 | VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_; |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1525 | } |
| 1526 | |
| 1527 | if (kMeasureOverhead) { |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 1528 | VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1529 | } |
| 1530 | |
| 1531 | if (kProfileLargeObjects) { |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 1532 | VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_; |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1533 | } |
| 1534 | |
| 1535 | if (kCountClassesMarked) { |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 1536 | VLOG(gc) << "Classes marked " << classes_marked_; |
| 1537 | } |
| 1538 | |
| 1539 | if (kCountJavaLangRefs) { |
| 1540 | VLOG(gc) << "References scanned " << reference_count_; |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1541 | } |
| 1542 | |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1543 | // Update the cumulative loggers. |
| 1544 | cumulative_timings_.Start(); |
Anwar Ghuloum | 6f28d91 | 2013-07-24 15:02:53 -0700 | [diff] [blame] | 1545 | cumulative_timings_.AddLogger(timings_); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1546 | cumulative_timings_.End(); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1547 | |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1548 | // Clear all of the spaces' mark bitmaps. |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame^] | 1549 | for (const auto& space : GetHeap()->GetContinuousSpaces()) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1550 | if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) { |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1551 | space->GetMarkBitmap()->Clear(); |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1552 | } |
| 1553 | } |
Mathieu Chartier | 5301cd2 | 2012-05-31 12:11:36 -0700 | [diff] [blame] | 1554 | mark_stack_->Reset(); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1555 | |
| 1556 | // Reset the marked large objects. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1557 | space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace(); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1558 | large_objects->GetMarkObjects()->Clear(); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1559 | } |
| 1560 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1561 | } // namespace collector |
| 1562 | } // namespace gc |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1563 | } // namespace art |