/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
constexpr bool kUseRecursiveMark = false;
constexpr bool kUseMarkStackPrefetch = true;
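// Number of object pointers SweepArray frees per batch (inferred from the constant's name;
// SweepArray itself is outside this excerpt).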
constexpr size_t kSweepArrayChunkFreeSize = 1024;

// Parallelism options.
constexpr bool kParallelCardScan = true;
constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least this many
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences, since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
constexpr size_t kMinimumParallelMarkStackSize = 128;
constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
constexpr bool kCountClassesMarked = false;
constexpr bool kProfileLargeObjects = false;
constexpr bool kMeasureOverhead = false;
constexpr bool kCountTasks = false;
constexpr bool kCountJavaLangRefs = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
constexpr bool kCheckLocks = kDebugLocking;

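// The immune region is a single contiguous [immune_begin_, immune_end_) address range covering
// the spaces this collection never reclaims; objects inside it are treated as always marked.
// Extending the region below relies on continuous spaces being sorted, so immune spaces stay
// adjacent to one another.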
void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  // TODO: Use space limits instead of current end_ since the end_ can be changed by dlmalloc
  // callbacks.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (const space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If the previous space was immune, then extend the immune region. Relies on continuous
    // spaces being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      live_stack_freeze_size_(0),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  SetImmuneRange(nullptr, nullptr);
  soft_reference_list_ = nullptr;
  weak_reference_list_ = nullptr;
  finalizer_reference_list_ = nullptr;
  phantom_reference_list_ = nullptr;
  cleared_reference_list_ = nullptr;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_ ||
      GetHeap()->verify_post_gc_heap_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
  }

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());

  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
  } else {
    MarkThreadRoots(self);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
  }
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
  MarkConcurrentRoots();
  UpdateAndMarkModUnion();
  MarkReachableObjects();
}
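
// Immune spaces are not collected, but objects in them may reference objects in collected
// spaces. Each immune space has a mod-union table that remembers which of its cards were
// dirtied, so only references on those cards need to be re-marked here rather than scanning
// the whole space.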
| 269 | |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 270 | void MarkSweep::UpdateAndMarkModUnion() { |
| 271 | for (const auto& space : heap_->GetContinuousSpaces()) { |
| 272 | if (IsImmuneSpace(space)) { |
| 273 | const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" : |
| 274 | "UpdateAndMarkImageModUnionTable"; |
| 275 | base::TimingLogger::ScopedSplit split(name, &timings_); |
| 276 | accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space); |
| 277 | CHECK(mod_union_table != nullptr); |
| 278 | mod_union_table->UpdateAndMarkReferences(MarkRootCallback, this); |
| 279 | } |
| 280 | } |
| 281 | } |
| 282 | |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 283 | void MarkSweep::MarkThreadRoots(Thread* self) { |
| 284 | MarkRootsCheckpoint(self); |
| 285 | } |
| 286 | |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 287 | void MarkSweep::MarkReachableObjects() { |
| 288 | // Mark everything allocated since the last as GC live so that we can sweep concurrently, |
| 289 | // knowing that new allocations won't be marked as live. |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 290 | timings_.StartSplit("MarkStackAsLive"); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 291 | accounting::ObjectStack* live_stack = heap_->GetLiveStack(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 292 | heap_->MarkAllocStackAsLive(live_stack); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 293 | live_stack->Reset(); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 294 | timings_.EndSplit(); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 295 | // Recursively mark all the non-image bits set in the mark bitmap. |
| 296 | RecursiveMark(); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 297 | } |
| 298 | |
| 299 | void MarkSweep::ReclaimPhase() { |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 300 | base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_); |
Mathieu Chartier | 720ef76 | 2013-08-17 14:46:54 -0700 | [diff] [blame] | 301 | Thread* self = Thread::Current(); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 302 | |
| 303 | if (!IsConcurrent()) { |
| 304 | ProcessReferences(self); |
Mathieu Chartier | c11d9b8 | 2013-09-19 10:01:59 -0700 | [diff] [blame] | 305 | } |
| 306 | |
| 307 | { |
| 308 | WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); |
| 309 | SweepSystemWeaks(); |
| 310 | } |
| 311 | |
| 312 | if (IsConcurrent()) { |
| 313 | Runtime::Current()->AllowNewSystemWeaks(); |
| 314 | |
Anwar Ghuloum | a9a5092 | 2013-08-09 21:34:20 -0700 | [diff] [blame] | 315 | base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_); |
Mathieu Chartier | 9642c96 | 2013-08-05 17:40:36 -0700 | [diff] [blame] | 316 | WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 317 | accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get(); |
Mathieu Chartier | 9642c96 | 2013-08-05 17:40:36 -0700 | [diff] [blame] | 318 | // The allocation stack contains things allocated since the start of the GC. These may have been |
| 319 | // marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC. |
| 320 | // Remove these objects from the mark bitmaps so that they will be eligible for sticky |
| 321 | // collection. |
| 322 | // There is a race here which is safely handled. Another thread such as the hprof could |
| 323 | // have flushed the alloc stack after we resumed the threads. This is safe however, since |
| 324 | // reseting the allocation stack zeros it out with madvise. This means that we will either |
| 325 | // read NULLs or attempt to unmark a newly allocated object which will not be marked in the |
| 326 | // first place. |
| 327 | mirror::Object** end = allocation_stack->End(); |
| 328 | for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) { |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 329 | const Object* obj = *it; |
Mathieu Chartier | 9642c96 | 2013-08-05 17:40:36 -0700 | [diff] [blame] | 330 | if (obj != NULL) { |
| 331 | UnMarkObjectNonNull(obj); |
| 332 | } |
| 333 | } |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 334 | } |
| 335 | |
| 336 | // Before freeing anything, lets verify the heap. |
| 337 | if (kIsDebugBuild) { |
| 338 | ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); |
| 339 | VerifyImageRoots(); |
| 340 | } |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 341 | |
| 342 | { |
| 343 | WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); |
| 344 | |
| 345 | // Reclaim unmarked objects. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 346 | Sweep(false); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 347 | |
| 348 | // Swap the live and mark bitmaps for each space which we modified space. This is an |
| 349 | // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound |
| 350 | // bitmaps. |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 351 | timings_.StartSplit("SwapBitmaps"); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 352 | SwapBitmaps(); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 353 | timings_.EndSplit(); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 354 | |
| 355 | // Unbind the live and mark bitmaps. |
| 356 | UnBindBitmaps(); |
| 357 | } |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 358 | } |
| 359 | |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 360 | void MarkSweep::SetImmuneRange(Object* begin, Object* end) { |
| 361 | immune_begin_ = begin; |
| 362 | immune_end_ = end; |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 363 | } |
| 364 | |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 365 | void MarkSweep::FindDefaultMarkBitmap() { |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 366 | base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_); |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 367 | for (const auto& space : GetHeap()->GetContinuousSpaces()) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 368 | accounting::SpaceBitmap* bitmap = space->GetMarkBitmap(); |
| 369 | if (bitmap != nullptr && |
| 370 | space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) { |
| 371 | current_mark_bitmap_ = bitmap; |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 372 | CHECK(current_mark_bitmap_ != NULL); |
| 373 | return; |
| 374 | } |
| 375 | } |
| 376 | GetHeap()->DumpSpaces(); |
| 377 | LOG(FATAL) << "Could not find a default mark bitmap"; |
| 378 | } |
| 379 | |
Mathieu Chartier | ac86a7c | 2012-11-12 15:03:16 -0800 | [diff] [blame] | 380 | void MarkSweep::ExpandMarkStack() { |
Mathieu Chartier | ba311b4 | 2013-08-27 13:02:30 -0700 | [diff] [blame] | 381 | ResizeMarkStack(mark_stack_->Capacity() * 2); |
| 382 | } |
| 383 | |
| 384 | void MarkSweep::ResizeMarkStack(size_t new_size) { |
Mathieu Chartier | ac86a7c | 2012-11-12 15:03:16 -0800 | [diff] [blame] | 385 | // Rare case, no need to have Thread::Current be a parameter. |
Mathieu Chartier | ac86a7c | 2012-11-12 15:03:16 -0800 | [diff] [blame] | 386 | if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) { |
| 387 | // Someone else acquired the lock and expanded the mark stack before us. |
| 388 | return; |
| 389 | } |
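  // Copy the entries aside first: Resize presumably does not preserve the stack's contents, so
  // they are replayed into the new backing storage below.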
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

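// Used by ReclaimPhase to clear the mark bits of objects allocated during this GC, so that a
// later sticky collection can still reclaim them.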
inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space; failing that, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space; failing that, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      // Lock is not needed but is here anyway to please annotalysis.
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space; failing that, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

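  // Multiple workers may race to mark the same object; AtomicTestAndSet ensures only one of them
  // observes it as unmarked, so each object is pushed onto at most one task's mark stack.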
  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
inline void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

Object* MarkSweep::MarkRootParallelCallback(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(root);
  return root;
}

Object* MarkSweep::MarkRootCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(root);
  return root;
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  VisitObjectReferences(const_cast<Object*>(obj), [this](const Object* obj, const Object* ref,
      MemberOffset offset, bool is_static) NO_THREAD_SAFETY_ANALYSIS {
    Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    CheckReference(obj, ref, offset, is_static);
  }, true);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                const Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

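  // Maximum number of entries in a task's local mark stack. Overflow is handled by
  // MarkStackPush, which donates half of the entries to the thread pool as a new task.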
  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
                             bool /* is_static */) ALWAYS_INLINE {
        if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
          if (kUseFinger) {
            android_memory_barrier();
            if (reinterpret_cast<uintptr_t>(ref) >=
                static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
              return;
            }
          }
          chunk_task_->MarkStackPush(ref);
        }
      });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  const Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

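  // Push an object onto the local mark stack. On overflow, hand the top half of the stack to the
  // thread pool as a fresh MarkStackTask so that idle workers can pick up the excess work.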
  void MarkStackPush(const Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow: give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK(mark_stack_pos_ < kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
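    // With prefetching enabled, objects are drained through this small FIFO and prefetched a few
    // entries before they are scanned, hiding part of the cache-miss cost of touching each one.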
    for (;;) {
      const Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(const_cast<mirror::Object*>(obj));
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               const Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

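// Scans objects on dirty cards. The parallel path splits each space's address range into
// per-thread chunks and also hands each task a slice of the global mark stack, so workers start
// out with some marking work already queued.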
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 791 | void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) { |
| 792 | accounting::CardTable* card_table = GetHeap()->GetCardTable(); |
| 793 | ThreadPool* thread_pool = GetHeap()->GetThreadPool(); |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 794 | size_t thread_count = GetThreadCount(paused); |
| 795 | // The parallel version with only one thread is faster for card scanning, TODO: fix. |
| 796 | if (kParallelCardScan && thread_count > 0) { |
Mathieu Chartier | 720ef76 | 2013-08-17 14:46:54 -0700 | [diff] [blame] | 797 | Thread* self = Thread::Current(); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 798 | // Can't have a different split for each space since multiple spaces can have their cards being |
| 799 | // scanned at the same time. |
| 800 | timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects"); |
| 801 | // Try to take some of the mark stack since we can pass this off to the worker tasks. |
| 802 | const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin()); |
| 803 | const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End()); |
Mathieu Chartier | 720ef76 | 2013-08-17 14:46:54 -0700 | [diff] [blame] | 804 | const size_t mark_stack_size = mark_stack_end - mark_stack_begin; |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 805 | // Estimated number of work tasks we will create. |
| 806 | const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count; |
| 807 | DCHECK_NE(mark_stack_tasks, 0U); |
| 808 | const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2, |
| 809 | mark_stack_size / mark_stack_tasks + 1); |
| 810 | for (const auto& space : GetHeap()->GetContinuousSpaces()) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 811 | if (space->GetMarkBitmap() == nullptr) { |
| 812 | continue; |
| 813 | } |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 814 | byte* card_begin = space->Begin(); |
| 815 | byte* card_end = space->End(); |
Hiroshi Yamauchi | 0941b04 | 2013-11-05 11:34:03 -0800 | [diff] [blame] | 816 | // Align up the end address. For example, the image space's end |
| 817 | // may not be card-size-aligned. |
| 818 | card_end = AlignUp(card_end, accounting::CardTable::kCardSize); |
| 819 | DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin)); |
| 820 | DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end)); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 821 | // Calculate how many bytes of heap we will scan, |
| 822 | const size_t address_range = card_end - card_begin; |
| 823 | // Calculate how much address range each task gets. |
| 824 | const size_t card_delta = RoundUp(address_range / thread_count + 1, |
| 825 | accounting::CardTable::kCardSize); |
| 826 | // Create the worker tasks for this space. |
| 827 | while (card_begin != card_end) { |
| 828 | // Add a range of cards. |
| 829 | size_t addr_remaining = card_end - card_begin; |
| 830 | size_t card_increment = std::min(card_delta, addr_remaining); |
| 831 | // Take from the back of the mark stack. |
| 832 | size_t mark_stack_remaining = mark_stack_end - mark_stack_begin; |
| 833 | size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining); |
| 834 | mark_stack_end -= mark_stack_increment; |
| 835 | mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment)); |
| 836 | DCHECK_EQ(mark_stack_end, mark_stack_->End()); |
| 837 | // Add the new task to the thread pool. |
| 838 | auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin, |
| 839 | card_begin + card_increment, minimum_age, |
| 840 | mark_stack_increment, mark_stack_end); |
| 841 | thread_pool->AddTask(self, task); |
| 842 | card_begin += card_increment; |
| 843 | } |
| 844 | } |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 845 | |
Hiroshi Yamauchi | 0941b04 | 2013-11-05 11:34:03 -0800 | [diff] [blame] | 846 | // Note: the card scan below may dirty new cards (and scan them) |
| 847 | // as a side effect when a Reference object is encountered and |
| 848 | // queued during the marking. See b/11465268. |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 849 | thread_pool->SetMaxActiveWorkers(thread_count - 1); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 850 | thread_pool->StartWorkers(self); |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 851 | thread_pool->Wait(self, true, true); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 852 | thread_pool->StopWorkers(self); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 853 | timings_.EndSplit(); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 854 | } else { |
| 855 | for (const auto& space : GetHeap()->GetContinuousSpaces()) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 856 | if (space->GetMarkBitmap() != nullptr) { |
| 857 | // Image spaces are handled properly since live == marked for them. |
| 858 | switch (space->GetGcRetentionPolicy()) { |
| 859 | case space::kGcRetentionPolicyNeverCollect: |
| 860 | timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" : |
| 861 | "ScanGrayImageSpaceObjects"); |
| 862 | break; |
| 863 | case space::kGcRetentionPolicyFullCollect: |
| 864 | timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" : |
| 865 | "ScanGrayZygoteSpaceObjects"); |
| 866 | break; |
| 867 | case space::kGcRetentionPolicyAlwaysCollect: |
| 868 | timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" : |
| 869 | "ScanGrayAllocSpaceObjects"); |
| 870 | break; |
| 871 | } |
| 872 | ScanObjectVisitor visitor(this); |
| 873 | card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age); |
| 874 | timings_.EndSplit(); |
| 875 | } |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 876 | } |
Mathieu Chartier | 262e5ff | 2012-06-01 17:35:38 -0700 | [diff] [blame] | 877 | } |
| 878 | } |
| 879 | |
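// A standalone sketch of the card-range partitioning used above, assuming the
// end address has already been aligned up to the card size. The helper name
// and the use of plain integers instead of byte* are illustrative only; the
// real loop above additionally peels off a slice of the mark stack per task.
static std::vector<std::pair<uintptr_t, uintptr_t>> SketchCardChunks(
    uintptr_t begin, uintptr_t end, size_t thread_count, size_t card_size) {
  std::vector<std::pair<uintptr_t, uintptr_t>> chunks;
  // Same formula as above: round up so no card straddles two tasks, and the
  // +1 keeps the delta non-zero for ranges smaller than thread_count.
  size_t delta = (end - begin) / thread_count + 1;
  delta = RoundUp(delta, card_size);
  while (begin != end) {
    const size_t increment = std::min(delta, static_cast<size_t>(end - begin));
    chunks.push_back(std::make_pair(begin, begin + increment));
    begin += increment;
  }
  return chunks;
}
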
| 880 | void MarkSweep::VerifyImageRoots() { |
| 881 | // VerifyImageRoots ensures that all references inside the image space point to
| 882 | // objects which are either in the image space or marked objects in the alloc
| 883 | // space.
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 884 | timings_.StartSplit("VerifyImageRoots"); |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 885 | for (const auto& space : GetHeap()->GetContinuousSpaces()) { |
| 886 | if (space->IsImageSpace()) { |
| 887 | space::ImageSpace* image_space = space->AsImageSpace(); |
| 888 | uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin()); |
| 889 | uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End()); |
| 890 | accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap(); |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 891 | DCHECK(live_bitmap != NULL); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 892 | live_bitmap->VisitMarkedRange(begin, end, [this](const Object* obj) { |
| 893 | if (kCheckLocks) { |
| 894 | Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current()); |
| 895 | } |
| 896 | DCHECK(obj != NULL); |
| 897 | CheckObject(obj); |
| 898 | }); |
Mathieu Chartier | 262e5ff | 2012-06-01 17:35:38 -0700 | [diff] [blame] | 899 | } |
| 900 | } |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 901 | timings_.EndSplit(); |
Mathieu Chartier | 262e5ff | 2012-06-01 17:35:38 -0700 | [diff] [blame] | 902 | } |
| 903 | |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 904 | class RecursiveMarkTask : public MarkStackTask<false> { |
| 905 | public: |
| 906 | RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, |
| 907 | accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end) |
| 908 | : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), |
| 909 | bitmap_(bitmap), |
| 910 | begin_(begin), |
| 911 | end_(end) { |
| 912 | } |
| 913 | |
| 914 | protected: |
| 915 | accounting::SpaceBitmap* const bitmap_; |
| 916 | const uintptr_t begin_; |
| 917 | const uintptr_t end_; |
| 918 | |
| 919 | virtual void Finalize() { |
| 920 | delete this; |
| 921 | } |
| 922 | |
| 923 | // Scans all of the objects in the bitmap range, then drains the local mark stack.
| 924 | virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { |
| 925 | ScanObjectParallelVisitor visitor(this); |
| 926 | bitmap_->VisitMarkedRange(begin_, end_, visitor); |
| 927 | // Finish by emptying our local mark stack. |
| 928 | MarkStackTask::Run(self); |
| 929 | } |
| 930 | }; |
| 931 | |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 932 | // Populates the mark stack based on the set of marked objects and |
| 933 | // recursively marks until the mark stack is emptied. |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 934 | void MarkSweep::RecursiveMark() { |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 935 | base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_); |
Brian Carlstrom | 1f87008 | 2011-08-23 16:02:11 -0700 | [diff] [blame] | 936 | // RecursiveMark will build the lists of known instances of the Reference classes. |
| 937 | // See DelayReferenceReferent for details. |
| 938 | CHECK(soft_reference_list_ == NULL); |
| 939 | CHECK(weak_reference_list_ == NULL); |
| 940 | CHECK(finalizer_reference_list_ == NULL); |
| 941 | CHECK(phantom_reference_list_ == NULL); |
| 942 | CHECK(cleared_reference_list_ == NULL); |
| 943 | |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 944 | if (kUseRecursiveMark) { |
| 945 | const bool partial = GetGcType() == kGcTypePartial; |
| 946 | ScanObjectVisitor scan_visitor(this); |
| 947 | auto* self = Thread::Current(); |
| 948 | ThreadPool* thread_pool = heap_->GetThreadPool(); |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 949 | size_t thread_count = GetThreadCount(false); |
| 950 | const bool parallel = kParallelRecursiveMark && thread_count > 1; |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 951 | mark_stack_->Reset(); |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 952 | for (const auto& space : GetHeap()->GetContinuousSpaces()) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 953 | if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || |
| 954 | (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 955 | current_mark_bitmap_ = space->GetMarkBitmap(); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 956 | if (current_mark_bitmap_ == nullptr) { |
| 957 | continue; |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 958 | } |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 959 | if (parallel) { |
| 960 | // We will use the mark stack in the future.
| 961 | // CHECK(mark_stack_->IsEmpty()); |
| 962 | // This function does not handle heap end increasing, so we must use the space end. |
| 963 | uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); |
| 964 | uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); |
| 965 | atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF); |
| 966 | |
| 967 | // Create a few worker tasks. |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 968 | const size_t n = thread_count * 2; |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 969 | while (begin != end) { |
| 970 | uintptr_t start = begin; |
| 971 | uintptr_t delta = (end - begin) / n; |
| 972 | delta = RoundUp(delta, KB); |
| 973 | if (delta < 16 * KB) delta = end - begin; |
| 974 | begin += delta; |
| 975 | auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start, |
| 976 | begin); |
| 977 | thread_pool->AddTask(self, task); |
| 978 | } |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 979 | thread_pool->SetMaxActiveWorkers(thread_count - 1); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 980 | thread_pool->StartWorkers(self); |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 981 | thread_pool->Wait(self, true, true); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 982 | thread_pool->StopWorkers(self); |
| 983 | } else { |
| 984 | // This function does not handle heap end increasing, so we must use the space end. |
| 985 | uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); |
| 986 | uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); |
| 987 | current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor); |
| 988 | } |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 989 | } |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 990 | } |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 991 | } |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 992 | ProcessMarkStack(false); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 993 | } |
| 994 | |
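// A standalone sketch of the splitting loop above, assuming kMinDelta mirrors
// the 16 KB threshold: each chunk is roughly 1/n of the remaining range,
// rounded up to 1 KB, and once the computed chunk drops below the threshold
// the rest of the range is handed out as a single task.
static size_t SketchRecursiveMarkTaskCount(uintptr_t begin, uintptr_t end, size_t n) {
  static const size_t kMinDelta = 16 * KB;
  size_t tasks = 0;
  while (begin != end) {
    uintptr_t delta = (end - begin) / n;
    delta = RoundUp(delta, KB);
    if (delta < kMinDelta) delta = end - begin;  // Tail too small to split further.
    begin += delta;
    ++tasks;
  }
  return tasks;
}
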
Mathieu Chartier | 6aa3df9 | 2013-09-17 15:17:28 -0700 | [diff] [blame] | 995 | mirror::Object* MarkSweep::SystemWeakIsMarkedCallback(Object* object, void* arg) { |
Mathieu Chartier | 5712d5d | 2013-09-18 17:59:36 -0700 | [diff] [blame] | 996 | if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) { |
Mathieu Chartier | 6aa3df9 | 2013-09-17 15:17:28 -0700 | [diff] [blame] | 997 | return object; |
| 998 | } |
| 999 | return nullptr; |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1000 | } |
| 1001 | |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1002 | void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) { |
| 1003 | ScanGrayObjects(paused, minimum_age); |
| 1004 | ProcessMarkStack(paused); |
Mathieu Chartier | 262e5ff | 2012-06-01 17:35:38 -0700 | [diff] [blame] | 1005 | } |
| 1006 | |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 1007 | void MarkSweep::ReMarkRoots() { |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1008 | timings_.StartSplit("ReMarkRoots"); |
Mathieu Chartier | 423d2a3 | 2013-09-12 17:33:56 -0700 | [diff] [blame] | 1009 | Runtime::Current()->VisitRoots(MarkRootCallback, this, true, true); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1010 | timings_.EndSplit(); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1011 | } |
| 1012 | |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1013 | void MarkSweep::SweepSystemWeaks() { |
| 1014 | Runtime* runtime = Runtime::Current(); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1015 | timings_.StartSplit("SweepSystemWeaks"); |
Mathieu Chartier | 6aa3df9 | 2013-09-17 15:17:28 -0700 | [diff] [blame] | 1016 | runtime->SweepSystemWeaks(SystemWeakIsMarkedCallback, this); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1017 | timings_.EndSplit(); |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 1018 | } |
| 1019 | |
Mathieu Chartier | 6aa3df9 | 2013-09-17 15:17:28 -0700 | [diff] [blame] | 1020 | mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) { |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1021 | reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj); |
| 1022 | // We don't actually want to sweep the object, so let's return "marked".
Mathieu Chartier | 6aa3df9 | 2013-09-17 15:17:28 -0700 | [diff] [blame] | 1023 | return obj; |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1024 | } |
| 1025 | |
| 1026 | void MarkSweep::VerifyIsLive(const Object* obj) { |
| 1027 | Heap* heap = GetHeap(); |
| 1028 | if (!heap->GetLiveBitmap()->Test(obj)) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1029 | space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1030 | if (!large_object_space->GetLiveObjects()->Test(obj)) { |
| 1031 | if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) == |
| 1032 | heap->allocation_stack_->End()) { |
| 1033 | // Object not found! |
| 1034 | heap->DumpSpaces(); |
| 1035 | LOG(FATAL) << "Found dead object " << obj; |
| 1036 | } |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1037 | } |
| 1038 | } |
| 1039 | } |
| 1040 | |
| 1041 | void MarkSweep::VerifySystemWeaks() { |
Mathieu Chartier | 6aa3df9 | 2013-09-17 15:17:28 -0700 | [diff] [blame] | 1042 | // Verify system weaks using a special object visitor which returns the input object.
| 1043 | Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this); |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 1044 | } |
| 1045 | |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1046 | struct SweepCallbackContext { |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1047 | MarkSweep* mark_sweep; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1048 | space::AllocSpace* space; |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 1049 | Thread* self; |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1050 | }; |
| 1051 | |
Mathieu Chartier | 0e4627e | 2012-10-23 16:13:36 -0700 | [diff] [blame] | 1052 | class CheckpointMarkThreadRoots : public Closure { |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1053 | public: |
Brian Carlstrom | 93ba893 | 2013-07-17 21:31:49 -0700 | [diff] [blame] | 1054 | explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {} |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1055 | |
| 1056 | virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS { |
Mathieu Chartier | 3f96670 | 2013-09-04 16:50:05 -0700 | [diff] [blame] | 1057 | ATRACE_BEGIN("Marking thread roots"); |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1058 | // Note: self is not necessarily equal to thread since thread may be suspended. |
| 1059 | Thread* self = Thread::Current(); |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 1060 | CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) |
| 1061 | << thread->GetState() << " thread " << thread << " self " << self; |
Mathieu Chartier | ac86a7c | 2012-11-12 15:03:16 -0800 | [diff] [blame] | 1062 | thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_); |
Mathieu Chartier | 3f96670 | 2013-09-04 16:50:05 -0700 | [diff] [blame] | 1063 | ATRACE_END(); |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1064 | mark_sweep_->GetBarrier().Pass(self); |
| 1065 | } |
| 1066 | |
| 1067 | private: |
| 1068 | MarkSweep* mark_sweep_; |
| 1069 | }; |
| 1070 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1071 | void MarkSweep::MarkRootsCheckpoint(Thread* self) { |
Mathieu Chartier | d22d548 | 2012-11-06 17:14:12 -0800 | [diff] [blame] | 1072 | CheckpointMarkThreadRoots check_point(this); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1073 | timings_.StartSplit("MarkRootsCheckpoint"); |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1074 | ThreadList* thread_list = Runtime::Current()->GetThreadList(); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1075 | // Request that the checkpoint be run on all threads, returning a count of the threads that
| 1076 | // must run through the barrier, including self.
| 1077 | size_t barrier_count = thread_list->RunCheckpoint(&check_point); |
| 1078 | // Release locks then wait for all mutator threads to pass the barrier. |
| 1079 | // TODO: optimize to not release locks when there are no threads to wait for. |
| 1080 | Locks::heap_bitmap_lock_->ExclusiveUnlock(self); |
| 1081 | Locks::mutator_lock_->SharedUnlock(self); |
| 1082 | ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun); |
| 1083 | CHECK_EQ(old_state, kWaitingPerformingGc); |
| 1084 | gc_barrier_->Increment(self, barrier_count); |
| 1085 | self->SetState(kWaitingPerformingGc); |
| 1086 | Locks::mutator_lock_->SharedLock(self); |
| 1087 | Locks::heap_bitmap_lock_->ExclusiveLock(self); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1088 | timings_.EndSplit(); |
Mathieu Chartier | 858f1c5 | 2012-10-17 17:45:55 -0700 | [diff] [blame] | 1089 | } |
| 1090 | |
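// A minimal sketch of the barrier protocol above, assuming the C++11 <mutex>
// and <condition_variable> headers are available: each checkpointed thread
// calls Pass() when it has finished marking its roots, while the GC thread
// adds the thread count and blocks until the net count returns to zero.
// SketchBarrier is an illustrative stand-in, not the real Barrier API.
class SketchBarrier {
 public:
  SketchBarrier() : count_(0) {}
  // Called by each checkpointed thread; may run before or after Increment().
  void Pass() {
    std::unique_lock<std::mutex> lock(mu_);
    if (--count_ == 0) {
      cv_.notify_all();
    }
  }
  // Called by the GC thread; blocks until `delta` threads have passed.
  void Increment(int delta) {
    std::unique_lock<std::mutex> lock(mu_);
    count_ += delta;
    cv_.wait(lock, [this] { return count_ == 0; });
  }
 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int count_;  // Transiently negative if threads pass before the increment.
};
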
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame] | 1091 | void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1092 | SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1093 | MarkSweep* mark_sweep = context->mark_sweep; |
| 1094 | Heap* heap = mark_sweep->GetHeap(); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1095 | space::AllocSpace* space = context->space; |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 1096 | Thread* self = context->self; |
| 1097 | Locks::heap_bitmap_lock_->AssertExclusiveHeld(self); |
Ian Rogers | 5d76c43 | 2011-10-31 21:42:49 -0700 | [diff] [blame] | 1098 | // Use a bulk free that merges consecutive objects before freeing, or free per object?
| 1099 | // Documentation suggests better free performance with merging, but this may be at the expense
| 1100 | // of allocation.
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1101 | size_t freed_objects = num_ptrs; |
| 1102 | // AllocSpace::FreeList clears the values in ptrs, so perform this after clearing the live bits.
| 1103 | size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 1104 | heap->RecordFree(freed_objects, freed_bytes); |
Mathieu Chartier | 4b95e8f | 2013-07-15 16:32:50 -0700 | [diff] [blame] | 1105 | mark_sweep->freed_objects_.fetch_add(freed_objects); |
| 1106 | mark_sweep->freed_bytes_.fetch_add(freed_bytes); |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 1107 | } |
| 1108 | |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 1109 | void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 1110 | SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg); |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 1111 | Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1112 | Heap* heap = context->mark_sweep->GetHeap(); |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 1113 | // We don't free any actual memory to avoid dirtying the shared zygote pages. |
| 1114 | for (size_t i = 0; i < num_ptrs; ++i) { |
| 1115 | Object* obj = static_cast<Object*>(ptrs[i]); |
| 1116 | heap->GetLiveBitmap()->Clear(obj); |
| 1117 | heap->GetCardTable()->MarkCard(obj); |
| 1118 | } |
| 1119 | } |
| 1120 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1121 | void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1122 | space::DlMallocSpace* space = heap_->GetNonMovingSpace(); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1123 | timings_.StartSplit("SweepArray"); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1124 | // Newly allocated objects MUST be in the alloc space and those are the only objects which we are |
| 1125 | // going to free. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1126 | accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); |
| 1127 | accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap(); |
| 1128 | space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); |
| 1129 | accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects(); |
| 1130 | accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects(); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1131 | if (swap_bitmaps) { |
| 1132 | std::swap(live_bitmap, mark_bitmap); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1133 | std::swap(large_live_objects, large_mark_objects); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1134 | } |
| 1135 | |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1136 | size_t freed_bytes = 0; |
| 1137 | size_t freed_large_object_bytes = 0; |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 1138 | size_t freed_objects = 0; |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1139 | size_t freed_large_objects = 0; |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1140 | size_t count = allocations->Size(); |
Mathieu Chartier | d8195f1 | 2012-10-05 12:21:28 -0700 | [diff] [blame] | 1141 | Object** objects = const_cast<Object**>(allocations->Begin()); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1142 | Object** out = objects; |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 1143 | Object** objects_to_chunk_free = out; |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1144 | |
| 1145 | // Empty the allocation stack. |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 1146 | Thread* self = Thread::Current(); |
Brian Carlstrom | 02c8cc6 | 2013-07-18 15:54:44 -0700 | [diff] [blame] | 1147 | for (size_t i = 0; i < count; ++i) { |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1148 | Object* obj = objects[i]; |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1149 | // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack. |
| 1150 | if (LIKELY(mark_bitmap->HasAddress(obj))) { |
| 1151 | if (!mark_bitmap->Test(obj)) { |
| 1152 | // Don't bother un-marking since we clear the mark bitmap anyway.
| 1153 | *(out++) = obj; |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 1154 | // Free objects in chunks. |
| 1155 | DCHECK_GE(out, objects_to_chunk_free); |
| 1156 | DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize); |
| 1157 | if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) { |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1158 | timings_.StartSplit("FreeList"); |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 1159 | size_t chunk_freed_objects = out - objects_to_chunk_free; |
| 1160 | freed_objects += chunk_freed_objects; |
| 1161 | freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free); |
| 1162 | objects_to_chunk_free = out; |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1163 | timings_.EndSplit(); |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 1164 | } |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1165 | } |
| 1166 | } else if (!large_mark_objects->Test(obj)) { |
| 1167 | ++freed_large_objects; |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1168 | freed_large_object_bytes += large_object_space->Free(self, obj); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1169 | } |
| 1170 | } |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 1171 | // Free the remaining objects in chunks. |
| 1172 | DCHECK_GE(out, objects_to_chunk_free); |
| 1173 | DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize); |
| 1174 | if (out - objects_to_chunk_free > 0) { |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1175 | timings_.StartSplit("FreeList"); |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 1176 | size_t chunk_freed_objects = out - objects_to_chunk_free; |
| 1177 | freed_objects += chunk_freed_objects; |
| 1178 | freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free); |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1179 | timings_.EndSplit(); |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 1180 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1181 | CHECK_EQ(count, allocations->Size()); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1182 | timings_.EndSplit(); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1183 | |
Hiroshi Yamauchi | b22a451 | 2013-08-13 15:03:22 -0700 | [diff] [blame] | 1184 | timings_.StartSplit("RecordFree"); |
Mathieu Chartier | 40e978b | 2012-09-07 11:38:36 -0700 | [diff] [blame] | 1185 | VLOG(heap) << "Freed " << freed_objects << "/" << count |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1186 | << " objects with size " << PrettySize(freed_bytes); |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1187 | heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes); |
Mathieu Chartier | 4b95e8f | 2013-07-15 16:32:50 -0700 | [diff] [blame] | 1188 | freed_objects_.fetch_add(freed_objects); |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1189 | freed_large_objects_.fetch_add(freed_large_objects); |
Mathieu Chartier | 4b95e8f | 2013-07-15 16:32:50 -0700 | [diff] [blame] | 1190 | freed_bytes_.fetch_add(freed_bytes); |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1191 | freed_large_object_bytes_.fetch_add(freed_large_object_bytes); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1192 | timings_.EndSplit(); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1193 | |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1194 | timings_.StartSplit("ResetStack"); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1195 | allocations->Reset(); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1196 | timings_.EndSplit(); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1197 | } |
| 1198 | |
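// A standalone sketch of the chunked-free pattern above: pointers to dead
// objects are compacted toward the front of the array and handed to the
// allocator in fixed-size batches, amortizing the per-call cost of FreeList.
// kChunk, IsDead, and BulkFree are illustrative stand-ins.
template <typename IsDead, typename BulkFree>
static size_t SketchChunkedFree(void** objects, size_t count, IsDead is_dead,
                                BulkFree bulk_free) {
  static const size_t kChunk = 64;  // Stand-in for kSweepArrayChunkFreeSize.
  size_t freed = 0;
  size_t pending = 0;
  for (size_t i = 0; i < count; ++i) {
    if (!is_dead(objects[i])) {
      continue;  // Marked objects survive; only dead ones are batched.
    }
    // pending <= i always holds, so this never clobbers an unread slot.
    objects[pending++] = objects[i];
    if (pending == kChunk) {
      bulk_free(objects, pending);  // One allocator call per full chunk.
      freed += pending;
      pending = 0;
    }
  }
  if (pending != 0) {  // Free whatever is left over.
    bulk_free(objects, pending);
    freed += pending;
  }
  return freed;
}
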
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1199 | void MarkSweep::Sweep(bool swap_bitmaps) { |
Mathieu Chartier | b43b7d4 | 2012-06-19 13:15:09 -0700 | [diff] [blame] | 1200 | DCHECK(mark_stack_->IsEmpty()); |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 1201 | base::TimingLogger::ScopedSplit split("Sweep", &timings_);
Mathieu Chartier | b43b7d4 | 2012-06-19 13:15:09 -0700 | [diff] [blame] | 1202 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1203 | const bool partial = (GetGcType() == kGcTypePartial); |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1204 | SweepCallbackContext scc; |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1205 | scc.mark_sweep = this; |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 1206 | scc.self = Thread::Current(); |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 1207 | for (const auto& space : GetHeap()->GetContinuousSpaces()) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1208 | if (!space->IsDlMallocSpace()) { |
| 1209 | continue; |
| 1210 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1211 | // We always sweep always-collect spaces.
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1212 | bool sweep_space = space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1213 | if (!partial && !sweep_space) { |
| 1214 | // We sweep full-collect spaces when the GC isn't a partial GC (i.e. it's a full GC).
| 1215 | sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect); |
| 1216 | } |
| 1217 | if (sweep_space) { |
Mathieu Chartier | 720ef76 | 2013-08-17 14:46:54 -0700 | [diff] [blame] | 1218 | uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); |
| 1219 | uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1220 | scc.space = space->AsDlMallocSpace(); |
| 1221 | accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); |
| 1222 | accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap(); |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 1223 | if (swap_bitmaps) { |
| 1224 | std::swap(live_bitmap, mark_bitmap); |
| 1225 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1226 | if (!space->IsZygoteSpace()) { |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 1227 | base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_); |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 1228 | // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1229 | accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end, |
| 1230 | &SweepCallback, reinterpret_cast<void*>(&scc)); |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 1231 | } else { |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 1232 | base::TimingLogger::ScopedSplit split("SweepZygote", &timings_); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1233 | // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free actual
| 1234 | // memory.
| 1235 | accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end, |
| 1236 | &ZygoteSweepCallback, reinterpret_cast<void*>(&scc)); |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 1237 | } |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 1238 | } |
| 1239 | } |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1240 | |
| 1241 | SweepLargeObjects(swap_bitmaps); |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 1242 | } |
| 1243 | |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1244 | void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { |
Anwar Ghuloum | 4654322 | 2013-08-12 09:28:42 -0700 | [diff] [blame] | 1245 | base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1246 | // Sweep large objects.
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1247 | space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); |
| 1248 | accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects(); |
| 1249 | accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects(); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1250 | if (swap_bitmaps) { |
| 1251 | std::swap(large_live_objects, large_mark_objects); |
| 1252 | } |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1253 | // O(n*log(n)) but hopefully there are not too many large objects. |
| 1254 | size_t freed_objects = 0; |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 1255 | size_t freed_bytes = 0; |
Ian Rogers | 50b35e2 | 2012-10-04 10:09:15 -0700 | [diff] [blame] | 1256 | Thread* self = Thread::Current(); |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1257 | for (const Object* obj : large_live_objects->GetObjects()) { |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 1258 | if (!large_mark_objects->Test(obj)) { |
| 1259 | freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj)); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1260 | ++freed_objects; |
| 1261 | } |
| 1262 | } |
Mathieu Chartier | e53225c | 2013-08-19 10:59:11 -0700 | [diff] [blame] | 1263 | freed_large_objects_.fetch_add(freed_objects); |
| 1264 | freed_large_object_bytes_.fetch_add(freed_bytes); |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 1265 | GetHeap()->RecordFree(freed_objects, freed_bytes); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 1266 | } |
| 1267 | |
Mathieu Chartier | b43b7d4 | 2012-06-19 13:15:09 -0700 | [diff] [blame] | 1268 | void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) { |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 1269 | for (const auto& space : GetHeap()->GetContinuousSpaces()) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1270 | if (space->IsDlMallocSpace() && space->Contains(ref)) { |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1271 | DCHECK(IsMarked(obj)); |
Mathieu Chartier | b43b7d4 | 2012-06-19 13:15:09 -0700 | [diff] [blame] | 1272 | |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1273 | bool is_marked = IsMarked(ref); |
| 1274 | if (!is_marked) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1275 | LOG(INFO) << *space; |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1276 | LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref) |
| 1277 | << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj) |
| 1278 | << "' (" << reinterpret_cast<const void*>(obj) << ") at offset " |
| 1279 | << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked"; |
Mathieu Chartier | 262e5ff | 2012-06-01 17:35:38 -0700 | [diff] [blame] | 1280 | |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1281 | const Class* klass = is_static ? obj->AsClass() : obj->GetClass(); |
| 1282 | DCHECK(klass != NULL); |
Brian Carlstrom | ea46f95 | 2013-07-30 01:26:50 -0700 | [diff] [blame] | 1283 | const ObjectArray<ArtField>* fields = is_static ? klass->GetSFields() : klass->GetIFields(); |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1284 | DCHECK(fields != NULL); |
| 1285 | bool found = false; |
| 1286 | for (int32_t i = 0; i < fields->GetLength(); ++i) { |
Brian Carlstrom | ea46f95 | 2013-07-30 01:26:50 -0700 | [diff] [blame] | 1287 | const ArtField* cur = fields->Get(i); |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1288 | if (cur->GetOffset().Int32Value() == offset.Int32Value()) { |
| 1289 | LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur); |
| 1290 | found = true; |
| 1291 | break; |
| 1292 | } |
| 1293 | } |
| 1294 | if (!found) { |
| 1295 | LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value(); |
| 1296 | } |
Mathieu Chartier | 262e5ff | 2012-06-01 17:35:38 -0700 | [diff] [blame] | 1297 | |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1298 | bool obj_marked = heap_->GetCardTable()->IsDirty(obj); |
| 1299 | if (!obj_marked) { |
| 1300 | LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' " |
| 1301 | << "(" << reinterpret_cast<const void*>(obj) << ") contains references to " |
| 1302 | << "the alloc space, but wasn't card marked"; |
Mathieu Chartier | 262e5ff | 2012-06-01 17:35:38 -0700 | [diff] [blame] | 1303 | } |
| 1304 | } |
Ian Rogers | 5d76c43 | 2011-10-31 21:42:49 -0700 | [diff] [blame] | 1305 | } |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 1306 | break; |
Ian Rogers | 5d76c43 | 2011-10-31 21:42:49 -0700 | [diff] [blame] | 1307 | } |
| 1308 | } |
| 1309 | |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1310 | // Process the "referent" field in a java.lang.ref.Reference. If the |
| 1311 | // referent has not yet been marked, put it on the appropriate list in |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1312 | // the heap for later processing. |
| 1313 | void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) { |
| 1314 | DCHECK(klass != nullptr); |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 1315 | DCHECK(klass->IsReferenceClass()); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1316 | DCHECK(obj != NULL); |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1317 | Object* referent = heap_->GetReferenceReferent(obj); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1318 | if (referent != NULL && !IsMarked(referent)) { |
| 1319 | if (kCountJavaLangRefs) { |
| 1320 | ++reference_count_; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1321 | } |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1322 | Thread* self = Thread::Current(); |
| 1323 | // TODO: Remove these locks, and use atomic stacks for storing references? |
Mathieu Chartier | b4ea4de | 2013-09-18 09:58:29 -0700 | [diff] [blame] | 1324 | // We need to check that the references haven't already been enqueued since we can end up |
| 1325 | // scanning the same reference multiple times due to dirty cards. |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1326 | if (klass->IsSoftReferenceClass()) { |
| 1327 | MutexLock mu(self, *heap_->GetSoftRefQueueLock()); |
Mathieu Chartier | b4ea4de | 2013-09-18 09:58:29 -0700 | [diff] [blame] | 1328 | if (!heap_->IsEnqueued(obj)) { |
| 1329 | heap_->EnqueuePendingReference(obj, &soft_reference_list_); |
| 1330 | } |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1331 | } else if (klass->IsWeakReferenceClass()) { |
| 1332 | MutexLock mu(self, *heap_->GetWeakRefQueueLock()); |
Mathieu Chartier | b4ea4de | 2013-09-18 09:58:29 -0700 | [diff] [blame] | 1333 | if (!heap_->IsEnqueued(obj)) { |
| 1334 | heap_->EnqueuePendingReference(obj, &weak_reference_list_); |
| 1335 | } |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1336 | } else if (klass->IsFinalizerReferenceClass()) { |
| 1337 | MutexLock mu(self, *heap_->GetFinalizerRefQueueLock()); |
Mathieu Chartier | b4ea4de | 2013-09-18 09:58:29 -0700 | [diff] [blame] | 1338 | if (!heap_->IsEnqueued(obj)) { |
| 1339 | heap_->EnqueuePendingReference(obj, &finalizer_reference_list_); |
| 1340 | } |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1341 | } else if (klass->IsPhantomReferenceClass()) { |
| 1342 | MutexLock mu(self, *heap_->GetPhantomRefQueueLock()); |
Mathieu Chartier | b4ea4de | 2013-09-18 09:58:29 -0700 | [diff] [blame] | 1343 | if (!heap_->IsEnqueued(obj)) { |
| 1344 | heap_->EnqueuePendingReference(obj, &phantom_reference_list_); |
| 1345 | } |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1346 | } else { |
| 1347 | LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) |
| 1348 | << " " << std::hex << klass->GetAccessFlags(); |
| 1349 | } |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1350 | } |
| 1351 | } |
| 1352 | |
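// A sketch of the enqueue-once guard above, assuming the pending list is the
// usual circular singly-linked list keyed off a pending-next field: a
// reference counts as "enqueued" exactly when that field is non-null, which
// is what lets a dirty-card rescan hit the same Reference without queuing it
// twice. SketchReference and the field layout are illustrative only.
struct SketchReference {
  SketchReference* pending_next;  // Null until the reference is enqueued.
};
static void SketchEnqueuePendingOnce(SketchReference* ref, SketchReference** list) {
  if (ref->pending_next != nullptr) {
    return;  // Already on a pending list; nothing to do.
  }
  if (*list == nullptr) {
    ref->pending_next = ref;  // One-element cyclic list.
    *list = ref;
  } else {
    // Insert between the list anchor and the current front.
    ref->pending_next = (*list)->pending_next;
    (*list)->pending_next = ref;
  }
}
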
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1353 | class MarkObjectVisitor { |
| 1354 | public: |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1355 | explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {} |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1356 | |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1357 | // TODO: Fix this when annotalysis works with visitors.
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 1358 | void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */, |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1359 | bool /* is_static */) const ALWAYS_INLINE |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1360 | NO_THREAD_SAFETY_ANALYSIS { |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1361 | if (kCheckLocks) { |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 1362 | Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); |
| 1363 | Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); |
| 1364 | } |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1365 | mark_sweep_->MarkObject(ref); |
| 1366 | } |
| 1367 | |
| 1368 | private: |
| 1369 | MarkSweep* const mark_sweep_; |
| 1370 | }; |
| 1371 | |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1372 | // Scans an object reference. Determines the type of the reference |
| 1373 | // and dispatches to a specialized scanning routine. |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1374 | void MarkSweep::ScanObject(Object* obj) { |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1375 | MarkObjectVisitor visitor(this); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1376 | ScanObjectVisit(obj, visitor); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1377 | } |
| 1378 | |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 1379 | void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1380 | Thread* self = Thread::Current(); |
| 1381 | ThreadPool* thread_pool = GetHeap()->GetThreadPool(); |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 1382 | const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1, |
| 1383 | static_cast<size_t>(MarkStackTask<false>::kMaxSize)); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1384 | CHECK_GT(chunk_size, 0U); |
| 1385 | // Split the current mark stack up into work tasks. |
| 1386 | for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) { |
| 1387 | const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size); |
| 1388 | thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, |
| 1389 | const_cast<const mirror::Object**>(it))); |
| 1390 | it += delta; |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1391 | } |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 1392 | thread_pool->SetMaxActiveWorkers(thread_count - 1); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1393 | thread_pool->StartWorkers(self); |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 1394 | thread_pool->Wait(self, true, true); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1395 | thread_pool->StopWorkers(self); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1396 | mark_stack_->Reset(); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 1397 | CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked"; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1398 | } |
| 1399 | |
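// A sketch of the chunking above, assuming kMaxChunk stands in for
// MarkStackTask<false>::kMaxSize: the stack is divided into roughly
// thread_count equal chunks, capped so no task overflows its local buffer,
// and the +1 guarantees progress even for a nearly empty stack.
static size_t SketchMarkStackChunkSize(size_t stack_size, size_t thread_count) {
  static const size_t kMaxChunk = 1024;  // Illustrative cap only.
  return std::min(stack_size / thread_count + 1, kMaxChunk);
}
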
Ian Rogers | 5d76c43 | 2011-10-31 21:42:49 -0700 | [diff] [blame] | 1400 | // Scan anything that's on the mark stack. |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1401 | void MarkSweep::ProcessMarkStack(bool paused) { |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1402 | timings_.StartSplit("ProcessMarkStack"); |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 1403 | size_t thread_count = GetThreadCount(paused); |
| 1404 | if (kParallelProcessMarkStack && thread_count > 1 && |
| 1405 | mark_stack_->Size() >= kMinimumParallelMarkStackSize) { |
| 1406 | ProcessMarkStackParallel(thread_count); |
Mathieu Chartier | d8195f1 | 2012-10-05 12:21:28 -0700 | [diff] [blame] | 1407 | } else { |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1408 | // TODO: Tune this. |
| 1409 | static const size_t kFifoSize = 4; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1410 | BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo; |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1411 | for (;;) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1412 | Object* obj = NULL; |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1413 | if (kUseMarkStackPrefetch) { |
| 1414 | while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 1415 | Object* obj = mark_stack_->PopBack(); |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1416 | DCHECK(obj != NULL); |
| 1417 | __builtin_prefetch(obj); |
| 1418 | prefetch_fifo.push_back(obj); |
| 1419 | } |
| 1420 | if (prefetch_fifo.empty()) { |
| 1421 | break; |
| 1422 | } |
| 1423 | obj = prefetch_fifo.front(); |
| 1424 | prefetch_fifo.pop_front(); |
| 1425 | } else { |
| 1426 | if (mark_stack_->IsEmpty()) { |
| 1427 | break; |
| 1428 | } |
| 1429 | obj = mark_stack_->PopBack(); |
| 1430 | } |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1431 | DCHECK(obj != NULL); |
Mathieu Chartier | d8195f1 | 2012-10-05 12:21:28 -0700 | [diff] [blame] | 1432 | ScanObject(obj); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 1433 | } |
| 1434 | } |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1435 | timings_.EndSplit(); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1436 | } |
| 1437 | |
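// A minimal sketch of the prefetch window above, reusing the bounded FIFO
// from base/bounded_fifo.h: objects are popped a few at a time and prefetched
// so the scan of one object overlaps the cache fill of the next. The Stack
// and Scan parameters are illustrative; the real loop operates on the
// collector's mark stack and ScanObject.
template <typename Stack, typename Scan>
static void SketchPrefetchDrain(Stack* stack, Scan scan) {
  static const size_t kWindow = 4;  // Mirrors kFifoSize above; power of two.
  BoundedFifoPowerOfTwo<const void*, kWindow> window;
  for (;;) {
    // Refill the window, issuing a prefetch for each newly popped object.
    while (!stack->IsEmpty() && window.size() < kWindow) {
      const void* obj = stack->PopBack();
      __builtin_prefetch(obj);
      window.push_back(obj);
    }
    if (window.empty()) {
      break;  // Stack drained and window empty: done.
    }
    const void* obj = window.front();
    window.pop_front();
    scan(obj);  // By now the prefetch has usually completed.
  }
}
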
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1438 | // Walks the reference list marking any references subject to the |
| 1439 | // reference clearing policy. References with a black referent are |
| 1440 | // removed from the list. References with white referents biased |
| 1441 | // toward saving are blackened and also removed from the list. |
| 1442 | void MarkSweep::PreserveSomeSoftReferences(Object** list) { |
| 1443 | DCHECK(list != NULL); |
| 1444 | Object* clear = NULL; |
| 1445 | size_t counter = 0; |
Mathieu Chartier | b43b7d4 | 2012-06-19 13:15:09 -0700 | [diff] [blame] | 1446 | |
| 1447 | DCHECK(mark_stack_->IsEmpty()); |
| 1448 | |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1449 | timings_.StartSplit("PreserveSomeSoftReferences"); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1450 | while (*list != NULL) { |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1451 | Object* ref = heap_->DequeuePendingReference(list); |
| 1452 | Object* referent = heap_->GetReferenceReferent(ref); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1453 | if (referent == NULL) { |
| 1454 | // Referent was cleared by the user during marking. |
| 1455 | continue; |
| 1456 | } |
| 1457 | bool is_marked = IsMarked(referent); |
| 1458 | if (!is_marked && ((++counter) & 1)) { |
| 1459 | // Referent is white and biased toward saving, mark it. |
| 1460 | MarkObject(referent); |
| 1461 | is_marked = true; |
| 1462 | } |
| 1463 | if (!is_marked) { |
| 1464 | // Referent is white, queue it for clearing. |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1465 | heap_->EnqueuePendingReference(ref, &clear); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1466 | } |
| 1467 | } |
| 1468 | *list = clear; |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1469 | timings_.EndSplit(); |
| 1470 | |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1471 | // Restart the mark with the newly black references added to the root set. |
| 1472 | ProcessMarkStack(true); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1473 | } |
| 1474 | |
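// A standalone sketch of the save-half policy above: a running counter keeps
// every other still-white soft referent alive, so under memory pressure
// roughly 50% of soft references are cleared rather than all of them. The
// array-of-flags representation is illustrative only.
static size_t SketchPreserveEveryOtherWhite(bool* referent_is_marked, size_t count) {
  size_t preserved = 0;
  size_t counter = 0;
  for (size_t i = 0; i < count; ++i) {
    if (!referent_is_marked[i] && ((++counter) & 1)) {
      referent_is_marked[i] = true;  // Blacken: biased toward saving.
      ++preserved;
    }
  }
  return preserved;
}
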
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1475 | inline bool MarkSweep::IsMarked(const Object* object) const |
| 1476 | SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { |
Mathieu Chartier | 9642c96 | 2013-08-05 17:40:36 -0700 | [diff] [blame] | 1477 | if (IsImmune(object)) { |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 1478 | return true; |
| 1479 | } |
| 1480 | DCHECK(current_mark_bitmap_ != NULL); |
| 1481 | if (current_mark_bitmap_->HasAddress(object)) { |
| 1482 | return current_mark_bitmap_->Test(object); |
| 1483 | } |
| 1484 | return heap_->GetMarkBitmap()->Test(object); |
| 1485 | } |
| 1486 | |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1487 | // Unlink the reference list, clearing reference objects with white
| 1488 | // referents. Cleared references registered to a reference queue are |
| 1489 | // scheduled for appending by the heap worker thread. |
| 1490 | void MarkSweep::ClearWhiteReferences(Object** list) { |
| 1491 | DCHECK(list != NULL); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1492 | while (*list != NULL) { |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1493 | Object* ref = heap_->DequeuePendingReference(list); |
| 1494 | Object* referent = heap_->GetReferenceReferent(ref); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1495 | if (referent != NULL && !IsMarked(referent)) { |
| 1496 | // Referent is white, clear it. |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1497 | heap_->ClearReferenceReferent(ref); |
| 1498 | if (heap_->IsEnqueuable(ref)) { |
| 1499 | heap_->EnqueueReference(ref, &cleared_reference_list_); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1500 | } |
| 1501 | } |
| 1502 | } |
| 1503 | DCHECK(*list == NULL); |
| 1504 | } |
| 1505 | |
| 1506 | // Enqueues finalizer references with white referents. White |
| 1507 | // referents are blackened, moved to the zombie field, and the |
| 1508 | // referent field is cleared. |
| 1509 | void MarkSweep::EnqueueFinalizerReferences(Object** list) { |
| 1510 | DCHECK(list != NULL); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1511 | timings_.StartSplit("EnqueueFinalizerReferences"); |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1512 | MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset(); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1513 | bool has_enqueued = false; |
| 1514 | while (*list != NULL) { |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1515 | Object* ref = heap_->DequeuePendingReference(list); |
| 1516 | Object* referent = heap_->GetReferenceReferent(ref); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1517 | if (referent != NULL && !IsMarked(referent)) { |
| 1518 | MarkObject(referent); |
| 1519 | // If the referent is non-null the reference must be enqueuable.
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1520 | DCHECK(heap_->IsEnqueuable(ref)); |
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 1521 | ref->SetFieldObject(zombie_offset, referent, false); |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 1522 | heap_->ClearReferenceReferent(ref); |
| 1523 | heap_->EnqueueReference(ref, &cleared_reference_list_); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1524 | has_enqueued = true; |
| 1525 | } |
| 1526 | } |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1527 | timings_.EndSplit(); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1528 | if (has_enqueued) { |
Mathieu Chartier | 94c32c5 | 2013-08-09 11:14:04 -0700 | [diff] [blame] | 1529 | ProcessMarkStack(true); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1530 | } |
| 1531 | DCHECK(*list == NULL); |
| 1532 | } |
| 1533 | |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 1534 | // Process reference class instances and schedule finalizations. |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1535 | void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft, |
| 1536 | Object** weak_references, |
| 1537 | Object** finalizer_references, |
| 1538 | Object** phantom_references) { |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 1539 | CHECK(soft_references != NULL); |
| 1540 | CHECK(weak_references != NULL); |
| 1541 | CHECK(finalizer_references != NULL); |
| 1542 | CHECK(phantom_references != NULL); |
| 1543 | CHECK(mark_stack_->IsEmpty()); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1544 | |
| 1545 | // Unless we are in the zygote or required to clear soft references |
| 1546 | // with white referents, preserve some white referents.
Ian Rogers | 2945e24 | 2012-06-03 14:45:16 -0700 | [diff] [blame] | 1547 | if (!clear_soft && !Runtime::Current()->IsZygote()) { |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1548 | PreserveSomeSoftReferences(soft_references); |
| 1549 | } |
| 1550 | |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1551 | timings_.StartSplit("ProcessReferences"); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1552 | // Clear all remaining soft and weak references with white |
| 1553 | // referents. |
| 1554 | ClearWhiteReferences(soft_references); |
| 1555 | ClearWhiteReferences(weak_references); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1556 | timings_.EndSplit(); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1557 | |
| 1558 | // Preserve all white objects with finalize methods and schedule |
| 1559 | // them for finalization. |
| 1560 | EnqueueFinalizerReferences(finalizer_references); |
| 1561 | |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1562 | timings_.StartSplit("ProcessReferences"); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1563 | // Clear all f-reachable soft and weak references with white |
| 1564 | // referents. |
| 1565 | ClearWhiteReferences(soft_references); |
| 1566 | ClearWhiteReferences(weak_references); |
| 1567 | |
| 1568 | // Clear all phantom references with white referents. |
| 1569 | ClearWhiteReferences(phantom_references); |
| 1570 | |
| 1571 | // At this point all reference lists should be empty. |
| 1572 | DCHECK(*soft_references == NULL); |
| 1573 | DCHECK(*weak_references == NULL); |
| 1574 | DCHECK(*finalizer_references == NULL); |
| 1575 | DCHECK(*phantom_references == NULL); |
Anwar Ghuloum | 4446ab9 | 2013-08-09 21:17:25 -0700 | [diff] [blame] | 1576 | timings_.EndSplit(); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1577 | } |

void MarkSweep::UnBindBitmaps() {
  base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}
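// A sketch of the bitmap rotation undone above (a reading of this code,
// assuming the usual bind performed at the start of a sticky collection):
//
//   bind:   temp_bitmap_ <- mark_bitmap_;  mark_bitmap_ <- live_bitmap_
//   unbind: mark_bitmap_ <- temp_bitmap_ (released);  temp_bitmap_ <- NULL
//
// The CHECK_EQ verifies that the bitmap being dropped really was the alias of
// the live bitmap.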

void MarkSweep::FinishPhase() {
  base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  timings_.NewSplit("EnqueueClearedReferences");
  heap->EnqueueClearedReferences(&cleared_references);

  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_time_ns_ += GetDurationNs();
  // Use a uint64_t initial value so std::accumulate sums in 64 bits; an int
  // literal 0 would make the accumulator (and the result) a 32-bit int.
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(),
                                           static_cast<uint64_t>(0), std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

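  // The diagnostics below only log when their corresponding compile-time
  // flags (kCountScannedTypes, kCountTasks, kMeasureOverhead, and the rest)
  // are enabled in the build; otherwise the compiler folds them away.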
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}
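// After FinishPhase the collector instance is ready for reuse: cleared
// references have been handed back to the heap for enqueueing, the cumulative
// timings and statistics have absorbed this cycle, and all mark state (mark
// bitmaps of collectible spaces, the mark stack, and the large-object mark
// set) has been reset for the next GC.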

}  // namespace collector
}  // namespace gc
}  // namespace art