Make empty checkpoint work while weak ref access is disabled.
Fix a potential race on PushOntoMarkStack for CC by running an empty
checkpoint (while weak ref access is disabled).
Bug: 32508093
Bug: 12687968
Test: test-art-host with CC/CMS, libartd boot with N9, Ritz EAAC.
Change-Id: I3749bb525e7734804307ee16262355f3fc730312
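The race being fixed, reconstructed from the commit message and b/32508093 (the exact mutator-side code path is an assumption; this is a schematic, not code from the tree): a mutator's read barrier can decide to mark an object but be preempted before its PushOntoMarkStack completes, so the GC may see the shared mark stack as drained while an entry is still in flight.

    //   Mutator thread M                     GC thread (ConcurrentCopying)
    //   ----------------                     -----------------------------
    //   read barrier marks object X
    //   <preempted before
    //    PushOntoMarkStack(X) retires>       disables weak ref access
    //                                        drains the shared mark stack
    //                                        (X is missing: not pushed yet)
    //   PushOntoMarkStack(X)  // too late, the GC has moved on
    //
    // An empty checkpoint issued after draining makes the GC wait until every
    // runnable mutator has crossed a safe point, which retires any in-flight
    // push before the stack is treated as empty.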
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index d921900..e18a955 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -181,7 +181,6 @@
}
void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
- CHECK(kUseReadBarrier);
new_record_condition_.Broadcast(Thread::Current());
}
@@ -291,6 +290,9 @@
// Wait for GC's sweeping to complete and allow new records
while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
new_record_condition_.WaitHoldingLocks(self);
}
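The ordering in this loop matters: a thread parked here with weak ref access disabled is woken by a checkpoint broadcast, re-evaluates the loop condition (still true), and must answer the checkpoint before sleeping again, hence CheckEmptyCheckpoint() before every wait. The CHECK(kUseReadBarrier) removal above (and the matching removals in the heap.cc and reference_processor.cc hunks below) follows from the new broadcast-for-checkpoint path, which can also fire when read barriers are off; see the system_weak.h note further down. A hedged sketch of what Thread::CheckEmptyCheckpoint is expected to do; the real body lives in runtime/thread.cc, and kEmptyCheckpointRequest is an assumed flag name:

    // Sketch only, under the assumptions named above.
    void Thread::CheckEmptyCheckpoint() {
      if (ReadFlag(kEmptyCheckpointRequest)) {
        AtomicClearFlag(kEmptyCheckpointRequest);
        // Pass the shared barrier so the GC waiting in
        // ConcurrentCopying::IssueEmptyCheckpoint() can proceed.
        Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this);
      }
    }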
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index c8b2b89..90cff6a 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -261,7 +261,6 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
void BroadcastForNewAllocationRecords()
- REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
// TODO: Is there a better way to hide the entries_'s type?
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 6dfab8b..1e1b05c 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -514,26 +514,6 @@
live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}
-class EmptyCheckpoint : public Closure {
- public:
- explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
- : concurrent_copying_(concurrent_copying) {
- }
-
- virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
- // Note: self is not necessarily equal to thread since thread may be suspended.
- Thread* self = Thread::Current();
- CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
- << thread->GetState() << " thread " << thread << " self " << self;
- // If thread is a running mutator, then act on behalf of the garbage collector.
- // See the code in ThreadList::RunCheckpoint.
- concurrent_copying_->GetBarrier().Pass(self);
- }
-
- private:
- ConcurrentCopying* const concurrent_copying_;
-};
-
// Used to visit objects in the immune spaces.
inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
DCHECK(obj != nullptr);
@@ -835,10 +815,10 @@
void ConcurrentCopying::IssueEmptyCheckpoint() {
Thread* self = Thread::Current();
- EmptyCheckpoint check_point(this);
ThreadList* thread_list = Runtime::Current()->GetThreadList();
- gc_barrier_->Init(self, 0);
- size_t barrier_count = thread_list->RunCheckpoint(&check_point);
+ Barrier* barrier = thread_list->EmptyCheckpointBarrier();
+ barrier->Init(self, 0);
+ size_t barrier_count = thread_list->RunEmptyCheckpoint();
// If there are no threads to wait for, which implies that all the checkpoint functions have
// finished, then there is no need to release the mutator lock.
if (barrier_count == 0) {
@@ -848,7 +828,7 @@
Locks::mutator_lock_->SharedUnlock(self);
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
- gc_barrier_->Increment(self, barrier_count);
+ barrier->Increment(self, barrier_count);
}
Locks::mutator_lock_->SharedLock(self);
}
@@ -1253,6 +1233,10 @@
}
gc_mark_stack_->Reset();
} else if (mark_stack_mode == kMarkStackModeShared) {
+ // Do an empty checkpoint to avoid a race with a mutator preempted in the middle of a read
+ // barrier but before pushing onto the mark stack. b/32508093. Note the weak ref access is
+ // disabled at this point.
+ IssueEmptyCheckpoint();
// Process the shared GC mark stack with a lock.
{
MutexLock mu(self, mark_stack_lock_);
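Replacing the per-collector EmptyCheckpoint closure and gc_barrier_ with a barrier owned by ThreadList lets threads that are blocked for weak ref access, not just running mutators, take part in the checkpoint. A simplified sketch of the RunEmptyCheckpoint() contract the hunk above relies on; RequestEmptyCheckpoint() is an assumed helper, and the real version must also handle threads caught mid-suspension:

    // Simplified sketch, not the actual ThreadList implementation.
    size_t ThreadList::RunEmptyCheckpoint() {
      Thread* self = Thread::Current();
      size_t count = 0;
      MutexLock mu(self, *Locks::thread_list_lock_);
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      for (Thread* thread : list_) {
        if (thread != self && thread->RequestEmptyCheckpoint()) {
          // A runnable thread: it will Pass() the empty checkpoint barrier at
          // its next safe point, or in CheckEmptyCheckpoint() if it is about
          // to block for weak ref access.
          ++count;
        }
        // A suspended thread has by definition already reached a safe point
        // and is not counted; this is why barrier_count can be zero.
      }
      return count;  // The caller waits on EmptyCheckpointBarrier() for this many.
    }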
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5de004b..447e06e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -4069,7 +4069,6 @@
}
void Heap::BroadcastForNewAllocationRecords() const {
- CHECK(kUseReadBarrier);
// Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
// be set to false while some threads are waiting for system weak access in
// AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index e8eb69e..0c671d2 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -797,7 +797,6 @@
REQUIRES(!Locks::alloc_tracker_lock_);
void BroadcastForNewAllocationRecords() const
- REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 798ecd3..2cde7d5 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -55,7 +55,6 @@
}
void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
- CHECK(kUseReadBarrier);
MutexLock mu(self, *Locks::reference_processor_lock_);
condition_.Broadcast(self);
}
@@ -99,6 +98,9 @@
}
}
}
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
condition_.WaitHoldingLocks(self);
}
return reference->GetReferent();
@@ -270,6 +272,9 @@
// Wait until we are done processing the reference.
while ((!kUseReadBarrier && SlowPathEnabled()) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
condition_.WaitHoldingLocks(self);
}
// At this point, since the sentinel of the reference is live, it is guaranteed to not be
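GetReferent() is where a mutator lands for Reference.get() while the slow path is armed, parked on condition_ with weak ref access disabled. Before this change such a thread could never respond to an empty checkpoint, so the new IssueEmptyCheckpoint() call in the shared-mark-stack path would wait on it forever. A schematic of the deadlock the CheckEmptyCheckpoint() calls break (not code from the tree):

    // Without the fix:
    //
    //   Mutator in GetReferent()            GC in ProcessMarkStack()
    //   ------------------------            ------------------------
    //   weak ref access disabled,
    //   parks on condition_                 IssueEmptyCheckpoint()
    //                                       waits for the parked mutator
    //   never wakes: the GC is stuck,       never finishes: the mutator
    //   so weak ref access is never         never passes the barrier
    //   re-enabled
    //
    // With the fix, the checkpoint broadcast wakes the waiter, the loop
    // condition is re-checked, CheckEmptyCheckpoint() passes the barrier, and
    // both sides make progress.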
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index 3910a28..884e90b 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -30,7 +30,8 @@
virtual void Allow() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void Disallow() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual void Broadcast() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+ // See Runtime::BroadcastForNewSystemWeaks for the broadcast_for_checkpoint definition.
+ virtual void Broadcast(bool broadcast_for_checkpoint) = 0;
virtual void Sweep(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
@@ -61,19 +62,22 @@
allow_new_system_weak_ = false;
}
- void Broadcast() OVERRIDE
- REQUIRES_SHARED(Locks::mutator_lock_)
+ void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) OVERRIDE
REQUIRES(!allow_disallow_lock_) {
- CHECK(kUseReadBarrier);
MutexLock mu(Thread::Current(), allow_disallow_lock_);
new_weak_condition_.Broadcast(Thread::Current());
}
protected:
- void Wait(Thread* self) REQUIRES_SHARED(allow_disallow_lock_) {
+ void Wait(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_) {
// Wait for GC's sweeping to complete and allow new records
while (UNLIKELY((!kUseReadBarrier && !allow_new_system_weak_) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+ // presence of threads blocking for weak ref access.
+ self->CheckEmptyCheckpoint();
new_weak_condition_.WaitHoldingLocks(self);
}
}
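broadcast_for_checkpoint == true means "wake the waiters so they can answer a pending empty checkpoint", not "weak ref access has been re-enabled". The checkpoint broadcast may come from a thread that does not hold the mutator lock and can happen when read barriers are off, which is presumably why both the REQUIRES_SHARED(Locks::mutator_lock_) annotations and the kUseReadBarrier CHECKs are dropped throughout this change. A hedged sketch of the Runtime::BroadcastForNewSystemWeaks caller the new comment points to (runtime.cc is not part of the hunks shown, so the holder list and the CHECK are assumptions):

    // Sketch only; the real body lives in runtime/runtime.cc.
    void Runtime::BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint) {
      // Broadcasting for a checkpoint is legal under any collector; a plain
      // "weak ref access re-enabled" broadcast still implies read barriers.
      CHECK(kUseReadBarrier || broadcast_for_checkpoint);
      monitor_list_->BroadcastForNewMonitors();
      intern_table_->BroadcastForNewInterns();
      java_vm_->BroadcastForNewWeakGlobals();
      heap_->BroadcastForNewAllocationRecords();
    }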
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index af8a444..9b601c0 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -58,12 +58,14 @@
disallow_count_++;
}
- void Broadcast() OVERRIDE
- REQUIRES_SHARED(Locks::mutator_lock_)
+ void Broadcast(bool broadcast_for_checkpoint) OVERRIDE
REQUIRES(!allow_disallow_lock_) {
- SystemWeakHolder::Broadcast();
+ SystemWeakHolder::Broadcast(broadcast_for_checkpoint);
- allow_count_++;
+ if (!broadcast_for_checkpoint) {
+ // Don't count the broadcasts for running checkpoints.
+ allow_count_++;
+ }
}
void Sweep(IsMarkedVisitor* visitor) OVERRIDE