Some fixes for the CC collector.
- Remove a DCHECK in DisableMarkingCheckpoint that caused
occasional spurious failures.
- Check the thread-local GetWeakRefAccessEnabled flag in the boxed
lambdas weak access code.
- Add the missing BroadcastForNewAllocationRecords and
BroadcastForNewWeakBoxedLambdas. The lack of the former caused
occasional deadlocks in the ddmc test (see the sketch below).
- Remove the 'ensure system weaks disallowed' calls, which were dead
code and not useful.
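
For context on the deadlock: with the CC (read barrier) collector,
per-thread weak-ref access is disabled while marking is in progress, and
a thread trying to record an allocation waits on new_record_condition_
until access is re-enabled. Without a broadcast on that condition when
access comes back, the waiter can sleep forever. Below is a minimal
standalone sketch of the pattern, using std::condition_variable instead
of ART's own Mutex/ConditionVariable classes; the names mirror the
runtime but the code is illustrative only, not the actual runtime code.

  #include <condition_variable>
  #include <iostream>
  #include <mutex>
  #include <thread>

  std::mutex alloc_tracker_lock;
  std::condition_variable new_record_condition;
  bool weak_ref_access_enabled = false;  // disabled while CC is marking

  // Illustrative stand-in for the wait in RecordAllocation.
  void RecordAllocation() {
    std::unique_lock<std::mutex> lk(alloc_tracker_lock);
    // Without a matching broadcast from the collector, this wait is
    // never woken once the flag flips back to true: a deadlock.
    new_record_condition.wait(lk, [] { return weak_ref_access_enabled; });
    std::cout << "allocation recorded\n";
  }

  // Illustrative stand-in for BroadcastForNewAllocationRecords.
  void BroadcastForNewAllocationRecords() {
    std::lock_guard<std::mutex> lk(alloc_tracker_lock);
    weak_ref_access_enabled = true;
    new_record_condition.notify_all();  // the wake-up this change adds
  }

  int main() {
    std::thread mutator(RecordAllocation);
    std::thread gc(BroadcastForNewAllocationRecords);
    mutator.join();
    gc.join();
  }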
Bug: 12687968
Change-Id: I33850c8d12e6e1a3aed1c2bb18eba263cbab76e8
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 16c9354..369e408 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -167,14 +167,21 @@
}
void AllocRecordObjectMap::AllowNewAllocationRecords() {
+ CHECK(!kUseReadBarrier);
allow_new_record_ = true;
new_record_condition_.Broadcast(Thread::Current());
}
void AllocRecordObjectMap::DisallowNewAllocationRecords() {
+ CHECK(!kUseReadBarrier);
allow_new_record_ = false;
}
+void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
+ CHECK(kUseReadBarrier);
+ new_record_condition_.Broadcast(Thread::Current());
+}
+
struct AllocRecordStackVisitor : public StackVisitor {
AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max)
SHARED_REQUIRES(Locks::mutator_lock_)
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 0a4f532..ffdfd31 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -277,6 +277,9 @@
void AllowNewAllocationRecords()
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
+ void BroadcastForNewAllocationRecords()
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_);
// TODO: Is there a better way to hide the entries_'s type?
EntryList::iterator Begin()
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index a5bc60a..57af959 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -499,7 +499,8 @@
DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
<< thread->GetState() << " thread " << thread << " self " << self;
// Disable the thread-local is_gc_marking flag.
- DCHECK(thread->GetIsGcMarking());
+ // Note that a thread that has just started right before this checkpoint may already have this
+ // flag set to false, which is OK.
thread->SetIsGcMarking(false);
// If thread is a running mutator, then act on behalf of the garbage collector.
// See the code in ThreadList::RunCheckpoint.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 4bc44d3..961b80f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3844,6 +3844,7 @@
}
void Heap::AllowNewAllocationRecords() const {
+ CHECK(!kUseReadBarrier);
if (IsAllocTrackingEnabled()) {
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
if (IsAllocTrackingEnabled()) {
@@ -3853,6 +3854,7 @@
}
void Heap::DisallowNewAllocationRecords() const {
+ CHECK(!kUseReadBarrier);
if (IsAllocTrackingEnabled()) {
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
if (IsAllocTrackingEnabled()) {
@@ -3861,6 +3863,16 @@
}
}
+void Heap::BroadcastForNewAllocationRecords() const {
+ CHECK(kUseReadBarrier);
+ if (IsAllocTrackingEnabled()) {
+ MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+ if (IsAllocTrackingEnabled()) {
+ GetAllocationRecords()->BroadcastForNewAllocationRecords();
+ }
+ }
+}
+
// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
class StackCrawlState {
public:
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 8bffe5e..d0d0be3 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -753,6 +753,10 @@
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
+ void BroadcastForNewAllocationRecords() const
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::alloc_tracker_lock_);
+
void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
// Create a new alloc space and compact default alloc space to it.