Visit class native roots from VisitReferences
Visit a class's native roots from Class::VisitReferences instead of
from the class linker. This makes it easier to implement class
unloading, since unmarked classes won't have their roots visited by
the class linker.
Bug: 22181835
Change-Id: I63f31e5ebef7b2a0b764b3ba3cb038b3f561b379
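
For context: the hunks below thread the runtime's root-visiting
convention through the mark-sweep visitors. Each visitor gains a
VisitRootIfNonNull() / VisitRoot() pair over mirror::CompressedReference
slots, so Class::VisitReferences can hand a class's native roots straight
to the collector. A minimal sketch of that shape, using hypothetical
stand-ins (Object, CompressedReference, MarkVisitor) rather than the
runtime's real types:

    #include <cstdio>

    struct Object {};  // stand-in for mirror::Object

    // Stand-in for mirror::CompressedReference<T>: a (possibly null) root slot.
    template <typename T>
    struct CompressedReference {
      T* ref;
      bool IsNull() const { return ref == nullptr; }
      T* AsMirrorPtr() const { return ref; }
    };

    class MarkVisitor {
     public:
      // Filter wrapper: empty slots are skipped entirely.
      void VisitRootIfNonNull(CompressedReference<Object>* root) const {
        if (!root->IsNull()) {
          VisitRoot(root);
        }
      }
      // Non-null roots are handed to the marking routine.
      void VisitRoot(CompressedReference<Object>* root) const {
        Mark(root->AsMirrorPtr());
      }

     private:
      void Mark(Object* obj) const {
        std::printf("marking %p\n", static_cast<void*>(obj));
      }
    };

    int main() {
      Object o;
      CompressedReference<Object> live{&o};
      CompressedReference<Object> empty{nullptr};
      MarkVisitor visitor;
      visitor.VisitRootIfNonNull(&live);   // marks o
      visitor.VisitRootIfNonNull(&empty);  // null slot, skipped
      return 0;
    }

Because the null check lives in VisitRootIfNonNull(), VisitRoot() can
assume a non-null slot and go straight to marking; that split is repeated
in each visitor the diff touches.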
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 92dde51..7f2c204 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -365,7 +365,7 @@
: mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
}
- void operator()(const mirror::Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
if (kProfileLargeObjects) {
// TODO: Differentiate between marking and testing somehow.
++mark_sweep_->large_object_test_;
@@ -597,8 +597,7 @@
: mark_sweep_(mark_sweep) {}
void operator()(mirror::Object* obj) const ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -649,13 +648,33 @@
protected:
class MarkObjectParallelVisitor {
public:
- explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
- MarkSweep* mark_sweep) ALWAYS_INLINE
- : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
+ ALWAYS_INLINE explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
+ MarkSweep* mark_sweep)
+ : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const
+ ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ Mark(obj->GetFieldObject<mirror::Object>(offset));
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kCheckLocks) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ Mark(root->AsMirrorPtr());
+ }
+
+ private:
+ void Mark(mirror::Object* ref) const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
if (kUseFinger) {
std::atomic_thread_fence(std::memory_order_seq_cst);
@@ -668,7 +687,6 @@
}
}
- private:
MarkStackTask<kUseFinger>* const chunk_task_;
MarkSweep* const mark_sweep_;
};
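
The hunk above also folds the common marking path into a private Mark()
helper, so the field callback operator() and the new root callbacks share
one route into MarkObjectParallel(). A sketch of why a single path
matters in a parallel marker, with hypothetical ParObject /
ParallelMarker types standing in for the real collector:

    #include <atomic>
    #include <vector>

    struct ParObject {
      std::atomic<bool> marked{false};
    };

    class ParallelMarker {
     public:
      // True only for the thread that flips the flag, so each object enters
      // the work list exactly once even when several threads reach it at once.
      bool MarkObjectParallel(ParObject* obj) {
        return !obj->marked.exchange(true, std::memory_order_acq_rel);
      }

      // Shared path: field references and roots both funnel through here.
      void Mark(ParObject* ref, std::vector<ParObject*>* work_list) {
        if (ref != nullptr && MarkObjectParallel(ref)) {
          work_list->push_back(ref);  // newly marked: scan its fields later
        }
      }
    };

    int main() {
      ParObject a;
      std::vector<ParObject*> work;
      ParallelMarker marker;
      marker.Mark(&a, &work);  // first visit: marks and enqueues
      marker.Mark(&a, &work);  // already marked: no duplicate enqueue
      return work.size() == 1 ? 0 : 1;
    }

The real visitor layers more on top of this (the kUseFinger fence visible
above), but the exactly-once guarantee is what makes the shared helper
safe to call from both field scanning and root visiting.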
@@ -1268,6 +1286,22 @@
mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ if (kCheckLocks) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ mark_sweep_->MarkObject(root->AsMirrorPtr());
+ }
+
private:
MarkSweep* const mark_sweep_;
};
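
With both the parallel and the serial visitors exposing the root
callbacks, mirror::Class::VisitReferences can report native roots during
the ordinary reference walk: a class that is never marked is never
scanned, so its native roots are never visited. A simplified,
self-contained illustration of that flow, with hypothetical ClassSketch /
Ref types and made-up root slot names, not mirror::Class's real layout:

    #include <cstdio>

    struct Obj {};

    template <typename T>
    struct Ref {  // stand-in for mirror::CompressedReference<T>
      T* ptr;
      bool IsNull() const { return ptr == nullptr; }
      T* AsMirrorPtr() const { return ptr; }
    };

    struct MarkVisitorSketch {
      void VisitRootIfNonNull(Ref<Obj>* root) const {
        if (!root->IsNull()) {
          std::printf("root %p marked\n",
                      static_cast<void*>(root->AsMirrorPtr()));
        }
      }
    };

    // Stand-in for mirror::Class: native roots live behind VisitReferences,
    // so nothing outside the reference walk has to know about them.
    struct ClassSketch {
      Ref<Obj> native_root_a{nullptr};  // hypothetical native root slots
      Ref<Obj> native_root_b{nullptr};

      template <typename Visitor>
      void VisitReferences(const Visitor& visitor) {
        // ...managed instance and static fields would be visited here...
        visitor.VisitRootIfNonNull(&native_root_a);
        visitor.VisitRootIfNonNull(&native_root_b);
      }
    };

    int main() {
      Obj o;
      ClassSketch klass;
      klass.native_root_a.ptr = &o;
      // Only classes the collector actually reaches get this call, which is
      // the property the commit message leans on for class unloading.
      klass.VisitReferences(MarkVisitorSketch{});
      return 0;
    }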