Move Heap parameters to ObjPtr
Also deleted the now-unused object dumping code (Heap::DumpObject,
SafeGetClassDescriptor, SafePrettyTypeOf).
Test: test-art-host
Bug: 31113334
Change-Id: I747220caafe6679591fd4b361d7f50383a046164
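Most of the change is mechanical: out-parameters of type mirror::Object** / mirror::Class** become ObjPtr<mirror::Object>* / ObjPtr<mirror::Class>*, with .Ptr() used wherever a raw pointer is still required. The sketch below shows that calling pattern with self-contained toy types (TinyObjPtr, Obj, TouchRaw/TouchWrapped are made-up names for illustration, not ART's real ObjPtr or mirror classes):

#include <cstdio>

// Toy stand-in for ObjPtr<T>; the real ObjPtr also carries a thread-poisoning
// cookie and many more operators.
template <typename T>
class TinyObjPtr {
 public:
  TinyObjPtr(T* ptr = nullptr) : ptr_(ptr) {}  // implicit from T*, like ObjPtr
  T* Ptr() const { return ptr_; }              // unwrap where a raw T* is needed
  T* operator->() const { return ptr_; }
 private:
  T* ptr_;
};

struct Obj { int field; };

// Old-style out-parameter: the callee may rewrite the caller's pointer,
// e.g. after a moving GC relocates the object.
void TouchRaw(Obj** obj) { (*obj)->field = 1; }

// New-style out-parameter: same contract, but the wrapper type is explicit in
// the signature, and raw-pointer consumers go through ->Ptr().
void TouchWrapped(TinyObjPtr<Obj>* obj) { (*obj)->field = 2; }

int main() {
  Obj o{0};
  Obj* raw = &o;
  TouchRaw(&raw);

  TinyObjPtr<Obj> wrapped(&o);
  TouchWrapped(&wrapped);  // call sites pass &wrapped instead of &raw
  std::printf("%d %p\n", o.field, static_cast<void*>(wrapped.Ptr()));
  return 0;
}

Call sites change from passing &raw to passing &wrapped, and obj.Ptr() / obj->Ptr() shows up at the boundaries where a plain pointer is still expected, which is exactly the shape of the edits in the hunks below.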
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index 6fb74d3..f60bc0c 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -22,6 +22,7 @@
#include "base/macros.h"
#include "base/mutex.h"
+#include "obj_ptr.h"
#include "object_callbacks.h"
#include "gc_root.h"
@@ -39,7 +40,7 @@
public:
virtual ~AllocationListener() {}
- virtual void ObjectAllocated(Thread* self, mirror::Object** obj, size_t byte_count)
+ virtual void ObjectAllocated(Thread* self, ObjPtr<mirror::Object>* obj, size_t byte_count)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
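Every AllocationListener implementation has to change its override in lock-step with this virtual. Below is a toy sketch of that hazard with made-up stand-ins for the ART types (ToyAllocationListener, TinyObjPtr, ToyThread, ToyObject are hypothetical): keeping the old parameter type would no longer override, and marking overrides with `override` turns that into a compile error instead of a silently unused overload.

#include <cstddef>
#include <cstdio>

template <typename T>
class TinyObjPtr {
 public:
  TinyObjPtr(T* ptr = nullptr) : ptr_(ptr) {}
  T* Ptr() const { return ptr_; }
 private:
  T* ptr_;
};

struct ToyObject {};
struct ToyThread {};

class ToyAllocationListener {
 public:
  virtual ~ToyAllocationListener() {}
  virtual void ObjectAllocated(ToyThread* self, TinyObjPtr<ToyObject>* obj,
                               size_t byte_count) = 0;
};

class LoggingListener : public ToyAllocationListener {
 public:
  // Keeping `ToyObject** obj` here would no longer override the virtual;
  // `override` turns that mistake into a compile error.
  void ObjectAllocated(ToyThread* /*self*/, TinyObjPtr<ToyObject>* obj,
                       size_t byte_count) override {
    std::printf("allocated %zu bytes at %p\n", byte_count,
                static_cast<void*>(obj->Ptr()));
  }
};

int main() {
  ToyObject object;
  TinyObjPtr<ToyObject> handle(&object);
  ToyThread thread;
  LoggingListener listener;
  listener.ObjectAllocated(&thread, &handle, 16);
  return 0;
}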
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 13ebb27..d921900 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -19,6 +19,7 @@
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/stl_util.h"
+#include "obj_ptr-inl.h"
#include "stack.h"
#ifdef ART_TARGET_ANDROID
@@ -263,7 +264,7 @@
}
void AllocRecordObjectMap::RecordAllocation(Thread* self,
- mirror::Object** obj,
+ ObjPtr<mirror::Object>* obj,
size_t byte_count) {
// Get stack trace outside of lock in case there are allocations during the stack walk.
// b/27858645.
@@ -305,7 +306,7 @@
trace.SetTid(self->GetTid());
// Add the record.
- Put(*obj, AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
+ Put(obj->Ptr(), AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
DCHECK_LE(Size(), alloc_record_max_);
}
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index f1f013b..c8b2b89 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -21,6 +21,7 @@
#include <memory>
#include "base/mutex.h"
+#include "obj_ptr.h"
#include "object_callbacks.h"
#include "gc_root.h"
@@ -210,7 +211,7 @@
// Caller needs to check that it is enabled before calling since we read the stack trace before
// checking the enabled boolean.
void RecordAllocation(Thread* self,
- mirror::Object** obj,
+ ObjPtr<mirror::Object>* obj,
size_t byte_count)
REQUIRES(!Locks::alloc_tracker_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 2e97172..76a478e 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -296,7 +296,6 @@
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (from_space_->HasAddress(ref)) {
- Runtime::Current()->GetHeap()->DumpObject(LOG_STREAM(INFO), obj);
LOG(FATAL) << ref << " found in from space";
}
}
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 00adefb..05ce9c7 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -41,7 +41,7 @@
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
- mirror::Class* klass,
+ ObjPtr<mirror::Class> klass,
size_t byte_count,
AllocatorType allocator,
const PreFenceVisitor& pre_fence_visitor) {
@@ -52,16 +52,19 @@
CHECK_EQ(self->GetState(), kRunnable);
self->AssertThreadSuspensionIsAllowable();
self->AssertNoPendingException();
+ // Make sure to preserve klass.
+ StackHandleScope<1> hs(self);
+ HandleWrapperObjPtr<mirror::Class> h = hs.NewHandleWrapper(&klass);
self->PoisonObjectPointers();
}
// Need to check that we aren't the large object allocator since the large object allocation code
// path includes this function. If we didn't check, we would have an infinite loop.
- mirror::Object* obj;
+ ObjPtr<mirror::Object> obj;
if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
pre_fence_visitor);
if (obj != nullptr) {
- return obj;
+ return obj.Ptr();
} else {
// There should be an OOM exception, since we are retrying, clear it.
self->ClearException();
@@ -85,7 +88,7 @@
obj->SetClass(klass);
if (kUseBakerOrBrooksReadBarrier) {
if (kUseBrooksReadBarrier) {
- obj->SetReadBarrierPointer(obj);
+ obj->SetReadBarrierPointer(obj.Ptr());
}
obj->AssertReadBarrierPointer();
}
@@ -93,14 +96,15 @@
usable_size = bytes_allocated;
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
- } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
- (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) &&
- LIKELY(obj != nullptr)) {
+ } else if (
+ !kInstrumented && allocator == kAllocatorTypeRosAlloc &&
+ (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) != nullptr &&
+ LIKELY(obj != nullptr)) {
DCHECK(!is_running_on_memory_tool_);
obj->SetClass(klass);
if (kUseBakerOrBrooksReadBarrier) {
if (kUseBrooksReadBarrier) {
- obj->SetReadBarrierPointer(obj);
+ obj->SetReadBarrierPointer(obj.Ptr());
}
obj->AssertReadBarrierPointer();
}
@@ -141,7 +145,7 @@
obj->SetClass(klass);
if (kUseBakerOrBrooksReadBarrier) {
if (kUseBrooksReadBarrier) {
- obj->SetReadBarrierPointer(obj);
+ obj->SetReadBarrierPointer(obj.Ptr());
}
obj->AssertReadBarrierPointer();
}
@@ -213,25 +217,25 @@
}
VerifyObject(obj);
self->VerifyStack();
- return obj;
+ return obj.Ptr();
}
// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;
-inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
+inline void Heap::PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj) {
if (kUseThreadLocalAllocationStack) {
- if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(*obj))) {
+ if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(obj->Ptr()))) {
PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
}
- } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(*obj))) {
+ } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(obj->Ptr()))) {
PushOnAllocationStackWithInternalGC(self, obj);
}
}
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self,
- mirror::Class** klass,
+ ObjPtr<mirror::Class>* klass,
size_t byte_count,
const PreFenceVisitor& pre_fence_visitor) {
// Save and restore the class in case it moves.
@@ -405,7 +409,7 @@
return ret;
}
-inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
+inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const {
// We need to have a zygote space or else our newly allocated large object can end up in the
// Zygote resulting in it being prematurely freed.
// We can only do this for primitive objects since large objects will not be within the card table
@@ -435,7 +439,7 @@
inline void Heap::CheckConcurrentGC(Thread* self,
size_t new_num_bytes_allocated,
- mirror::Object** obj) {
+ ObjPtr<mirror::Object>* obj) {
if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
RequestConcurrentGCAndSaveObject(self, false, obj);
}
@@ -447,6 +451,16 @@
card_table_->MarkCard(dst.Ptr());
}
+inline void Heap::WriteBarrierArray(ObjPtr<mirror::Object> dst,
+ int start_offset ATTRIBUTE_UNUSED,
+ size_t length ATTRIBUTE_UNUSED) {
+ card_table_->MarkCard(dst.Ptr());
+}
+
+inline void Heap::WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj) {
+ card_table_->MarkCard(obj.Ptr());
+}
+
} // namespace gc
} // namespace art
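The StackHandleScope / HandleWrapperObjPtr lines added to AllocObjectWithAllocator are there because the allocation path can reach a suspend point, and a moving collector may relocate klass while the thread is suspended; the handle wrapper exposes the value as a GC root and writes the possibly-relocated pointer back into the local when it goes out of scope. A toy sketch of that idea (TinyHandleWrapper, FakeMovingGc and the root list are made up, not ART's real handle machinery):

#include <cstdio>
#include <vector>

struct Object { int tag; };

// Toy "root table" that a moving collector walks and rewrites.
static std::vector<Object**> g_roots;

// Toy handle wrapper: mirrors the local into a rooted slot and copies the
// (possibly relocated) value back when it goes out of scope.
template <typename T>
class TinyHandleWrapper {
 public:
  explicit TinyHandleWrapper(T** local) : local_(local), slot_(*local) {
    g_roots.push_back(&slot_);
  }
  ~TinyHandleWrapper() {
    g_roots.pop_back();
    *local_ = slot_;  // restore the caller's local with the updated address
  }
 private:
  T** local_;  // the caller's local variable
  T* slot_;    // the value the collector is allowed to rewrite
};

// Pretend the collector moved every rooted object to new_location.
void FakeMovingGc(Object* new_location) {
  for (Object** root : g_roots) {
    *root = new_location;
  }
}

int main() {
  Object from_space_copy{1};
  Object to_space_copy{2};
  Object* klass = &from_space_copy;
  {
    TinyHandleWrapper<Object> h(&klass);  // like hs.NewHandleWrapper(&klass)
    FakeMovingGc(&to_space_copy);         // e.g. triggered at a suspend point
  }                                       // wrapper fixes up klass here
  std::printf("klass->tag == %d\n", klass->tag);  // prints 2
  return 0;
}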
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 33f849a..640787c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -760,83 +760,6 @@
}
}
-std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
- if (!IsValidContinuousSpaceObjectAddress(klass)) {
- return StringPrintf("<non heap address klass %p>", klass);
- }
- mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
- if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
- std::string result("[");
- result += SafeGetClassDescriptor(component_type);
- return result;
- } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
- return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
- } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
- return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
- } else {
- mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
- if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
- return StringPrintf("<non heap address dex_cache %p>", dex_cache);
- }
- const DexFile* dex_file = dex_cache->GetDexFile();
- uint16_t class_def_idx = klass->GetDexClassDefIndex();
- if (class_def_idx == DexFile::kDexNoIndex16) {
- return "<class def not found>";
- }
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
- const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
- return dex_file->GetTypeDescriptor(type_id);
- }
-}
-
-std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
- if (obj == nullptr) {
- return "null";
- }
- mirror::Class* klass = obj->GetClass<kVerifyNone>();
- if (klass == nullptr) {
- return "(class=null)";
- }
- std::string result(SafeGetClassDescriptor(klass));
- if (obj->IsClass()) {
- result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
- }
- return result;
-}
-
-void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
- if (obj == nullptr) {
- stream << "(obj=null)";
- return;
- }
- if (IsAligned<kObjectAlignment>(obj)) {
- space::Space* space = nullptr;
- // Don't use find space since it only finds spaces which actually contain objects instead of
- // spaces which may contain objects (e.g. cleared bump pointer spaces).
- for (const auto& cur_space : continuous_spaces_) {
- if (cur_space->HasAddress(obj)) {
- space = cur_space;
- break;
- }
- }
- // Unprotect all the spaces.
- for (const auto& con_space : continuous_spaces_) {
- mprotect(con_space->Begin(), con_space->Capacity(), PROT_READ | PROT_WRITE);
- }
- stream << "Object " << obj;
- if (space != nullptr) {
- stream << " in space " << *space;
- }
- mirror::Class* klass = obj->GetClass<kVerifyNone>();
- stream << "\nclass=" << klass;
- if (klass != nullptr) {
- stream << " type= " << SafePrettyTypeOf(obj);
- }
- // Re-protect the address we faulted on.
- mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
- }
-}
-
bool Heap::IsCompilingBoot() const {
if (!Runtime::Current()->IsAotCompiler()) {
return false;
@@ -1325,33 +1248,42 @@
VLOG(heap) << "Finished ~Heap()";
}
-space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
- bool fail_ok) const {
+
+space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
for (const auto& space : continuous_spaces_) {
- if (space->Contains(obj)) {
+ if (space->Contains(addr)) {
return space;
}
}
- if (!fail_ok) {
- LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
- }
return nullptr;
}
-space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
+space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
+ bool fail_ok) const {
+ space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
+ if (space != nullptr) {
+ return space;
+ }
+ if (!fail_ok) {
+ LOG(FATAL) << "object " << obj << " not inside any spaces!";
+ }
+ return nullptr;
+}
+
+space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
bool fail_ok) const {
for (const auto& space : discontinuous_spaces_) {
- if (space->Contains(obj)) {
+ if (space->Contains(obj.Ptr())) {
return space;
}
}
if (!fail_ok) {
- LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
+ LOG(FATAL) << "object " << obj << " not inside any spaces!";
}
return nullptr;
}
-space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
+space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
space::Space* result = FindContinuousSpaceFromObject(obj, true);
if (result != nullptr) {
return result;
@@ -1359,6 +1291,21 @@
return FindDiscontinuousSpaceFromObject(obj, fail_ok);
}
+space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
+ for (const auto& space : continuous_spaces_) {
+ if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
+ return space;
+ }
+ }
+ for (const auto& space : discontinuous_spaces_) {
+ if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
+ return space;
+ }
+ }
+ return nullptr;
+}
+
+
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
// If we're in a stack overflow, do not create a new exception. It would require running the
// constructor, which will of course still be in a stack overflow.
@@ -1523,62 +1470,49 @@
<< static_cast<int>(100 * managed_utilization) << "%.";
}
-bool Heap::IsValidObjectAddress(ObjPtr<mirror::Object> obj) const {
- // Note: we deliberately don't take the lock here, and mustn't test anything that would require
- // taking the lock.
- if (obj == nullptr) {
+bool Heap::IsValidObjectAddress(const void* addr) const {
+ if (addr == nullptr) {
return true;
}
- return IsAligned<kObjectAlignment>(obj.Ptr()) &&
- FindSpaceFromObject(obj.Ptr(), true) != nullptr;
+ return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
}
-bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
- return FindContinuousSpaceFromObject(obj, true) != nullptr;
+bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
+ return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
}
-bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
- if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
+bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
+ bool search_allocation_stack,
+ bool search_live_stack,
+ bool sorted) {
+ if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
return false;
}
- for (const auto& space : continuous_spaces_) {
- if (space->HasAddress(obj)) {
- return true;
- }
- }
- return false;
-}
-
-bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
- bool search_live_stack, bool sorted) {
- if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
- return false;
- }
- if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
+ if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
mirror::Class* klass = obj->GetClass<kVerifyNone>();
if (obj == klass) {
// This case happens for java.lang.Class.
return true;
}
return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
- } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
+ } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
// If we are in the allocated region of the temp space, then we are probably live (e.g. during
// a GC). When a GC isn't running End() - Begin() is 0 which means no objects are contained.
- return temp_space_->Contains(obj);
+ return temp_space_->Contains(obj.Ptr());
}
- if (region_space_ != nullptr && region_space_->HasAddress(obj)) {
+ if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
return true;
}
space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
space::DiscontinuousSpace* d_space = nullptr;
if (c_space != nullptr) {
- if (c_space->GetLiveBitmap()->Test(obj)) {
+ if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
return true;
}
} else {
d_space = FindDiscontinuousSpaceFromObject(obj, true);
if (d_space != nullptr) {
- if (d_space->GetLiveBitmap()->Test(obj)) {
+ if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
return true;
}
}
@@ -1590,20 +1524,20 @@
}
if (search_allocation_stack) {
if (sorted) {
- if (allocation_stack_->ContainsSorted(obj)) {
+ if (allocation_stack_->ContainsSorted(obj.Ptr())) {
return true;
}
- } else if (allocation_stack_->Contains(obj)) {
+ } else if (allocation_stack_->Contains(obj.Ptr())) {
return true;
}
}
if (search_live_stack) {
if (sorted) {
- if (live_stack_->ContainsSorted(obj)) {
+ if (live_stack_->ContainsSorted(obj.Ptr())) {
return true;
}
- } else if (live_stack_->Contains(obj)) {
+ } else if (live_stack_->Contains(obj.Ptr())) {
return true;
}
}
@@ -1611,12 +1545,12 @@
// We need to check the bitmaps again since there is a race where we mark something as live and
// then clear the stack containing it.
if (c_space != nullptr) {
- if (c_space->GetLiveBitmap()->Test(obj)) {
+ if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
return true;
}
} else {
d_space = FindDiscontinuousSpaceFromObject(obj, true);
- if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
+ if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
return true;
}
}
@@ -1646,7 +1580,7 @@
}
}
-void Heap::VerifyObjectBody(mirror::Object* obj) {
+void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
if (verify_object_mode_ == kVerifyObjectModeDisabled) {
return;
}
@@ -1655,7 +1589,7 @@
if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
return;
}
- CHECK_ALIGNED(obj, kObjectAlignment) << "Object isn't aligned";
+ CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
CHECK(c != nullptr) << "Null class in object " << obj;
CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
@@ -1734,14 +1668,13 @@
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated,
- mirror::Class** klass) {
+ ObjPtr<mirror::Class>* klass) {
bool was_default_allocator = allocator == GetCurrentAllocator();
// Make sure there is no pending exception since we may need to throw an OOME.
self->AssertNoPendingException();
DCHECK(klass != nullptr);
StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
- klass = nullptr; // Invalidate for safety.
+ HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
// The allocation failed. If the GC is running, block until it completes, and then retry the
// allocation.
collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
@@ -1944,7 +1877,7 @@
class InstanceCounter {
public:
- InstanceCounter(const std::vector<mirror::Class*>& classes,
+ InstanceCounter(const std::vector<Handle<mirror::Class>>& classes,
bool use_is_assignable_from,
uint64_t* counts)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -1956,7 +1889,7 @@
mirror::Class* instance_class = obj->GetClass();
CHECK(instance_class != nullptr);
for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
- mirror::Class* klass = instance_counter->classes_[i];
+ ObjPtr<mirror::Class> klass = instance_counter->classes_[i].Get();
if (instance_counter->use_is_assignable_from_) {
if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
++instance_counter->counts_[i];
@@ -1968,13 +1901,14 @@
}
private:
- const std::vector<mirror::Class*>& classes_;
+ const std::vector<Handle<mirror::Class>>& classes_;
bool use_is_assignable_from_;
uint64_t* const counts_;
DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
};
-void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
+void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
+ bool use_is_assignable_from,
uint64_t* counts) {
InstanceCounter counter(classes, use_is_assignable_from, counts);
VisitObjects(InstanceCounter::Callback, &counter);
@@ -1982,15 +1916,17 @@
class InstanceCollector {
public:
- InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
+ InstanceCollector(Handle<mirror::Class> c,
+ int32_t max_count,
+ std::vector<ObjPtr<mirror::Object>>& instances)
REQUIRES_SHARED(Locks::mutator_lock_)
- : class_(c), max_count_(max_count), instances_(instances) {
- }
+ : class_(c), max_count_(max_count), instances_(instances) {}
+
static void Callback(mirror::Object* obj, void* arg)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(arg != nullptr);
InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
- if (obj->GetClass() == instance_collector->class_) {
+ if (obj->GetClass() == instance_collector->class_.Get()) {
if (instance_collector->max_count_ == 0 ||
instance_collector->instances_.size() < instance_collector->max_count_) {
instance_collector->instances_.push_back(obj);
@@ -1999,27 +1935,28 @@
}
private:
- const mirror::Class* const class_;
+ Handle<mirror::Class> const class_;
const uint32_t max_count_;
- std::vector<mirror::Object*>& instances_;
+ std::vector<ObjPtr<mirror::Object>>& instances_;
DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};
-void Heap::GetInstances(mirror::Class* c,
+void Heap::GetInstances(Handle<mirror::Class> c,
int32_t max_count,
- std::vector<mirror::Object*>& instances) {
+ std::vector<ObjPtr<mirror::Object>>& instances) {
InstanceCollector collector(c, max_count, instances);
VisitObjects(&InstanceCollector::Callback, &collector);
}
class ReferringObjectsFinder {
public:
- ReferringObjectsFinder(mirror::Object* object,
+ ReferringObjectsFinder(ObjPtr<mirror::Object> object,
int32_t max_count,
- std::vector<mirror::Object*>& referring_objects)
+ std::vector<ObjPtr<mirror::Object>>& referring_objects)
REQUIRES_SHARED(Locks::mutator_lock_)
- : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
- }
+ : object_(object),
+ max_count_(max_count),
+ referring_objects_(referring_objects) {}
static void Callback(mirror::Object* obj, void* arg)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -2029,12 +1966,14 @@
// For bitmap Visit.
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
- void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(ObjPtr<mirror::Object> o) const NO_THREAD_SAFETY_ANALYSIS {
o->VisitReferences(*this, VoidFunctor());
}
// For Object::VisitReferences.
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
@@ -2047,14 +1986,15 @@
void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
private:
- const mirror::Object* const object_;
+ ObjPtr<mirror::Object> const object_;
const uint32_t max_count_;
- std::vector<mirror::Object*>& referring_objects_;
+ std::vector<ObjPtr<mirror::Object>>& referring_objects_;
DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};
-void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
- std::vector<mirror::Object*>& referring_objects) {
+void Heap::GetReferringObjects(ObjPtr<mirror::Object> o,
+ int32_t max_count,
+ std::vector<ObjPtr<mirror::Object>>& referring_objects) {
ReferringObjectsFinder finder(o, max_count, referring_objects);
VisitObjects(&ReferringObjectsFinder::Callback, &finder);
}
@@ -3113,41 +3053,42 @@
const bool verify_referent_;
};
-void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
+void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
// Slow path, the allocation stack push back must have already failed.
- DCHECK(!allocation_stack_->AtomicPushBack(*obj));
+ DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
do {
// TODO: Add handle VerifyObject.
StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+ HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
// Push our object into the reserve region of the allocation stack. This is only required due
// to heap verification requiring that roots are live (either in the live bitmap or in the
// allocation stack).
- CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
+ CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
- } while (!allocation_stack_->AtomicPushBack(*obj));
+ } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
}
-void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
+void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
+ ObjPtr<mirror::Object>* obj) {
// Slow path, the allocation stack push back must have already failed.
- DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
+ DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
StackReference<mirror::Object>* start_address;
StackReference<mirror::Object>* end_address;
while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
&end_address)) {
// TODO: Add handle VerifyObject.
StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+ HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
// Push our object into the reserve region of the allocation stack. This is only required due
// to heap verification requiring that roots are live (either in the live bitmap or in the
// allocation stack).
- CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
+ CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
// Push into the reserve allocation stack.
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
}
self->SetThreadLocalAllocationStack(start_address, end_address);
// Retry on the new thread-local allocation stack.
- CHECK(self->PushOnThreadLocalAllocationStack(*obj)); // Must succeed.
+ CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr())); // Must succeed.
}
// Must do this with mutators suspended since we are directly accessing the allocation stacks.
@@ -3737,7 +3678,7 @@
}
}
-void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
+void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
ScopedObjectAccess soa(self);
ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
jvalue args[1];
@@ -3747,9 +3688,11 @@
*object = soa.Decode<mirror::Object>(arg.get()).Ptr();
}
-void Heap::RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) {
+void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
+ bool force_full,
+ ObjPtr<mirror::Object>* obj) {
StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+ HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
RequestConcurrentGC(self, force_full);
}
@@ -4026,7 +3969,7 @@
mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
}
-void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
+void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
(c->IsVariableSize() || c->GetObjectSize() == byte_count)) << c->GetClassFlags();
CHECK_GE(byte_count, sizeof(mirror::Object));
@@ -4152,7 +4095,7 @@
return state.GetFrameCount();
}
-void Heap::CheckGcStressMode(Thread* self, mirror::Object** obj) {
+void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
auto* const runtime = Runtime::Current();
if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
!runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
@@ -4191,9 +4134,9 @@
gc_disabled_for_shutdown_ = true;
}
-bool Heap::ObjectIsInBootImageSpace(mirror::Object* obj) const {
+bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
for (gc::space::ImageSpace* space : boot_image_spaces_) {
- if (space->HasAddress(obj)) {
+ if (space->HasAddress(obj.Ptr())) {
return true;
}
}
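FindContinuousSpaceFromAddress and FindSpaceFromAddress keep a raw-address lookup for callers that only hold a possibly-invalid address (e.g. IsValidObjectAddress), while the ObjPtr overloads are now reserved for real references. Both are linear scans over the registered spaces. A self-contained sketch of that lookup (ToySpace and the address ranges are invented for illustration):

#include <cstdint>
#include <cstdio>
#include <vector>

// Toy continuous space: just a half-open [begin, end) address range.
struct ToySpace {
  const char* name;
  uintptr_t begin;
  uintptr_t end;
  bool Contains(const void* addr) const {
    uintptr_t a = reinterpret_cast<uintptr_t>(addr);
    return a >= begin && a < end;
  }
};

// Linear scan over the registered spaces; returns null if the address is not
// covered, leaving it to the caller to decide whether that is fatal.
const ToySpace* FindSpaceFromAddress(const std::vector<ToySpace>& spaces,
                                     const void* addr) {
  for (const ToySpace& space : spaces) {
    if (space.Contains(addr)) {
      return &space;
    }
  }
  return nullptr;
}

int main() {
  std::vector<ToySpace> spaces = {
      {"image space", 0x70000000u, 0x71000000u},
      {"main space",  0x12c00000u, 0x22c00000u},
  };
  const void* addr = reinterpret_cast<const void*>(uintptr_t{0x12c08000u});
  const ToySpace* space = FindSpaceFromAddress(spaces, addr);
  std::printf("%s\n", space != nullptr ? space->name : "not inside any space");
  return 0;
}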
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5e17a52..95db4dd 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -34,6 +34,7 @@
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "globals.h"
+#include "handle.h"
#include "obj_ptr.h"
#include "object_callbacks.h"
#include "offsets.h"
@@ -194,36 +195,48 @@
// Allocates and initializes storage for an object instance.
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocObject(Thread* self,
- mirror::Class* klass,
+ ObjPtr<mirror::Class> klass,
size_t num_bytes,
const PreFenceVisitor& pre_fence_visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
+ REQUIRES(!*gc_complete_lock_,
+ !*pending_task_lock_,
+ !*backtrace_lock_,
!Roles::uninterruptible_) {
- return AllocObjectWithAllocator<kInstrumented, true>(
- self, klass, num_bytes, GetCurrentAllocator(), pre_fence_visitor);
+ return AllocObjectWithAllocator<kInstrumented, true>(self,
+ klass,
+ num_bytes,
+ GetCurrentAllocator(),
+ pre_fence_visitor);
}
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocNonMovableObject(Thread* self,
- mirror::Class* klass,
+ ObjPtr<mirror::Class> klass,
size_t num_bytes,
const PreFenceVisitor& pre_fence_visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
+ REQUIRES(!*gc_complete_lock_,
+ !*pending_task_lock_,
+ !*backtrace_lock_,
!Roles::uninterruptible_) {
- return AllocObjectWithAllocator<kInstrumented, true>(
- self, klass, num_bytes, GetCurrentNonMovingAllocator(), pre_fence_visitor);
+ return AllocObjectWithAllocator<kInstrumented, true>(self,
+ klass,
+ num_bytes,
+ GetCurrentNonMovingAllocator(),
+ pre_fence_visitor);
}
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
- mirror::Class* klass,
+ ObjPtr<mirror::Class> klass,
size_t byte_count,
AllocatorType allocator,
const PreFenceVisitor& pre_fence_visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
+ REQUIRES(!*gc_complete_lock_,
+ !*pending_task_lock_,
+ !*backtrace_lock_,
!Roles::uninterruptible_);
AllocatorType GetCurrentAllocator() const {
@@ -241,7 +254,7 @@
void VisitObjectsPaused(ObjectCallback callback, void* arg)
REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
- void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
+ void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
@@ -263,7 +276,7 @@
// The given reference is believed to be to an object in the Java heap, check the soundness of it.
// TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
// proper lock ordering for it.
- void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;
+ void VerifyObjectBody(ObjPtr<mirror::Object> o) NO_THREAD_SAFETY_ANALYSIS;
// Check sanity of all live references.
void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
@@ -276,16 +289,16 @@
// A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
// and doesn't abort on error, allowing the caller to report more
// meaningful diagnostics.
- bool IsValidObjectAddress(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsValidObjectAddress(const void* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
// Faster alternative to IsHeapAddress since finding if an object is in the large object space is
// very slow.
- bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
+ bool IsNonDiscontinuousSpaceHeapAddress(const void* addr) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
// Requires the heap lock to be held.
- bool IsLiveObjectLocked(mirror::Object* obj,
+ bool IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
bool search_allocation_stack = true,
bool search_live_stack = true,
bool sorted = false)
@@ -321,19 +334,23 @@
// Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
// The boolean decides whether to use IsAssignableFrom or == when comparing classes.
- void CountInstances(const std::vector<mirror::Class*>& classes,
+ void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
bool use_is_assignable_from,
uint64_t* counts)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+
// Implements JDWP RT_Instances.
- void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
+ void GetInstances(Handle<mirror::Class> c,
+ int32_t max_count,
+ std::vector<ObjPtr<mirror::Object>>& instances)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+
// Implements JDWP OR_ReferringObjects.
- void GetReferringObjects(mirror::Object* o,
+ void GetReferringObjects(ObjPtr<mirror::Object> o,
int32_t max_count,
- std::vector<mirror::Object*>& referring_objects)
+ std::vector<ObjPtr<mirror::Object>>& referring_objects)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -445,16 +462,14 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// Write barrier for array operations that update many field positions
- ALWAYS_INLINE void WriteBarrierArray(const mirror::Object* dst,
- int start_offset ATTRIBUTE_UNUSED,
+ ALWAYS_INLINE void WriteBarrierArray(ObjPtr<mirror::Object> dst,
+ int start_offset,
// TODO: element_count or byte_count?
- size_t length ATTRIBUTE_UNUSED) {
- card_table_->MarkCard(dst);
- }
+ size_t length)
+ REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
- card_table_->MarkCard(obj);
- }
+ ALWAYS_INLINE void WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj)
+ REQUIRES_SHARED(Locks::mutator_lock_);
accounting::CardTable* GetCardTable() const {
return card_table_.get();
@@ -464,7 +479,7 @@
return rb_table_.get();
}
- void AddFinalizerReference(Thread* self, mirror::Object** object);
+ void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
// Returns the number of bytes currently allocated.
size_t GetBytesAllocated() const {
@@ -527,12 +542,20 @@
// get the space that corresponds to an object's address. Current implementation searches all
// spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
// TODO: consider using faster data structure like binary tree.
- space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const
+ space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>, bool fail_ok) const
REQUIRES_SHARED(Locks::mutator_lock_);
- space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
+
+ space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object>,
bool fail_ok) const
REQUIRES_SHARED(Locks::mutator_lock_);
- space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const
+
+ space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ space::Space* FindSpaceFromAddress(const void* ptr) const
REQUIRES_SHARED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
@@ -598,7 +621,7 @@
return boot_image_spaces_;
}
- bool ObjectIsInBootImageSpace(mirror::Object* obj) const
+ bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsInBootImageOatFile(const void* p) const
@@ -650,12 +673,6 @@
void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);
- // Dump object should only be used by the signal handler.
- void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
- // Safe version of pretty type of which check to make sure objects are heap addresses.
- std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
- std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
-
// GC performance measuring
void DumpGcPerformanceInfo(std::ostream& os)
REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
@@ -837,11 +854,11 @@
collector_type == kCollectorTypeMC ||
collector_type == kCollectorTypeHomogeneousSpaceCompact;
}
- bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
+ bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
size_t new_num_bytes_allocated,
- mirror::Object** obj)
+ ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
@@ -852,7 +869,7 @@
// We don't force this to be inlined since it is a slow path.
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocLargeObject(Thread* self,
- mirror::Class** klass,
+ ObjPtr<mirror::Class>* klass,
size_t byte_count,
const PreFenceVisitor& pre_fence_visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -867,14 +884,14 @@
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated,
- mirror::Class** klass)
+ ObjPtr<mirror::Class>* klass)
REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Allocate into a specific space.
mirror::Object* AllocateInto(Thread* self,
space::AllocSpace* space,
- mirror::Class* c,
+ ObjPtr<mirror::Class> c,
size_t bytes)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -899,10 +916,6 @@
template <bool kGrow>
ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
- // Returns true if the address passed in is within the address range of a continuous space.
- bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Run the finalizers. If timeout is non zero, then we use the VMRuntime version.
void RunFinalization(JNIEnv* env, uint64_t timeout);
@@ -914,7 +927,7 @@
void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
REQUIRES(!*pending_task_lock_);
- void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
+ void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*pending_task_lock_);
bool IsGCRequestPending() const;
@@ -986,13 +999,13 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// Push an object onto the allocation stack.
- void PushOnAllocationStack(Thread* self, mirror::Object** obj)
+ void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
- void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
+ void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
- void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
+ void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
@@ -1023,7 +1036,7 @@
void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
// GC stress mode attempts to do one GC per unique backtrace.
- void CheckGcStressMode(Thread* self, mirror::Object** obj)
+ void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
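For context on the two write barriers that moved out of the class body: both reduce to marking the card that covers the destination object, which is why start_offset and length are unused (hence the TODO about element_count vs. byte_count). A toy card table illustrating the idea, with a made-up 1 KiB card size rather than ART's real accounting::CardTable:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr size_t kCardShift = 10;  // toy 1 KiB cards (not ART's real card size)

// Toy card table: one dirty byte per card of heap.
struct ToyCardTable {
  uintptr_t heap_begin;
  std::vector<uint8_t> cards;

  void MarkCard(const void* addr) {
    uintptr_t offset = reinterpret_cast<uintptr_t>(addr) - heap_begin;
    cards[offset >> kCardShift] = 1;  // dirty the card covering addr
  }
};

struct Object { Object* field; };

// Mirrors the shape of Heap::WriteBarrierEveryFieldOf: only the destination
// object's card is marked, no matter how many fields were written.
void WriteBarrierEveryFieldOf(ToyCardTable& table, const Object* dst) {
  table.MarkCard(dst);
}

int main() {
  std::vector<Object> heap(64);
  ToyCardTable table{reinterpret_cast<uintptr_t>(heap.data()),
                     std::vector<uint8_t>(16, 0)};

  heap[3].field = &heap[40];                  // a reference store into heap[3]...
  WriteBarrierEveryFieldOf(table, &heap[3]);  // ...followed by the barrier

  for (size_t i = 0; i < table.cards.size(); ++i) {
    if (table.cards[i] != 0) {
      std::printf("card %zu is dirty\n", i);
    }
  }
  return 0;
}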