Move mirror::Object setters to ObjPtr

Change the mirror::Object reference setters and CAS field helpers to take
ObjPtr<> arguments instead of raw mirror::Object pointers. Add
HeapReference<>::FromObjPtr, defined in the new object_reference-inl.h,
for constructing a HeapReference from an ObjPtr.
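The shape of the change, as a minimal standalone sketch (ObjPtrSketch,
Obj, and SetField are illustrative names, not the real ART types; the
real ObjPtr additionally tags the pointer so debug builds can detect a
reference that went stale across a suspend point):

    #include <cassert>

    struct Obj {};

    // Simplified stand-in for art::ObjPtr<T>: wraps the raw pointer and
    // funnels access through Ptr(), the hook where the real class checks
    // that the reference is still valid.
    template <typename T>
    class ObjPtrSketch {
     public:
      ObjPtrSketch(T* ptr) : ptr_(ptr) {}  // Implicit on purpose, like ObjPtr.
      T* Ptr() const { return ptr_; }
     private:
      T* ptr_;
    };

    // A setter in the new style: call sites passing raw pointers keep
    // compiling via the implicit wrap, so the migration is mechanical.
    void SetField(Obj** slot, ObjPtrSketch<Obj> new_value) {
      *slot = new_value.Ptr();
    }

    int main() {
      Obj o;
      Obj* slot = nullptr;
      SetField(&slot, &o);  // Raw pointer converts implicitly.
      assert(slot == &o);
      return 0;
    }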
Bug: 31113334
Test: test-art-host
Change-Id: I2c4c84645e194c3c435a4a6fd670176b0e98671f
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index ad7558c..3e7bca7 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -31,6 +31,7 @@
#include "lock_word-inl.h"
#include "monitor.h"
#include "object_array-inl.h"
+#include "object_reference-inl.h"
#include "obj_ptr-inl.h"
#include "read_barrier-inl.h"
#include "reference.h"
@@ -53,7 +54,7 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline void Object::SetClass(Class* new_klass) {
+inline void Object::SetClass(ObjPtr<Class> new_klass) {
// new_klass may be null prior to class linker initialization.
// We don't mark the card as this occurs as part of object allocation. Not all objects have
// backing cards, such as large objects.
@@ -159,7 +160,6 @@
#endif
}
-
inline uint32_t Object::GetMarkBit() {
#ifdef USE_READ_BARRIER
return GetLockWord(false).MarkBitState();
@@ -895,18 +895,18 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
- Object* new_value) {
+ ObjPtr<Object> new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
if (kTransactionActive) {
- mirror::Object* obj;
+ ObjPtr<Object> obj;
if (kIsVolatile) {
obj = GetFieldObjectVolatile<Object>(field_offset);
} else {
obj = GetFieldObject<Object>(field_offset);
}
- Runtime::Current()->RecordWriteFieldReference(this, field_offset, obj, true);
+ Runtime::Current()->RecordWriteFieldReference(this, field_offset, obj.Ptr(), true);
}
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
@@ -919,17 +919,17 @@
if (kIsVolatile) {
// TODO: Refactor to use a SequentiallyConsistent store instead.
QuasiAtomic::ThreadFenceRelease(); // Ensure that prior accesses are visible before store.
- objref_addr->Assign(new_value);
+ objref_addr->Assign(new_value.Ptr());
QuasiAtomic::ThreadFenceSequentiallyConsistent();
// Ensure this store occurs before any volatile loads.
} else {
- objref_addr->Assign(new_value);
+ objref_addr->Assign(new_value.Ptr());
}
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
-inline void Object::SetFieldObject(MemberOffset field_offset, Object* new_value) {
+inline void Object::SetFieldObject(MemberOffset field_offset, ObjPtr<Object> new_value) {
SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction, kVerifyFlags,
kIsVolatile>(field_offset, new_value);
if (new_value != nullptr) {
@@ -940,7 +940,7 @@
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline void Object::SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value) {
+inline void Object::SetFieldObjectVolatile(MemberOffset field_offset, ObjPtr<Object> new_value) {
SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(field_offset,
new_value);
}
@@ -956,7 +956,8 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset,
- Object* old_value, Object* new_value) {
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value) {
bool success = CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
kTransactionActive, kCheckTransaction, kVerifyFlags>(field_offset, old_value, new_value);
if (success) {
@@ -967,7 +968,9 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(
- MemberOffset field_offset, Object* old_value, Object* new_value) {
+ MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -983,8 +986,8 @@
if (kTransactionActive) {
-    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value.Ptr(), true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+ HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
@@ -995,7 +998,8 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset,
- Object* old_value, Object* new_value) {
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value) {
bool success = CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<
kTransactionActive, kCheckTransaction, kVerifyFlags>(field_offset, old_value, new_value);
if (success) {
@@ -1006,7 +1010,9 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(
- MemberOffset field_offset, Object* old_value, Object* new_value) {
+ MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -1022,8 +1028,8 @@
if (kTransactionActive) {
-    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value.Ptr(), true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+ HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
@@ -1034,7 +1040,9 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasFieldWeakRelaxedObjectWithoutWriteBarrier(
- MemberOffset field_offset, Object* old_value, Object* new_value) {
+ MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -1050,8 +1058,8 @@
if (kTransactionActive) {
-    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value.Ptr(), true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+ HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
@@ -1062,7 +1070,9 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasFieldStrongRelaxedObjectWithoutWriteBarrier(
- MemberOffset field_offset, Object* old_value, Object* new_value) {
+ MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -1078,8 +1088,8 @@
if (kTransactionActive) {
-    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+    Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value.Ptr(), true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+ HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 90b97fd..fbb7c96 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -199,7 +199,7 @@
UNREACHABLE();
}
-void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value) {
+void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object> new_value) {
Class* c = GetClass();
Runtime* runtime = Runtime::Current();
if (runtime->GetClassLinker() == nullptr || !runtime->IsStarted() ||
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 10faf60..9ddf995 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -92,7 +92,7 @@
ALWAYS_INLINE Class* GetClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetClass(Class* new_klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetClass(ObjPtr<Class> new_klass) REQUIRES_SHARED(Locks::mutator_lock_);
// TODO: Clean these up and change to return int32_t
Object* GetReadBarrierPointer() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -283,54 +283,69 @@
ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset)
REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
- ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value)
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ bool kIsVolatile = false>
+ ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
+ ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
- ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, Object* new_value)
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ bool kIsVolatile = false>
+ ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value)
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset,
+ ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
- Object* new_value)
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
- Object* old_value,
- Object* new_value)
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
- Object* new_value)
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
- Object* old_value,
- Object* new_value)
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
- Object* old_value,
- Object* new_value)
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<bool kTransactionActive,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
- Object* old_value,
- Object* new_value)
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -585,9 +600,9 @@
// Verify the type correctness of stores to fields.
// TODO: This can cause thread suspension and isn't moving GC safe.
- void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
+ void CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
- void CheckFieldAssignment(MemberOffset field_offset, Object* new_value)
+ void CheckFieldAssignment(MemberOffset field_offset, ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckFieldAssignments) {
CheckFieldAssignmentImpl(field_offset, new_value);
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
new file mode 100644
index 0000000..60955d6
--- /dev/null
+++ b/runtime/mirror/object_reference-inl.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_OBJECT_REFERENCE_INL_H_
+#define ART_RUNTIME_MIRROR_OBJECT_REFERENCE_INL_H_
+
+#include "object_reference.h"
+
+#include "obj_ptr-inl.h"
+
+namespace art {
+namespace mirror {
+
+// Defined here rather than in object_reference.h because ObjPtr<>::Ptr() requires obj_ptr-inl.h.
+template<class MirrorType>
+HeapReference<MirrorType> HeapReference<MirrorType>::FromObjPtr(ObjPtr<MirrorType> ptr) {
+ return HeapReference<MirrorType>(ptr.Ptr());
+}
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_OBJECT_REFERENCE_INL_H_
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index f4a3580..573cb30 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -19,6 +19,7 @@
#include "base/mutex.h" // For Locks::mutator_lock_.
#include "globals.h"
+#include "obj_ptr.h"
namespace art {
namespace mirror {
@@ -86,11 +87,18 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
return HeapReference<MirrorType>(mirror_ptr);
}
+
+ static HeapReference<MirrorType> FromObjPtr(ObjPtr<MirrorType> ptr)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
explicit HeapReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
};
+static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
+ "heap reference size does not match");
+
// Standard compressed reference used in the runtime. Used for StackReference and GC roots.
template<class MirrorType>
class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> {