Don't use fences to implement volatiles
Mixing the fence-based implementation with acquire/release instructions
on ARMv8 is not just ugly but incorrect. A volatile store; volatile load
sequence in which the store is implemented as a release store (stlr) and
the load as a plain load followed by a dmb fence does not prevent the
store and the load from being reordered.
This should remove the last places we were using fences to implement
volatiles.
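
Rough illustration of the problem and the intended replacement, written
against std::atomic rather than ART's own wrappers (a hedged sketch, not
the runtime code):

  #include <atomic>
  #include <cstdint>

  std::atomic<uint32_t> field;

  // Broken mix on ARMv8:
  //   volatile store -> stlr       (release-store instruction)
  //   volatile load  -> ldr; dmb   (fence-based acquire load)
  // The dmb only orders accesses that come after the ldr; nothing keeps
  // the ldr from being hoisted above the earlier stlr, so the volatile
  // store; volatile load pair can be observed out of order.
  void BrokenVolatileStore(uint32_t v) {
    field.store(v, std::memory_order_release);            // stlr
  }
  uint32_t BrokenVolatileLoad() {
    uint32_t v = field.load(std::memory_order_relaxed);   // ldr
    std::atomic_thread_fence(std::memory_order_acquire);  // dmb
    return v;
  }

  // Sequentially consistent atomic accesses compile to stlr/ldar, which
  // ARMv8 does order against each other, so no separate fences are needed.
  void VolatileStore(uint32_t v) { field.store(v, std::memory_order_seq_cst); }
  uint32_t VolatileLoad() { return field.load(std::memory_order_seq_cst); }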
The HeapReference representation is changed to be an Atomic, thereby
avoiding many casts. HeapReference no longer inherits from
ObjectReference, which was documented to be a value type; HeapReference
is not one, since it now contains an atomic.
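
Simplified sketch of the new shape, using std::atomic and stand-in
Compress/Decompress helpers in place of ART's Atomic<> and PtrCompression
(hedged; the real class also handles heap poisoning):

  #include <atomic>
  #include <cstdint>

  template <class MirrorType>
  class HeapReference {
   public:
    template <bool kIsVolatile = false>
    MirrorType* AsMirrorPtr() const {
      return Decompress(reference_.load(
          kIsVolatile ? std::memory_order_seq_cst
                      : std::memory_order_relaxed));
    }
    template <bool kIsVolatile = false>
    void Assign(MirrorType* other) {
      reference_.store(Compress(other),
          kIsVolatile ? std::memory_order_seq_cst
                      : std::memory_order_relaxed);
    }
   private:
    // Stand-ins for PtrCompression<kPoisonHeapReferences, MirrorType>.
    static uint32_t Compress(MirrorType* ptr) {
      return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
    }
    static MirrorType* Decompress(uint32_t ref) {
      return reinterpret_cast<MirrorType*>(static_cast<uintptr_t>(ref));
    }
    // std::atomic is neither copyable nor assignable, which is why
    // HeapReference can no longer be the value type ObjectReference was
    // documented to be.
    std::atomic<uint32_t> reference_;
  };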
Disentangle HeapReference and ObjectReference/CompressedReference uses
sufficiently to get the code to compile again. They were previously
used somewhat interchangeably in a few places, in spite of the
different intended semantics (a value type vs. a concurrently updatable
field). Further disentanglement might be useful.
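
The intended split, roughly (a hedged sketch, not the real declarations):

  #include <cstdint>

  // ObjectReference / CompressedReference: plain value types that can be
  // freely copied and passed around.
  template <class MirrorType>
  struct CompressedReferenceSketch {
    uint32_t reference_;  // just data; copyable and assignable
  };

  // HeapReference: a field in the heap that other threads may update
  // concurrently, so it wraps an atomic (see the sketch above) and is
  // neither copyable nor assignable.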
Flag a strange fence use I haven't yet understood.
Test: Booted AOSP. Ran default tests. Some object code inspection.
Bug: 31023171
Test: Built AOSP
Change-Id: I7b3c3e624f480994541c8e3a79e585071c122a3d
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 086925b..71596c9 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -724,11 +724,10 @@
}
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
HeapReference<T>* objref_addr = reinterpret_cast<HeapReference<T>*>(raw_addr);
- T* result = ReadBarrier::Barrier<T, kReadBarrierOption>(this, field_offset, objref_addr);
- if (kIsVolatile) {
- // TODO: Refactor to use a SequentiallyConsistent load instead.
- QuasiAtomic::ThreadFenceAcquire(); // Ensure visibility of operations preceding store.
- }
+ T* result = ReadBarrier::Barrier<T, kIsVolatile, kReadBarrierOption>(
+ this,
+ field_offset,
+ objref_addr);
if (kVerifyFlags & kVerifyReads) {
VerifyObject(result);
}
@@ -764,15 +763,7 @@
}
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
HeapReference<Object>* objref_addr = reinterpret_cast<HeapReference<Object>*>(raw_addr);
- if (kIsVolatile) {
- // TODO: Refactor to use a SequentiallyConsistent store instead.
- QuasiAtomic::ThreadFenceRelease(); // Ensure that prior accesses are visible before store.
- objref_addr->Assign(new_value.Ptr());
- QuasiAtomic::ThreadFenceSequentiallyConsistent();
- // Ensure this store occurs before any volatile loads.
- } else {
- objref_addr->Assign(new_value.Ptr());
- }
+ objref_addr->Assign<kIsVolatile>(new_value.Ptr());
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
@@ -843,13 +834,12 @@
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
+ uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_ref.reference_,
- new_ref.reference_);
+ bool success = atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_ref, new_ref);
return success;
}
@@ -885,13 +875,12 @@
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
+ uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_ref.reference_,
- new_ref.reference_);
+ bool success = atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
return success;
}
@@ -915,13 +904,12 @@
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
+ uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeWeakRelaxed(old_ref.reference_,
- new_ref.reference_);
+ bool success = atomic_addr->CompareExchangeWeakRelaxed(old_ref, new_ref);
return success;
}
@@ -945,13 +933,12 @@
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
- HeapReference<Object> old_ref(HeapReference<Object>::FromObjPtr(old_value));
- HeapReference<Object> new_ref(HeapReference<Object>::FromObjPtr(new_value));
+ uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
+ uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- bool success = atomic_addr->CompareExchangeWeakRelease(old_ref.reference_,
- new_ref.reference_);
+ bool success = atomic_addr->CompareExchangeWeakRelease(old_ref, new_ref);
return success;
}
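
For reference, the CAS pattern introduced in the hunks above, written as
a standalone hedged sketch with std::atomic standing in for ART's
Atomic<> wrapper and the references already compressed by the caller:

  #include <atomic>
  #include <cstdint>

  // Compress both sides to uint32_t (as PtrCompression::Compress does in
  // the hunks above), then CAS the 32-bit field in place. On failure,
  // compare_exchange_weak updates old_ref with the value observed.
  bool CasFieldReferenceSketch(uint8_t* object_base,
                               int32_t field_offset,
                               uint32_t old_ref,    // compressed old value
                               uint32_t new_ref) {  // compressed new value
    auto* atomic_addr =
        reinterpret_cast<std::atomic<uint32_t>*>(object_base + field_offset);
    // Weak, sequentially consistent exchange; the other hunks differ only
    // in using the strong variant or relaxed/release memory orders.
    return atomic_addr->compare_exchange_weak(old_ref, new_ref,
                                              std::memory_order_seq_cst);
  }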