Merge "Relax some CASes for the CC collector."
am: 0b654bce0f
* commit '0b654bce0f8d7c596d4115848b7e4c4dbeadafd5':
Relax some CASes for the CC collector.
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 006d2c7..3be7181 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -46,7 +46,7 @@
DCHECK(Test(obj));
return true;
}
- } while (!atomic_entry->CompareExchangeWeakSequentiallyConsistent(old_word, old_word | mask));
+ } while (!atomic_entry->CompareExchangeWeakRelaxed(old_word, old_word | mask));
DCHECK(Test(obj));
return false;
}
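
The hunk above is the mark-bitmap set path: the loop only needs the word update itself to be atomic, since visibility of the object's contents is published elsewhere (see the release CAS later in this change). A self-contained sketch of the pattern, using std::atomic in place of ART's Atomic<> wrapper:

```cpp
#include <atomic>
#include <cstdint>

// Sketch of the Set() pattern in the hunk above: set a mark bit with a
// relaxed weak CAS loop, returning whether the bit was already set.
bool SetMarkBit(std::atomic<uintptr_t>* atomic_entry, uintptr_t mask) {
  uintptr_t old_word = atomic_entry->load(std::memory_order_relaxed);
  do {
    if ((old_word & mask) != 0) {
      return true;  // Another thread already marked this object.
    }
    // compare_exchange_weak refreshes old_word with the current value on
    // failure, so each retry re-checks the bit before trying again.
  } while (!atomic_entry->compare_exchange_weak(old_word, old_word | mask,
                                                std::memory_order_relaxed));
  return false;
}
```

A relaxed CAS provides atomicity without the full fences (e.g. dmb ish on ARM) that a sequentially consistent CAS emits on every retry, which is the point of this change.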
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index d7e8f81..20e775c 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1664,7 +1664,7 @@
// It was updated by the mutator.
break;
}
- } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
+ } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
false, false, kVerifyNone>(offset, expected_ref, new_ref));
}
@@ -1689,7 +1689,7 @@
// It was updated by the mutator.
break;
}
- } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
+ } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
}
}
@@ -1710,7 +1710,7 @@
// It was updated by the mutator.
break;
}
- } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
+ } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
}
}
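
All three concurrent_copying.cc hunks share one shape: try to overwrite a from-space reference with its to-space copy, give up if the mutator got there first, and retry otherwise. Either outcome leaves a valid reference in the field (the read barrier handles a stale from-space value), so atomicity is all these CASes have to provide. A sketch of the pattern with stand-in types:

```cpp
#include <atomic>

struct Object {};  // Stand-in for mirror::Object; assumed for illustration.

// Sketch of the fixup loops in the hunks above: install the to-space copy
// (to_ref) over the from-space reference (from_ref) unless the mutator has
// already stored something else, in which case the field is left alone.
void FixupField(std::atomic<Object*>* addr, Object* from_ref, Object* to_ref) {
  while (true) {
    if (addr->load(std::memory_order_relaxed) != from_ref) {
      return;  // It was updated by the mutator; its value is already valid.
    }
    Object* expected = from_ref;  // Copy: the weak CAS clobbers it on failure.
    if (addr->compare_exchange_weak(expected, to_ref,
                                    std::memory_order_relaxed)) {
      return;  // Installed the to-space reference.
    }
    // Spurious failure or a racing update; loop to re-check and retry.
  }
}
```

Note that `expected` is re-copied on every iteration: ART's wrapper takes the expected value by copy, so the explicit relaxed load plays the role of the re-check.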
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 90180c5..5c12091 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -95,6 +95,12 @@
OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
}
+inline bool Object::CasLockWordWeakRelease(LockWord old_val, LockWord new_val) {
+ // Force use of non-transactional mode and do not check.
+ return CasFieldWeakRelease32<false, false>(
+ OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
+}
+
inline uint32_t Object::GetLockOwnerThreadId() {
return Monitor::GetLockOwnerThreadId(this);
}
@@ -175,7 +181,10 @@
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(expected_rb_ptr)));
new_lw = lw;
new_lw.SetReadBarrierState(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(rb_ptr)));
- } while (!CasLockWordWeakSequentiallyConsistent(expected_lw, new_lw));
+ // This is a release CAS so that when the GC updates all the fields of an object and then
+ // changes the object from gray to black, the field updates (stores) are visible to other
+ // threads (i.e., they won't be reordered past this CAS).
+ } while (!CasLockWordWeakRelease(expected_lw, new_lw));
return true;
#elif USE_BROOKS_READ_BARRIER
DCHECK(kUseBrooksReadBarrier);
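
The gray-to-black transition above is the one place in this change that needs more than relaxed ordering: the release makes every preceding field fixup visible to any thread that subsequently observes the black state with an acquire load. A minimal sketch of that pairing, with an assumed state encoding (the constants below are for illustration only):

```cpp
#include <atomic>
#include <cstdint>

constexpr uint32_t kGray = 1;   // Assumed encoding, illustration only.
constexpr uint32_t kBlack = 0;

std::atomic<uint32_t> rb_state{kGray};  // Stand-in for the lock word's RB bits.
int field = 0;                          // Stand-in for an object's fields.

void GcFixupAndBlacken() {
  field = 42;  // Fix up all fields with plain stores first.
  // Release CAS: the stores above may not be reordered past it. (The real
  // code uses a weak CAS in a retry loop; a strong CAS keeps this short.)
  uint32_t expected = kGray;
  rb_state.compare_exchange_strong(expected, kBlack, std::memory_order_release);
}

int MutatorRead() {
  // Acquire load pairing with the release CAS: a thread that observes black
  // also observes the fixed-up fields.
  if (rb_state.load(std::memory_order_acquire) == kBlack) {
    return field;
  }
  return -1;  // Still gray: take the read-barrier slow path instead.
}
```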
@@ -671,6 +680,24 @@
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakRelease32(MemberOffset field_offset,
+ int32_t old_value, int32_t new_value) {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
+ }
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
+
+ return atomic_addr->CompareExchangeWeakRelease(old_value, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset,
int32_t old_value, int32_t new_value) {
if (kCheckTransaction) {
@@ -944,6 +971,62 @@
return success;
}
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakRelaxedObjectWithoutWriteBarrier(
+ MemberOffset field_offset, Object* old_value, Object* new_value) {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(new_value);
+ }
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(old_value);
+ }
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+ }
+ HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+ bool success = atomic_addr->CompareExchangeWeakRelaxed(old_ref.reference_,
+ new_ref.reference_);
+ return success;
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldStrongRelaxedObjectWithoutWriteBarrier(
+ MemberOffset field_offset, Object* old_value, Object* new_value) {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(new_value);
+ }
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(old_value);
+ }
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+ }
+ HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
+
+ bool success = atomic_addr->CompareExchangeStrongRelaxed(old_ref.reference_,
+ new_ref.reference_);
+ return success;
+}
+
template<bool kIsStatic, typename Visitor>
inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor) {
if (!kIsStatic && (ref_offsets != mirror::Class::kClassWalkSuper)) {
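
The two helpers added above differ only in weak versus strong semantics. A sketch of the distinction as it maps onto std::atomic:

```cpp
#include <atomic>
#include <cstdint>

std::atomic<uint32_t> ref_field{0};  // Stand-in for a compressed heap reference.

// Weak: may fail spuriously even when the value matches, so it belongs in a
// retry loop; on LL/SC architectures (e.g. ARM) it is a single LL/SC attempt.
bool CasWeakRelaxed(uint32_t old_ref, uint32_t new_ref) {
  return ref_field.compare_exchange_weak(old_ref, new_ref,
                                         std::memory_order_relaxed);
}

// Strong: fails only on a genuine value mismatch (it retries internally on
// spurious failure), so a single call is enough when a lost race is benign.
bool CasStrongRelaxed(uint32_t old_ref, uint32_t new_ref) {
  return ref_field.compare_exchange_strong(old_ref, new_ref,
                                           std::memory_order_relaxed);
}
```

The weak variant suits the do/while retry loops in concurrent_copying.cc; the strong variant suits the one-shot updates in read_barrier-inl.h below, where a lost race is benign and an explicit retry loop would be wasted work.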
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index f75b8ae..022f31d 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -135,6 +135,8 @@
SHARED_REQUIRES(Locks::mutator_lock_);
bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CasLockWordWeakRelease(LockWord old_val, LockWord new_val)
+ SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
mirror::Object* MonitorEnter(Thread* self)
@@ -276,7 +278,6 @@
Object* old_value,
Object* new_value)
SHARED_REQUIRES(Locks::mutator_lock_);
-
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
@@ -288,6 +289,18 @@
Object* old_value,
Object* new_value)
SHARED_REQUIRES(Locks::mutator_lock_);
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldWeakRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
+ Object* old_value,
+ Object* new_value)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
+ Object* old_value,
+ Object* new_value)
+ SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
@@ -396,6 +409,12 @@
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldWeakRelease32(MemberOffset field_offset, int32_t old_value,
+ int32_t new_value) ALWAYS_INLINE
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 85ac4aa..4998a6a 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -63,7 +63,7 @@
ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
if (ref != old_ref) {
- obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
+ obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
offset, old_ref, ref);
}
}
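
Here a strong CAS is the right tool precisely because there is no retry loop: if the one-shot install fails, the mutator (or another barrier) must already have written a valid reference, and the barrier simply returns the marked pointer. A sketch with assumed stand-ins:

```cpp
#include <atomic>

struct Object {};           // Stand-in for mirror::Object; assumed.
Object* Mark(Object* obj);  // Assumed: returns the marked/to-space copy.

// Sketch of the slow path in the hunk above: write the marked reference back
// into the field with one strong relaxed CAS, ignoring failure.
Object* BarrierSlowPath(std::atomic<Object*>* field, Object* old_ref) {
  Object* ref = Mark(old_ref);
  if (ref != old_ref) {
    Object* expected = old_ref;
    // If this fails, the field already holds a valid (to-space) reference,
    // so there is nothing more to do.
    field->compare_exchange_strong(expected, ref, std::memory_order_relaxed);
  }
  return ref;
}
```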
@@ -101,7 +101,7 @@
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
if (ref != old_ref) {
Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
- atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
+ atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
}
}
AssertToSpaceInvariant(gc_root_source, ref);
@@ -140,7 +140,7 @@
if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
auto* atomic_root =
reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
- atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+ atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
}
}
AssertToSpaceInvariant(gc_root_source, ref);
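
For reference, the Atomic<T> methods this change swaps in are thin wrappers over std::atomic, with the memory order spelled out in the method name. An assumed, minimal rendering (mirroring runtime/atomic.h in spirit, not verbatim):

```cpp
#include <atomic>

// Minimal sketch of the Atomic<T> methods this change relies on, assuming
// they forward to std::atomic with the order their names spell out.
template <typename T>
class Atomic {
 public:
  bool CompareExchangeWeakRelaxed(T expected, T desired) {
    return value_.compare_exchange_weak(expected, desired,
                                        std::memory_order_relaxed);
  }
  bool CompareExchangeWeakRelease(T expected, T desired) {
    return value_.compare_exchange_weak(expected, desired,
                                        std::memory_order_release);
  }
  bool CompareExchangeStrongRelaxed(T expected, T desired) {
    return value_.compare_exchange_strong(expected, desired,
                                          std::memory_order_relaxed);
  }

 private:
  std::atomic<T> value_;
};
```

Because the expected value is taken by copy, a failed CAS does not report the current field value back to the caller; that is why the loops in this change re-read the field with a relaxed load at the top of each iteration.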