Fix GC to use art::Atomic rather than compiler intrinsics.
Changes to SpaceBitmap::AtomicTestAndSet and Space::end_. Space::end_ is made
atomic rather than volatile to correctly capture all of its uses, whether
multi-threaded or not.
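
For reference, the pattern art::Atomic replaces here is a bare
__sync_bool_compare_and_swap loop. Below is a minimal standalone sketch of
the same test-and-set loop written against std::atomic (which art::Atomic
wraps); the function name and signature are illustrative only, not part of
this change:

  #include <atomic>
  #include <cstdint>

  // Atomically sets `mask` in `*word`; returns true if the bit was already
  // set. art::Atomic's LoadRelaxed() and
  // CompareExchangeWeakSequentiallyConsistent() map onto the std::atomic
  // calls used here.
  bool AtomicTestAndSetBit(std::atomic<uintptr_t>* word, uintptr_t mask) {
    uintptr_t old_word = word->load(std::memory_order_relaxed);
    do {
      if ((old_word & mask) != 0) {
        return true;  // Fast path: bit already set, no store needed.
      }
      // compare_exchange_weak may fail spuriously, but it refreshes
      // old_word on failure, so each retry re-tests a current value.
    } while (!word->compare_exchange_weak(old_word, old_word | mask,
                                          std::memory_order_seq_cst));
    return false;
  }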
Change-Id: I3058964b8ad90a8c253b3d7f75585f63ca2fb5e3
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 7f1da79..1e9556a 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -21,6 +21,7 @@
#include <memory>
+#include "atomic.h"
#include "base/logging.h"
#include "dex_file-inl.h"
#include "heap_bitmap.h"
@@ -43,17 +44,17 @@
const uintptr_t offset = addr - heap_begin_;
const size_t index = OffsetToIndex(offset);
const uword mask = OffsetToMask(offset);
- uword* const address = &bitmap_begin_[index];
+ Atomic<uword>* atomic_entry = reinterpret_cast<Atomic<uword>*>(&bitmap_begin_[index]);
DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
uword old_word;
do {
- old_word = *address;
+ old_word = atomic_entry->LoadRelaxed();
// Fast path: The bit is already set.
if ((old_word & mask) != 0) {
DCHECK(Test(obj));
return true;
}
- } while (!__sync_bool_compare_and_swap(address, old_word, old_word | mask));
+ } while (!atomic_entry->CompareExchangeWeakSequentiallyConsistent(old_word, old_word | mask));
DCHECK(Test(obj));
return false;
}
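
A note on the cast above: reinterpreting the raw bitmap storage as
Atomic<uword>* is only sound if the wrapper is layout-compatible with the
underlying word type. A hedged sketch of compile-time guards one could add
(not part of this change):

  // Hypothetical guards: Atomic<uword> must add no state, padding, or
  // stricter alignment, or the reinterpret_cast above would be invalid.
  static_assert(sizeof(Atomic<uword>) == sizeof(uword),
                "Atomic<uword> must have the same size as uword");
  static_assert(alignof(Atomic<uword>) == alignof(uword),
                "Atomic<uword> must have the same alignment as uword");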
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 27fb087..6d1ba87 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -60,17 +60,17 @@
// <offset> is the difference from .base to a pointer address.
// <index> is the index of .bits that contains the bit representing
// <offset>.
- static size_t OffsetToIndex(size_t offset) ALWAYS_INLINE {
+ static constexpr size_t OffsetToIndex(size_t offset) {
return offset / kAlignment / kBitsPerWord;
}
template<typename T>
- static T IndexToOffset(T index) {
+ static constexpr T IndexToOffset(T index) {
return static_cast<T>(index * kAlignment * kBitsPerWord);
}
// Bits are packed in the obvious way.
- static uword OffsetToMask(uintptr_t offset) ALWAYS_INLINE {
+ static constexpr uword OffsetToMask(uintptr_t offset) {
return (static_cast<size_t>(1)) << ((offset / kAlignment) % kBitsPerWord);
}
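
Making these helpers constexpr keeps them implicitly inline (so dropping
ALWAYS_INLINE loses nothing in practice) and additionally lets the
index/mask arithmetic be evaluated at compile time. A hypothetical usage
sketch, assuming kAlignment = 8 and 32-bit words (kBitsPerWord = 32):

  // Compile-time checks now expressible because the helpers are constexpr.
  static_assert(OffsetToIndex(0) == 0, "offset 0 lands in word 0");
  static_assert(OffsetToMask(8) == 2u, "second object maps to bit 1");
  static_assert(IndexToOffset<size_t>(1) == 8 * 32,
                "word 1 begins one full word's worth of objects later");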