Fix GC to use art::Atomic rather than compiler intrinsics.
Changes to SpaceBitmap::AtomicTestAndSet and Space::end_. Space::end_ is made
atomic rather than volatile so that all of its uses, multi-threaded or not,
are fully captured.
Change-Id: I3058964b8ad90a8c253b3d7f75585f63ca2fb5e3
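For illustration only, not part of the change: art::Atomic is ART's thin
wrapper over std::atomic, so the Space::end_ conversion amounts to replacing
a volatile field with an Atomic<> one and naming the memory order at every
access. A minimal sketch, assuming the field type is byte* and with
illustrative accessor names (not the actual diff):

    // Before: a volatile field (byte* volatile end_) updated with
    // __sync_* compiler intrinsics.
    // After: every access goes through art::Atomic with explicit ordering.
    Atomic<byte*> end_;

    byte* End() const { return end_.LoadRelaxed(); }
    void SetEnd(byte* end) { end_.StoreRelaxed(end); }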
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 7f1da79..1e9556a 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -21,6 +21,7 @@
 
 #include <memory>
 
+#include "atomic.h"
 #include "base/logging.h"
 #include "dex_file-inl.h"
 #include "heap_bitmap.h"
@@ -43,17 +44,17 @@
   const uintptr_t offset = addr - heap_begin_;
   const size_t index = OffsetToIndex(offset);
   const uword mask = OffsetToMask(offset);
-  uword* const address = &bitmap_begin_[index];
+  Atomic<uword>* atomic_entry = reinterpret_cast<Atomic<uword>*>(&bitmap_begin_[index]);
   DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
   uword old_word;
   do {
-    old_word = *address;
+    old_word = atomic_entry->LoadRelaxed();
     // Fast path: The bit is already set.
     if ((old_word & mask) != 0) {
       DCHECK(Test(obj));
       return true;
     }
-  } while (!__sync_bool_compare_and_swap(address, old_word, old_word | mask));
+  } while (!atomic_entry->CompareExchangeWeakSequentiallyConsistent(old_word, old_word | mask));
  DCHECK(Test(obj));
   return false;
 }
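Aside, not part of the patch: the loop above is the standard weak
compare-and-swap retry idiom. A weak CAS may fail spuriously on
load-linked/store-conditional architectures such as ARM, which is why it
sits in a loop. Since art::Atomic mirrors std::atomic, an equivalent,
self-contained C++11 sketch (function and parameter names here are
illustrative, not from the source) is:

    #include <atomic>
    #include <cstdint>

    // Atomically sets the `mask` bits in `word`. Returns true if the bit
    // was already set, false if this call set it, matching the semantics
    // of AtomicTestAndSet above.
    bool TestAndSetBit(std::atomic<uintptr_t>& word, uintptr_t mask) {
      uintptr_t old_word = word.load(std::memory_order_relaxed);
      do {
        if ((old_word & mask) != 0) {
          return true;  // Fast path: already set, no write needed.
        }
        // compare_exchange_weak may fail spuriously (hence "weak" and the
        // loop); on failure it refreshes old_word with the current value.
      } while (!word.compare_exchange_weak(old_word, old_word | mask,
                                           std::memory_order_seq_cst));
      return false;
    }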