Begin migration of art::Atomic to std::atomic.
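
The operations previously spelled as plain assignments, CompareAndSwap,
and FetchAndAdd/FetchAndSub now name their memory ordering explicitly
(StoreRelaxed, LoadRelaxed, CompareExchangeWeakRelaxed,
FetchAndAddSequentiallyConsistent, ...), mirroring the std::atomic
operations this migration targets. For reference, a minimal standalone
sketch (not ART code; the back_index variable is illustrative only) of
how those names map onto std::atomic:

  #include <atomic>
  #include <cstdint>

  int main() {
    // Stand-in for an art::Atomic<int32_t> member such as back_index_.
    std::atomic<int32_t> back_index{0};

    // StoreRelaxed(v) / LoadRelaxed(): atomic, but no ordering guarantees.
    back_index.store(0, std::memory_order_relaxed);
    int32_t index = back_index.load(std::memory_order_relaxed);

    // CompareExchangeWeakRelaxed(expected, desired): may fail spuriously,
    // hence the retry loops in atomic_stack.h and heap.cc below.
    while (!back_index.compare_exchange_weak(index, index + 1,
                                             std::memory_order_relaxed)) {
    }

    // FetchAndAddSequentiallyConsistent(n) / FetchAndSubSequentiallyConsistent(n):
    // fully ordered read-modify-write; both return the previous value.
    int32_t old_value = back_index.fetch_add(1, std::memory_order_seq_cst);
    back_index.fetch_sub(1, std::memory_order_seq_cst);
    return old_value >= 0 ? 0 : 1;
  }
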
Change-Id: I4858d9cbed95e5ca560956b9dabd976cebe68333
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index f3ed8d3..979970c 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -46,8 +46,8 @@
void Reset() {
DCHECK(mem_map_.get() != NULL);
DCHECK(begin_ != NULL);
- front_index_ = 0;
- back_index_ = 0;
+ front_index_.StoreRelaxed(0);
+ back_index_.StoreRelaxed(0);
debug_is_sorted_ = true;
int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED);
if (result == -1) {
@@ -64,12 +64,12 @@
}
int32_t index;
do {
- index = back_index_;
+ index = back_index_.LoadRelaxed();
if (UNLIKELY(static_cast<size_t>(index) >= capacity_)) {
// Stack overflow.
return false;
}
- } while (!back_index_.CompareAndSwap(index, index + 1));
+ } while (!back_index_.CompareExchangeWeakRelaxed(index, index + 1));
begin_[index] = value;
return true;
}
@@ -83,13 +83,13 @@
int32_t index;
int32_t new_index;
do {
- index = back_index_;
+ index = back_index_.LoadRelaxed();
new_index = index + num_slots;
if (UNLIKELY(static_cast<size_t>(new_index) >= capacity_)) {
// Stack overflow.
return false;
}
- } while (!back_index_.CompareAndSwap(index, new_index));
+ } while (!back_index_.CompareExchangeWeakRelaxed(index, new_index));
*start_address = &begin_[index];
*end_address = &begin_[new_index];
if (kIsDebugBuild) {
@@ -114,31 +114,31 @@
if (kIsDebugBuild) {
debug_is_sorted_ = false;
}
- int32_t index = back_index_;
+ int32_t index = back_index_.LoadRelaxed();
DCHECK_LT(static_cast<size_t>(index), capacity_);
- back_index_ = index + 1;
+ back_index_.StoreRelaxed(index + 1);
begin_[index] = value;
}
T PopBack() {
- DCHECK_GT(back_index_, front_index_);
+ DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
// Decrement the back index non atomically.
- back_index_ = back_index_ - 1;
- return begin_[back_index_];
+ back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
+ return begin_[back_index_.LoadRelaxed()];
}
// Take an item from the front of the stack.
T PopFront() {
- int32_t index = front_index_;
- DCHECK_LT(index, back_index_.Load());
- front_index_ = front_index_ + 1;
+ int32_t index = front_index_.LoadRelaxed();
+ DCHECK_LT(index, back_index_.LoadRelaxed());
+ front_index_.StoreRelaxed(index + 1);
return begin_[index];
}
// Pop a number of elements.
void PopBackCount(int32_t n) {
DCHECK_GE(Size(), static_cast<size_t>(n));
- back_index_.FetchAndSub(n);
+ back_index_.FetchAndSubSequentiallyConsistent(n);
}
bool IsEmpty() const {
@@ -146,16 +146,16 @@
}
size_t Size() const {
- DCHECK_LE(front_index_, back_index_);
- return back_index_ - front_index_;
+ DCHECK_LE(front_index_.LoadRelaxed(), back_index_.LoadRelaxed());
+ return back_index_.LoadRelaxed() - front_index_.LoadRelaxed();
}
T* Begin() const {
- return const_cast<T*>(begin_ + front_index_);
+ return const_cast<T*>(begin_ + front_index_.LoadRelaxed());
}
T* End() const {
- return const_cast<T*>(begin_ + back_index_);
+ return const_cast<T*>(begin_ + back_index_.LoadRelaxed());
}
size_t Capacity() const {
@@ -169,11 +169,11 @@
}
void Sort() {
- int32_t start_back_index = back_index_.Load();
- int32_t start_front_index = front_index_.Load();
+ int32_t start_back_index = back_index_.LoadRelaxed();
+ int32_t start_front_index = front_index_.LoadRelaxed();
std::sort(Begin(), End());
- CHECK_EQ(start_back_index, back_index_.Load());
- CHECK_EQ(start_front_index, front_index_.Load());
+ CHECK_EQ(start_back_index, back_index_.LoadRelaxed());
+ CHECK_EQ(start_front_index, front_index_.LoadRelaxed());
if (kIsDebugBuild) {
debug_is_sorted_ = true;
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index cc258f5..43331c3 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -99,9 +99,10 @@
: GarbageCollector(heap,
name_prefix +
(is_concurrent ? "concurrent mark sweep": "mark sweep")),
+ current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
gc_barrier_(new Barrier(0)),
mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
- is_concurrent_(is_concurrent) {
+ is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
}
void MarkSweep::InitializePhase() {
@@ -109,19 +110,19 @@
mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
- class_count_ = 0;
- array_count_ = 0;
- other_count_ = 0;
- large_object_test_ = 0;
- large_object_mark_ = 0;
- overhead_time_ = 0;
- work_chunks_created_ = 0;
- work_chunks_deleted_ = 0;
- reference_count_ = 0;
- mark_null_count_ = 0;
- mark_immune_count_ = 0;
- mark_fastpath_count_ = 0;
- mark_slowpath_count_ = 0;
+ class_count_.StoreRelaxed(0);
+ array_count_.StoreRelaxed(0);
+ other_count_.StoreRelaxed(0);
+ large_object_test_.StoreRelaxed(0);
+ large_object_mark_.StoreRelaxed(0);
+ overhead_time_.StoreRelaxed(0);
+ work_chunks_created_.StoreRelaxed(0);
+ work_chunks_deleted_.StoreRelaxed(0);
+ reference_count_.StoreRelaxed(0);
+ mark_null_count_.StoreRelaxed(0);
+ mark_immune_count_.StoreRelaxed(0);
+ mark_fastpath_count_.StoreRelaxed(0);
+ mark_slowpath_count_.StoreRelaxed(0);
{
// TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -596,7 +597,7 @@
if (kUseFinger) {
android_memory_barrier();
if (reinterpret_cast<uintptr_t>(ref) >=
- static_cast<uintptr_t>(mark_sweep_->atomic_finger_)) {
+ static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
return;
}
}
@@ -881,7 +882,7 @@
// This function does not handle heap end increasing, so we must use the space end.
uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);
+ atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());
// Create a few worker tasks.
const size_t n = thread_count * 2;
@@ -1214,7 +1215,9 @@
thread_pool->Wait(self, true, true);
thread_pool->StopWorkers(self);
mark_stack_->Reset();
- CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
+ CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
+ work_chunks_deleted_.LoadSequentiallyConsistent())
+ << " some of the work chunks were leaked";
}
// Scan anything that's on the mark stack.
@@ -1269,24 +1272,27 @@
void MarkSweep::FinishPhase() {
TimingLogger::ScopedSplit split("FinishPhase", &timings_);
if (kCountScannedTypes) {
- VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
- << " other=" << other_count_;
+ VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
+ << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
}
if (kCountTasks) {
- VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
+ VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
}
if (kMeasureOverhead) {
- VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
+ VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
}
if (kProfileLargeObjects) {
- VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
+ VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
+ << " marked " << large_object_mark_.LoadRelaxed();
}
if (kCountJavaLangRefs) {
- VLOG(gc) << "References scanned " << reference_count_;
+ VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
}
if (kCountMarkedObjects) {
- VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_
- << " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_;
+ VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
+ << " immune=" << mark_immune_count_.LoadRelaxed()
+ << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
+ << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
}
CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty.
mark_stack_->Reset();
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index e9a3c3a..d73bf3f 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -305,14 +305,14 @@
AtomicInteger mark_fastpath_count_;
AtomicInteger mark_slowpath_count_;
- // Verification.
- size_t live_stack_freeze_size_;
-
std::unique_ptr<Barrier> gc_barrier_;
Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);
const bool is_concurrent_;
+ // Verification.
+ size_t live_stack_freeze_size_;
+
private:
friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
friend class CardScanTask;
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 7cee5a0..03b72b6 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -96,7 +96,7 @@
CHECK_LE(obj->SizeOf(), usable_size);
}
const size_t new_num_bytes_allocated =
- static_cast<size_t>(num_bytes_allocated_.FetchAndAdd(bytes_allocated)) + bytes_allocated;
+ static_cast<size_t>(num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated)) + bytes_allocated;
// TODO: Deprecate.
if (kInstrumented) {
if (Runtime::Current()->HasStatsEnabled()) {
@@ -264,7 +264,7 @@
// Only if the allocation succeeded, record the time.
if (allocated_obj != nullptr) {
uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
- heap_->total_allocation_time_.FetchAndAdd(allocation_end_time - allocation_start_time_);
+ heap_->total_allocation_time_.FetchAndAddSequentiallyConsistent(allocation_end_time - allocation_start_time_);
}
}
};
@@ -279,7 +279,7 @@
template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
- size_t new_footprint = num_bytes_allocated_ + alloc_size;
+ size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
if (UNLIKELY(new_footprint > growth_limit_)) {
return true;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 706d1de..29e8383 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -292,7 +292,7 @@
}
// TODO: Count objects in the image space here.
- num_bytes_allocated_ = 0;
+ num_bytes_allocated_.StoreRelaxed(0);
// Default mark stack size in bytes.
static const size_t default_mark_stack_size = 64 * KB;
@@ -658,13 +658,13 @@
void Heap::RegisterGCAllocation(size_t bytes) {
if (this != nullptr) {
- gc_memory_overhead_.FetchAndAdd(bytes);
+ gc_memory_overhead_.FetchAndAddSequentiallyConsistent(bytes);
}
}
void Heap::RegisterGCDeAllocation(size_t bytes) {
if (this != nullptr) {
- gc_memory_overhead_.FetchAndSub(bytes);
+ gc_memory_overhead_.FetchAndSubSequentiallyConsistent(bytes);
}
}
@@ -699,7 +699,8 @@
}
collector->ResetMeasurements();
}
- uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
+ uint64_t allocation_time =
+ static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
if (total_duration != 0) {
const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
@@ -719,7 +720,7 @@
}
os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
- os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
+ os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_.LoadRelaxed();
BaseMutex::DumpAll(os);
}
@@ -1021,7 +1022,7 @@
return;
}
// Ignore early dawn of the universe verifications.
- if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.Load()) < 10 * KB)) {
+ if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
return;
}
CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
@@ -1052,9 +1053,9 @@
// Use signed comparison since freed bytes can be negative when background compaction foreground
// transitions occurs. This is caused by the moving objects from a bump pointer space to a
// free list backed space typically increasing memory footprint due to padding and binning.
- DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.Load()));
+ DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
// Note: This relies on 2s complement for handling negative freed_bytes.
- num_bytes_allocated_.FetchAndSub(static_cast<ssize_t>(freed_bytes));
+ num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
if (Runtime::Current()->HasStatsEnabled()) {
RuntimeStats* thread_stats = Thread::Current()->GetStats();
thread_stats->freed_objects += freed_objects;
@@ -1312,7 +1313,7 @@
VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
<< " -> " << static_cast<int>(collector_type);
uint64_t start_time = NanoTime();
- uint32_t before_allocated = num_bytes_allocated_.Load();
+ uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
ThreadList* tl = Runtime::Current()->GetThreadList();
Thread* self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
@@ -1390,7 +1391,7 @@
uint64_t duration = NanoTime() - start_time;
GrowForUtilization(semi_space_collector_);
FinishGC(self, collector::kGcTypeFull);
- int32_t after_allocated = num_bytes_allocated_.Load();
+ int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
int32_t delta_allocated = before_allocated - after_allocated;
LOG(INFO) << "Heap transition to " << process_state_ << " took "
<< PrettyDuration(duration) << " saved at least " << PrettySize(delta_allocated);
@@ -2421,7 +2422,7 @@
}
void Heap::UpdateMaxNativeFootprint() {
- size_t native_size = native_bytes_allocated_;
+ size_t native_size = native_bytes_allocated_.LoadRelaxed();
// TODO: Tune the native heap utilization to be a value other than the java heap utilization.
size_t target_size = native_size / GetTargetHeapUtilization();
if (target_size > native_size + max_free_) {
@@ -2693,21 +2694,22 @@
native_need_to_run_finalization_ = false;
}
// Total number of native bytes allocated.
- native_bytes_allocated_.FetchAndAdd(bytes);
- if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
+ size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
+ new_native_bytes_allocated += bytes;
+ if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
collector::kGcTypeFull;
// The second watermark is higher than the gc watermark. If you hit this it means you are
// allocating native objects faster than the GC can keep up with.
- if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
+ if (new_native_bytes_allocated > native_footprint_limit_) {
if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
// Just finished a GC, attempt to run finalizers.
RunFinalization(env);
CHECK(!env->ExceptionCheck());
}
// If we still are over the watermark, attempt a GC for alloc and run finalizers.
- if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
+ if (new_native_bytes_allocated > native_footprint_limit_) {
CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
RunFinalization(env);
native_need_to_run_finalization_ = false;
@@ -2729,7 +2731,7 @@
void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
int expected_size, new_size;
do {
- expected_size = native_bytes_allocated_.Load();
+ expected_size = native_bytes_allocated_.LoadRelaxed();
new_size = expected_size - bytes;
if (UNLIKELY(new_size < 0)) {
ScopedObjectAccess soa(env);
@@ -2738,7 +2740,7 @@
"registered as allocated", bytes, expected_size).c_str());
break;
}
- } while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size));
+ } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size, new_size));
}
size_t Heap::GetTotalMemory() const {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index eea2879..46d1268 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -373,7 +373,7 @@
// Returns the number of bytes currently allocated.
size_t GetBytesAllocated() const {
- return num_bytes_allocated_;
+ return num_bytes_allocated_.LoadSequentiallyConsistent();
}
// Returns the number of objects currently allocated.
@@ -409,7 +409,7 @@
// Implements java.lang.Runtime.freeMemory.
size_t GetFreeMemory() const {
- return GetTotalMemory() - num_bytes_allocated_;
+ return GetTotalMemory() - num_bytes_allocated_.LoadSequentiallyConsistent();
}
// get the space that corresponds to an object's address. Current implementation searches all
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 497a61f..71c295e 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -48,8 +48,8 @@
end_ += num_bytes;
*bytes_allocated = num_bytes;
// Use the CAS free versions as an optimization.
- objects_allocated_ = objects_allocated_ + 1;
- bytes_allocated_ = bytes_allocated_ + num_bytes;
+ objects_allocated_.StoreRelaxed(objects_allocated_.LoadRelaxed() + 1);
+ bytes_allocated_.StoreRelaxed(bytes_allocated_.LoadRelaxed() + num_bytes);
if (UNLIKELY(usable_size != nullptr)) {
*usable_size = num_bytes;
}
@@ -76,8 +76,8 @@
inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
mirror::Object* ret = AllocNonvirtualWithoutAccounting(num_bytes);
if (ret != nullptr) {
- objects_allocated_.FetchAndAdd(1);
- bytes_allocated_.FetchAndAdd(num_bytes);
+ objects_allocated_.FetchAndAddSequentiallyConsistent(1);
+ bytes_allocated_.FetchAndAddSequentiallyConsistent(num_bytes);
}
return ret;
}
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index fcd772b..fd0a92d 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -68,8 +68,8 @@
// Reset the end of the space back to the beginning, we move the end forward as we allocate
// objects.
SetEnd(Begin());
- objects_allocated_ = 0;
- bytes_allocated_ = 0;
+ objects_allocated_.StoreRelaxed(0);
+ bytes_allocated_.StoreRelaxed(0);
growth_end_ = Limit();
{
MutexLock mu(Thread::Current(), block_lock_);
@@ -204,7 +204,7 @@
uint64_t BumpPointerSpace::GetBytesAllocated() {
// Start out pre-determined amount (blocks which are not being allocated into).
- uint64_t total = static_cast<uint64_t>(bytes_allocated_.Load());
+ uint64_t total = static_cast<uint64_t>(bytes_allocated_.LoadRelaxed());
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
@@ -222,7 +222,7 @@
uint64_t BumpPointerSpace::GetObjectsAllocated() {
// Start out pre-determined amount (blocks which are not being allocated into).
- uint64_t total = static_cast<uint64_t>(objects_allocated_.Load());
+ uint64_t total = static_cast<uint64_t>(objects_allocated_.LoadRelaxed());
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
@@ -239,8 +239,8 @@
}
void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
- objects_allocated_.FetchAndAdd(thread->GetThreadLocalObjectsAllocated());
- bytes_allocated_.FetchAndAdd(thread->GetThreadLocalBytesAllocated());
+ objects_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalObjectsAllocated());
+ bytes_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalBytesAllocated());
thread->SetTlab(nullptr, nullptr);
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 5036095..335df69 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -239,7 +239,7 @@
*error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
return nullptr;
}
- uint32_t bitmap_index = bitmap_index_.FetchAndAdd(1);
+ uint32_t bitmap_index = bitmap_index_.FetchAndAddSequentiallyConsistent(1);
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_filename,
bitmap_index));
std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 0466413..fb3a12e 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -115,7 +115,7 @@
// Need to mark the card since this will update the mod-union table next GC cycle.
card_table->MarkCard(ptrs[i]);
}
- zygote_space->objects_allocated_.FetchAndSub(num_ptrs);
+ zygote_space->objects_allocated_.FetchAndSubSequentiallyConsistent(num_ptrs);
}
} // namespace space
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 50fc62b..5d5fe76 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -65,7 +65,7 @@
}
uint64_t GetObjectsAllocated() {
- return objects_allocated_;
+ return objects_allocated_.LoadSequentiallyConsistent();
}
void Clear() OVERRIDE;