Add thread-unsafe allocation methods to spaces.
Used by the SS/GSS collectors, since these run with mutators suspended
and only allocate from a single thread. Added AllocThreadUnsafe to
BumpPointerSpace and RosAllocSpace. In RosAlloc, AllocThreadUnsafe uses
the current runs as thread-local runs for a thread-unsafe allocation.
Added code to revoke current runs which share the same idx as
thread-local runs; a sketch of that revoke pass follows the Changed:
note below.
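For context, the intended call pattern is sketched below. This is a
minimal sketch, not code from this patch: CopyAndForward is a
hypothetical helper, and only the AllocThreadUnsafe call reflects the
API added here.

  #include <cstring>  // memcpy; ART headers for Thread, mirror::Object
                      // and BumpPointerSpace are assumed included.

  // The world is stopped, so mutator_lock_ is held exclusively and
  // nothing else can race on the to-space bump pointer.
  mirror::Object* CopyAndForward(Thread* self,
                                 gc::space::BumpPointerSpace* to_space,
                                 mirror::Object* obj, size_t obj_size)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    size_t bytes_allocated = 0;
    mirror::Object* fwd = to_space->AllocThreadUnsafe(
        self, obj_size, &bytes_allocated, nullptr /* usable_size */);
    if (fwd != nullptr) {
      memcpy(fwd, obj, obj_size);  // Copy contents to the forwarding address.
    }
    return fwd;
  }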
Changed:
The number of thread-local runs in each thread is now the number of
thread-local runs in RosAlloc instead of the number of size brackets.
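The revoke pass mentioned above is easiest to picture as the loop
below. This is a sketch under assumptions: the method name and
kNumThreadLocalSizeBrackets, size_bracket_locks_, current_runs_,
dedicated_full_run_ and RevokeRun stand in for RosAlloc's run
bookkeeping, none of which appear in the hunks below.

  // After a thread-unsafe collection, revoke any current run that
  // doubled as a thread-local run so it is not handed out twice.
  void RosAlloc::RevokeThreadUnsafeCurrentRuns(Thread* self) {
    for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      if (current_runs_[idx] != dedicated_full_run_) {
        RevokeRun(self, idx, current_runs_[idx]);
        current_runs_[idx] = dedicated_full_run_;
      }
    }
  }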
Total GC time / time spent in EvaluateAndApplyChanges:
TLAB SS:
Before: 36.7s / 7254
After: 16.1s / 4837
TLAB GSS:
Before: 6.9s / 3973
After: 5.7s / 3778
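Net effect: roughly a 2.3x reduction in total GC time for TLAB SS
(36.7s -> 16.1s) and about 1.2x for TLAB GSS (6.9s -> 5.7s).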
Bug: 8981901
Change-Id: Id1d264ade3799f431bf7ebbdcca6146aefbeb632
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 70ab64b..497a61f 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -36,6 +36,26 @@
return ret;
}
+inline mirror::Object* BumpPointerSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
+ size_t* bytes_allocated,
+ size_t* usable_size) {
+ Locks::mutator_lock_->AssertExclusiveHeld(self);
+ num_bytes = RoundUp(num_bytes, kAlignment);
+ if (end_ + num_bytes > growth_end_) {
+ return nullptr;
+ }
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(end_);
+ end_ += num_bytes;
+ *bytes_allocated = num_bytes;
+ // Use the CAS-free versions as an optimization; mutators are suspended, so there is no contention.
+ objects_allocated_ = objects_allocated_ + 1;
+ bytes_allocated_ = bytes_allocated_ + num_bytes;
+ if (UNLIKELY(usable_size != nullptr)) {
+ *usable_size = num_bytes;
+ }
+ return obj;
+}
+
inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) {
DCHECK(IsAligned<kAlignment>(num_bytes));
byte* old_end;
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index e52a9a3..9e61f30 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -48,6 +48,11 @@
// Allocate num_bytes, returns nullptr if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size) OVERRIDE;
+ // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
+ mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size)
+ OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
mirror::Object* AllocNonvirtual(size_t num_bytes);
mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index d270885..fbfef45 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -46,11 +46,15 @@
return size_by_size;
}
+template<bool kThreadSafe>
inline mirror::Object* RosAllocSpace::AllocCommon(Thread* self, size_t num_bytes,
size_t* bytes_allocated, size_t* usable_size) {
size_t rosalloc_size = 0;
+ if (!kThreadSafe) {
+ Locks::mutator_lock_->AssertExclusiveHeld(self);
+ }
mirror::Object* result = reinterpret_cast<mirror::Object*>(
- rosalloc_->Alloc(self, num_bytes, &rosalloc_size));
+ rosalloc_->Alloc<kThreadSafe>(self, num_bytes, &rosalloc_size));
if (LIKELY(result != NULL)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index f5c0e94..a1511e7 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -159,7 +159,7 @@
}
// Note RosAlloc zeroes memory internally.
// Return the new allocation or NULL.
- CHECK(!kDebugSpaces || result == NULL || Contains(result));
+ CHECK(!kDebugSpaces || result == nullptr || Contains(result));
return result;
}
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index a156738..2934af8 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -52,6 +52,11 @@
size_t* usable_size) OVERRIDE {
return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size);
}
+ mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size)
+ OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size);
+ }
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
return AllocationSizeNonvirtual(obj, usable_size);
}
@@ -65,6 +70,11 @@
// RosAlloc zeroes memory internally.
return AllocCommon(self, num_bytes, bytes_allocated, usable_size);
}
+ mirror::Object* AllocNonvirtualThreadUnsafe(Thread* self, size_t num_bytes,
+ size_t* bytes_allocated, size_t* usable_size) {
+ // RosAlloc zeroes memory internally. Pass false for kThreadSafe to take the thread-unsafe path.
+ return AllocCommon<false>(self, num_bytes, bytes_allocated, usable_size);
+ }
// TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
@@ -116,6 +126,7 @@
size_t starting_size, size_t initial_size, bool low_memory_mode);
private:
+ template<bool kThreadSafe = true>
mirror::Object* AllocCommon(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size);
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 0a87a16..dcf5357 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -203,9 +203,17 @@
// Allocate num_bytes without allowing growth. If the allocation
// succeeds, the output parameter bytes_allocated will be set to the
// actually allocated bytes which is >= num_bytes.
+ // Alloc can be called from multiple threads at the same time and must be thread-safe.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size) = 0;
+ // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
+ virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return Alloc(self, num_bytes, bytes_allocated, usable_size);
+ }
+
// Return the storage space required by obj.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
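
The rosalloc.h/rosalloc.cc half of the change is not shown in this
diff. A plausible shape for the templated entry point that AllocCommon
now calls is sketched below; AllocFromRun, AllocFromRunThreadUnsafe,
AllocLargeObject and kLargeSizeThreshold are assumed names, not
confirmed by the hunks above.

  template<bool kThreadSafe>
  inline void* RosAlloc::Alloc(Thread* self, size_t size,
                               size_t* bytes_allocated) {
    if (UNLIKELY(size > kLargeSizeThreshold)) {
      return AllocLargeObject(self, size, bytes_allocated);
    }
    // The thread-safe path takes the per-bracket locks; the unsafe path
    // uses the current runs as thread-local runs, which is only valid
    // while mutators are suspended.
    return kThreadSafe ? AllocFromRun(self, size, bytes_allocated)
                       : AllocFromRunThreadUnsafe(self, size, bytes_allocated);
  }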