Refactor allocation entrypoints.
Adds support for switching allocation entrypoints at runtime. Enables
adding new allocators without requiring significant copy-paste. Slight
speedup on ritzperf, probably due to increased inlining.
TODO: Ensure that the entire allocation path is inlined so
that the switch statement in the allocation code is optimized
out.
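
The pattern this change moves toward, sketched below with stand-in names
(the AllocatorType values, the backend functions, RecordAllocation, and the
ALWAYS_INLINE macro here are illustrative, not the actual heap API): the
allocator is chosen by a runtime enum, and because the whole path is
force-inlined, the switch can fold away whenever the allocator type is a
compile-time constant at the call site.

  #include <cstddef>
  #include <cstdlib>

  #define ALWAYS_INLINE __attribute__((always_inline)) inline

  enum AllocatorType {
    kAllocatorTypeRosAlloc,     // stand-in: rosalloc-backed space
    kAllocatorTypeBumpPointer,  // stand-in: bump-pointer space
  };

  // Stand-in backends; the real ones live in the heap/space code.
  inline void* AllocFromRosAlloc(std::size_t bytes) { return std::malloc(bytes); }
  inline void* AllocFromBumpPointer(std::size_t bytes) { return std::malloc(bytes); }
  inline void RecordAllocation(void*, std::size_t) { /* instrumentation hook */ }

  // One templated entrypoint instead of one copy-pasted variant per allocator.
  // Adding an allocator means adding an enum value and a switch case.
  template <bool kIsInstrumented>
  ALWAYS_INLINE void* Alloc(AllocatorType allocator_type, std::size_t bytes) {
    void* obj = nullptr;
    switch (allocator_type) {
      case kAllocatorTypeRosAlloc:
        obj = AllocFromRosAlloc(bytes);
        break;
      case kAllocatorTypeBumpPointer:
        obj = AllocFromBumpPointer(bytes);
        break;
    }
    if (kIsInstrumented && obj != nullptr) {  // eliminated when kIsInstrumented is false
      RecordAllocation(obj, bytes);
    }
    return obj;
  }

  // With a constant allocator type and full inlining, the compiler can reduce
  // the switch to a direct call, which is what the TODO above is about.
  ALWAYS_INLINE void* AllocMovable(std::size_t bytes) {
    return Alloc</*kIsInstrumented=*/false>(kAllocatorTypeBumpPointer, bytes);
  }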
Rosalloc measurements (ritzperf), before change:
4583
4453
4439
4434
4751
After change:
4184
4287
4131
4335
4097
Change-Id: I1352a3cbcdf6dae93921582726324d91312df5c9
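
For context on the header hunk below, a sketch (again with stand-in types,
not the actual out-of-line definitions, which are outside this diff) of how
the two new convenience entrypoints would forward to the single templated
Alloc, so common callers keep a simple API while the allocator choice stays
in one place.

  struct Thread;
  struct Object {};

  enum class AllocatorType { kMovable, kNonMovable };  // stand-in values

  struct ClassSketch {
    template <bool kIsInstrumented>
    Object* Alloc(Thread* self, AllocatorType allocator_type) {
      (void)self;
      // Real code would dispatch to the matching allocator here; both cases
      // are stubbed with operator new in this sketch.
      switch (allocator_type) {
        case AllocatorType::kMovable:
        case AllocatorType::kNonMovable:
          return new Object();
      }
      return nullptr;
    }

    // Thin wrappers; the instrumented path is assumed as the safe default.
    Object* AllocObject(Thread* self) {
      return Alloc</*kIsInstrumented=*/true>(self, AllocatorType::kMovable);
    }
    Object* AllocNonMovableObject(Thread* self) {
      return Alloc</*kIsInstrumented=*/true>(self, AllocatorType::kNonMovable);
    }
  };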
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 82077dc..5f64bb4 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_CLASS_H_
#define ART_RUNTIME_MIRROR_CLASS_H_
+#include "gc/heap.h"
#include "modifiers.h"
#include "object.h"
#include "primitive.h"
@@ -377,13 +378,14 @@
}
// Creates a raw object instance but does not invoke the default constructor.
- Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return Alloc<kMovingCollector, true>(self);
- }
+ template <bool kIsInstrumented>
+ ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Creates a raw object instance but does not invoke the default constructor.
- template <bool kIsMovable, bool kIsInstrumented>
- Object* Alloc(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Object* AllocObject(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Object* AllocNonMovableObject(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsVariableSize() const {
// Classes and arrays vary in size, and so the object_size_ field cannot