Global lock levels.
Introduce the notion of the mutators/GC being a shared-exclusive (aka
reader-writer) lock. Introduce globally ordered locks, analysable by
annotalysis, statically at compile time. Add locking attributes to
methods.
More subtly, remove the heap_lock_ and split it into various locks that
are held for shorter periods (so that work doesn't get blocked). Remove
buggy Dalvik style thread transitions. Make GC use CMS in all cases when
concurrent is enabled. Fix bug where suspend counts rather than debug
suspend counts were sent to JDWP. Move the PathClassLoader to
WellKnownClasses. In the debugger, refactor the calls that send a
request and possibly suspend. Break apart different VmWait thread states. Move
identity hash code to a shared method.
Change-Id: Icdbfc3ce3fcccd14341860ac7305d8e97b51f5c6
diff --git a/src/oat/runtime/support_invoke.cc b/src/oat/runtime/support_invoke.cc
index 4669688..9c7b3a2 100644
--- a/src/oat/runtime/support_invoke.cc
+++ b/src/oat/runtime/support_invoke.cc
@@ -20,7 +20,8 @@
namespace art {
static uint64_t artInvokeCommon(uint32_t method_idx, Object* this_object, Method* caller_method,
- Thread* self, Method** sp, bool access_check, InvokeType type) {
+ Thread* self, Method** sp, bool access_check, InvokeType type)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Method* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
if (UNLIKELY(method == NULL)) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
@@ -54,14 +55,16 @@
// See comments in runtime_support_asm.S
extern "C" uint64_t artInvokeInterfaceTrampoline(uint32_t method_idx, Object* this_object,
Method* caller_method, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
return artInvokeCommon(method_idx, this_object, caller_method, self, sp, false, kInterface);
}
extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
Object* this_object,
Method* caller_method, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kInterface);
}
@@ -69,28 +72,32 @@
extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
Object* this_object,
Method* caller_method, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kDirect);
}
extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
Object* this_object,
Method* caller_method, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kStatic);
}
extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
Object* this_object,
Method* caller_method, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kSuper);
}
extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
Object* this_object,
Method* caller_method, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kVirtual);
}