Move GC daemon locking logic into heap

Fixes a deadlock caused by acquiring the mutator lock while
synchronizing on the daemon thread. The allocation path now records
the request under a heap-owned gc_request_lock_ and signals a
condition variable instead of calling back into Java's
Daemons.requestGC.
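
The daemon side is expected to block in WaitForConcurrentGCRequest()
until a request arrives and then run the collection. A minimal,
hypothetical sketch of that consumer loop (the real daemon lives in
libcore's Daemons.java and reaches the runtime through JNI, so the
entry point below is illustrative only, not part of this change):

  void GCDaemonLoop(Thread* self, gc::Heap* heap) {
    while (true) {
      // Blocks on gc_request_cond_ until NotifyConcurrentGCRequest()
      // sets gc_request_pending_ and signals.
      heap->WaitForConcurrentGCRequest(self);
      // Run the concurrent collection outside any Java-level
      // synchronization, avoiding the mutator-lock deadlock.
      heap->ConcurrentGC(self);
    }
  }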

Bug: 18739541
Change-Id: I925b8f0f3b58178da6eff17b9c073f655c39597b
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 10fe64e..d420500 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -142,6 +142,7 @@
       zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
       zygote_space_(nullptr),
       large_object_threshold_(large_object_threshold),
+      gc_request_pending_(false),
       collector_type_running_(kCollectorTypeNone),
       last_gc_type_(collector::kGcTypeNone),
       next_gc_type_(collector::kGcTypePartial),
@@ -409,6 +410,8 @@
   gc_complete_lock_ = new Mutex("GC complete lock");
   gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                 *gc_complete_lock_));
+  gc_request_lock_ = new Mutex("GC request lock");
+  gc_request_cond_.reset(new ConditionVariable("GC request condition variable", *gc_request_lock_));
   heap_trim_request_lock_ = new Mutex("Heap trim request lock");
   last_gc_size_ = GetBytesAllocated();
   if (ignore_max_footprint_) {
@@ -3038,12 +3041,7 @@
       self->IsHandlingStackOverflow()) {
     return;
   }
-  JNIEnv* env = self->GetJniEnv();
-  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
-  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
-  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
-                            WellKnownClasses::java_lang_Daemons_requestGC);
-  CHECK(!env->ExceptionCheck());
+  NotifyConcurrentGCRequest(self);
 }
 
 void Heap::ConcurrentGC(Thread* self) {
@@ -3276,5 +3274,26 @@
   }
 }
 
+// Blocks until a concurrent GC has been requested via
+// NotifyConcurrentGCRequest(), then consumes the pending request.
+void Heap::WaitForConcurrentGCRequest(Thread* self) {
+  ScopedThreadStateChange tsc(self, kBlocked);
+  MutexLock mu(self, *gc_request_lock_);
+  // Loop to tolerate spurious wakeups.
+  while (!gc_request_pending_) {
+    gc_request_cond_->Wait(self);
+  }
+  gc_request_pending_ = false;
+}
+
+// Records a pending request and wakes the daemon thread waiting in
+// WaitForConcurrentGCRequest().
+void Heap::NotifyConcurrentGCRequest(Thread* self) {
+  ScopedThreadStateChange tsc(self, kBlocked);
+  MutexLock mu(self, *gc_request_lock_);
+  gc_request_pending_ = true;
+  gc_request_cond_->Signal(self);
+}
+
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 4e1a0ff..529af95 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -611,6 +611,10 @@
     return zygote_space_ != nullptr;
   }
 
+  // Request/wait handshake between mutators and the GC daemon thread.
+  void WaitForConcurrentGCRequest(Thread* self) LOCKS_EXCLUDED(gc_request_lock_);
+  void NotifyConcurrentGCRequest(Thread* self) LOCKS_EXCLUDED(gc_request_lock_);
+
  private:
   // Compact source space to target space.
   void Compact(space::ContinuousMemMapAllocSpace* target_space,
@@ -874,6 +877,11 @@
   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
 
+  // Guards concurrent GC requests.
+  Mutex* gc_request_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  std::unique_ptr<ConditionVariable> gc_request_cond_ GUARDED_BY(gc_request_lock_);
+  bool gc_request_pending_ GUARDED_BY(gc_request_lock_);
+
   // Reference processor;
   ReferenceProcessor reference_processor_;