Move startup thread pool back into runtime

Added logic in heap trim to delete the thread pool if there are no
active users.

Added a scoped accessor to prevent ref counting errors.
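
For illustration only, a minimal C++ sketch of the two pieces described
above: a scoped, ref-counted accessor and a trim-time deletion that runs
only when the count is zero. This is not the code in this change; all
names here are hypothetical.

    #include <cstddef>
    #include <memory>
    #include <mutex>

    class ThreadPool {};  // Stand-in for the runtime's worker pool.

    class RuntimeSketch {
     public:
      // RAII accessor: holding one keeps the pool alive and usable.
      class ScopedThreadPoolUsage {
       public:
        explicit ScopedThreadPoolUsage(RuntimeSketch* runtime)
            : runtime_(runtime) {
          std::lock_guard<std::mutex> guard(runtime_->pool_lock_);
          ++runtime_->pool_ref_count_;
          pool_ = runtime_->thread_pool_.get();
        }
        ~ScopedThreadPoolUsage() {
          std::lock_guard<std::mutex> guard(runtime_->pool_lock_);
          --runtime_->pool_ref_count_;
        }
        ThreadPool* GetThreadPool() const { return pool_; }

       private:
        RuntimeSketch* const runtime_;
        ThreadPool* pool_ = nullptr;
      };

      // Called from a heap-trim/idle path: drop the pool only if unused.
      void DeleteThreadPoolIfUnused() {
        std::lock_guard<std::mutex> guard(pool_lock_);
        if (pool_ref_count_ == 0) {
          thread_pool_.reset();
        }
      }

     private:
      std::mutex pool_lock_;
      size_t pool_ref_count_ = 0;
      std::unique_ptr<ThreadPool> thread_pool_ = std::make_unique<ThreadPool>();
    };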

Motivation: have the workers already created when the app images are
loaded.

Bug: 116052292
Test: test-art-host
Change-Id: I8ea776d74e88601222a9989e0c6dac34cf77c683
diff --git a/runtime/base/locks.cc b/runtime/base/locks.cc
index cfc9f1d..a7922a2 100644
--- a/runtime/base/locks.cc
+++ b/runtime/base/locks.cc
@@ -61,6 +61,7 @@
 Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
 Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
 Mutex* Locks::runtime_shutdown_lock_ = nullptr;
+Mutex* Locks::runtime_thread_pool_lock_ = nullptr;
 Mutex* Locks::cha_lock_ = nullptr;
 Mutex* Locks::subtype_check_lock_ = nullptr;
 Mutex* Locks::thread_list_lock_ = nullptr;
@@ -154,6 +155,7 @@
     DCHECK(user_code_suspension_lock_ != nullptr);
     DCHECK(dex_lock_ != nullptr);
     DCHECK(native_debug_interface_lock_ != nullptr);
+    DCHECK(runtime_thread_pool_lock_ != nullptr);
   } else {
     // Create global locks in level order from highest lock level to lowest.
     LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -189,6 +191,10 @@
     DCHECK(runtime_shutdown_lock_ == nullptr);
     runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
 
+    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeThreadPoolLock);
+    DCHECK(runtime_thread_pool_lock_ == nullptr);
+    runtime_thread_pool_lock_ = new Mutex("runtime thread pool lock", current_lock_level);
+
     UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
     DCHECK(profiler_lock_ == nullptr);
     profiler_lock_ = new Mutex("profiler lock", current_lock_level);