/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include <android-base/logging.h>

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/membarrier.h"
#include "base/memfd.h"
#include "base/mem_map.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "cha.h"
#include "debugger_interface.h"
#include "dex/dex_file_loader.h"
#include "dex/method_reference.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "gc/allocator/dlmalloc.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle.h"
#include "instrumentation.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/jit_scoped_code_cache_write.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "oat_file-inl.h"
#include "oat_quick_method_header.h"
#include "object_callbacks.h"
#include "profile/profile_compilation_info.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"

namespace art {
namespace jit {

static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;

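// Key identifying a JNI stub in the cache. Two native methods can share one
// compiled stub when every property that influences the generated code
// matches: the shorty, staticness, the @FastNative/@CriticalNative
// annotations, and whether the method is synchronized.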
class JitCodeCache::JniStubKey {
 public:
  explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
      : shorty_(method->GetShorty()),
        is_static_(method->IsStatic()),
        is_fast_native_(method->IsFastNative()),
        is_critical_native_(method->IsCriticalNative()),
        is_synchronized_(method->IsSynchronized()) {
    DCHECK(!(is_fast_native_ && is_critical_native_));
  }

  bool operator<(const JniStubKey& rhs) const {
    if (is_static_ != rhs.is_static_) {
      return rhs.is_static_;
    }
    if (is_synchronized_ != rhs.is_synchronized_) {
      return rhs.is_synchronized_;
    }
    if (is_fast_native_ != rhs.is_fast_native_) {
      return rhs.is_fast_native_;
    }
    if (is_critical_native_ != rhs.is_critical_native_) {
      return rhs.is_critical_native_;
    }
    return strcmp(shorty_, rhs.shorty_) < 0;
  }

  // Update the shorty to point to another method's shorty. Call this function when removing
  // the method that references the old shorty from JniStubData without removing the entire
  // JniStubData; the old shorty may become a dangling pointer when that method is unloaded.
  void UpdateShorty(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* shorty = method->GetShorty();
    DCHECK_STREQ(shorty_, shorty);
    shorty_ = shorty;
  }

 private:
  // The shorty points into a DexFile's data and may need to change
  // to point to the same shorty in a different DexFile.
  mutable const char* shorty_;

  const bool is_static_;
  const bool is_fast_native_;
  const bool is_critical_native_;
  const bool is_synchronized_;
};

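// Data associated with a JniStubKey: the compiled stub code (if any) and the
// list of methods currently sharing that stub.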
class JitCodeCache::JniStubData {
 public:
  JniStubData() : code_(nullptr), methods_() {}

  void SetCode(const void* code) {
    DCHECK(code != nullptr);
    code_ = code;
  }

  const void* GetCode() const {
    return code_;
  }

  bool IsCompiled() const {
    return GetCode() != nullptr;
  }

  void AddMethod(ArtMethod* method) {
    if (!ContainsElement(methods_, method)) {
      methods_.push_back(method);
    }
  }

  const std::vector<ArtMethod*>& GetMethods() const {
    return methods_;
  }

  void RemoveMethodsIn(const LinearAlloc& alloc) {
    auto kept_end = std::remove_if(
        methods_.begin(),
        methods_.end(),
        [&alloc](ArtMethod* method) { return alloc.ContainsUnsafe(method); });
    methods_.erase(kept_end, methods_.end());
  }

  bool RemoveMethod(ArtMethod* method) {
    auto it = std::find(methods_.begin(), methods_.end(), method);
    if (it != methods_.end()) {
      methods_.erase(it);
      return true;
    } else {
      return false;
    }
  }

  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
    std::replace(methods_.begin(), methods_.end(), old_method, new_method);
  }

 private:
  const void* code_;
  std::vector<ArtMethod*> methods_;
};

JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
                                   bool rwx_memory_allowed,
                                   bool is_zygote,
                                   std::string* error_msg) {
  // Register for membarrier expedited sync core if JIT will be generating code.
  if (!used_only_for_profile_data) {
    if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
      // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
      // flushed and it's used when adding code to the JIT. The memory used by the new code may
      // have just been released and, in theory, the old code could still be in a pipeline.
      VLOG(jit) << "Kernel does not support membarrier sync-core";
    }
  }

  size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
  // Check whether the provided max capacity in options is below 1GB.
  size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
  // We need to have 32 bit offsets from method headers in code cache which point to things
  // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
  // Ensure we're below 1 GB to be safe.
  if (max_capacity > 1 * GB) {
    std::ostringstream oss;
    oss << "Maximum code cache capacity is limited to 1 GB, "
        << PrettySize(max_capacity) << " is too big";
    *error_msg = oss.str();
    return nullptr;
  }

  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  JitMemoryRegion region;
  if (!region.Initialize(initial_capacity,
                         max_capacity,
                         rwx_memory_allowed,
                         is_zygote,
                         error_msg)) {
    return nullptr;
  }

  std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
  if (is_zygote) {
    // Zygote should never collect code to share the memory with the children.
    jit_code_cache->garbage_collect_code_ = false;
    jit_code_cache->shared_region_ = std::move(region);
  } else {
    jit_code_cache->private_region_ = std::move(region);
  }

  VLOG(jit) << "Created jit code cache: initial capacity="
            << PrettySize(initial_capacity)
            << ", maximum capacity="
            << PrettySize(max_capacity);

  return jit_code_cache.release();
}

JitCodeCache::JitCodeCache()
    : is_weak_access_enabled_(true),
      inline_cache_cond_("Jit inline cache condition variable", *Locks::jit_lock_),
      zygote_map_(&shared_region_),
      lock_cond_("Jit code cache condition variable", *Locks::jit_lock_),
      collection_in_progress_(false),
      last_collection_increased_code_cache_(false),
      garbage_collect_code_(true),
      number_of_compilations_(0),
      number_of_osr_compilations_(0),
      number_of_collections_(0),
      histogram_stack_map_memory_use_("Memory used for stack maps", 16),
      histogram_code_memory_use_("Memory used for compiled code", 16),
      histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
}

JitCodeCache::~JitCodeCache() {}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return private_region_.IsInExecSpace(ptr) || shared_region_.IsInExecSpace(ptr);
}

bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
  ScopedObjectAccess soa(art::Thread::Current());
  ScopedAssertNoThreadSuspension sants(__FUNCTION__);
  if (ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return true;
  } else if (method->GetEntryPointFromQuickCompiledCode() == GetQuickInstrumentationEntryPoint()) {
    return FindCompiledCodeForInstrumentation(method) != nullptr;
  }
  return false;
}

bool JitCodeCache::ContainsMethod(ArtMethod* method) {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it != jni_stubs_map_.end() &&
        it->second.IsCompiled() &&
        ContainsElement(it->second.GetMethods(), method)) {
      return true;
    }
  } else {
    for (const auto& it : method_code_map_) {
      if (it.second == method) {
        return true;
      }
    }
    if (zygote_map_.ContainsMethod(method)) {
      return true;
    }
  }
  return false;
}

const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
  DCHECK(method->IsNative());
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  auto it = jni_stubs_map_.find(JniStubKey(method));
  if (it != jni_stubs_map_.end()) {
    JniStubData& data = it->second;
    if (data.IsCompiled() && ContainsElement(data.GetMethods(), method)) {
      return data.GetCode();
    }
  }
  return nullptr;
}

const void* JitCodeCache::FindCompiledCodeForInstrumentation(ArtMethod* method) {
  // If jit-gc is still on, the SavedEntryPoint field is used by code cache
  // collection, so it cannot be used to find the instrumentation entrypoint.
  if (LIKELY(GetGarbageCollectCode())) {
    return nullptr;
  }
  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
  if (info == nullptr) {
    return nullptr;
  }
  // When GC is disabled for trampoline tracing we will use SavedEntrypoint to hold the actual
  // jit-compiled version of the method. If jit-gc is disabled for other reasons this will just be
  // nullptr.
  return info->GetSavedEntryPoint();
}

const void* JitCodeCache::GetSavedEntryPointOfPreCompiledMethod(ArtMethod* method) {
  if (Runtime::Current()->IsUsingApexBootImageLocation() && method->IsPreCompiled()) {
    const void* code_ptr = nullptr;
    if (method->GetDeclaringClass()->GetClassLoader() == nullptr) {
      code_ptr = zygote_map_.GetCodeFor(method);
    } else {
      MutexLock mu(Thread::Current(), *Locks::jit_lock_);
      auto it = saved_compiled_methods_map_.find(method);
      if (it != saved_compiled_methods_map_.end()) {
        code_ptr = it->second;
      }
    }
    if (code_ptr != nullptr) {
      OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      return method_header->GetEntryPoint();
    }
  }
  return nullptr;
}

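// Commit compiled code and its metadata (GC roots, stack map) for `method`
// into `region`. If the initial attempt fails because the cache is full, a
// code cache collection is run and the commit is retried once.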
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  JitMemoryRegion* region,
                                  ArtMethod* method,
                                  const uint8_t* code,
                                  size_t code_size,
                                  const uint8_t* stack_map,
                                  size_t stack_map_size,
                                  uint8_t* roots_data,
                                  const std::vector<Handle<mirror::Object>>& roots,
                                  bool osr,
                                  bool has_should_deoptimize_flag,
                                  const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
  uint8_t* result = CommitCodeInternal(self,
                                       region,
                                       method,
                                       code,
                                       code_size,
                                       stack_map,
                                       stack_map_size,
                                       roots_data,
                                       roots,
                                       osr,
                                       has_should_deoptimize_flag,
                                       cha_single_implementation_list);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                region,
                                method,
                                code,
                                code_size,
                                stack_map,
                                stack_map_size,
                                roots_data,
                                roots,
                                osr,
                                has_should_deoptimize_flag,
                                cha_single_implementation_list);
  }
  return result;
}

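// Wait for any in-progress code cache collection to finish. Returns true if a
// collection was in progress. The caller must hold Locks::jit_lock_.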
bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

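// Map a compiled code pointer back to the start of its allocation, which
// holds the (alignment-padded) OatQuickMethodHeader just before the code.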
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
  // The length of the table is stored just before the stack map (and therefore at the end of
  // the table itself), in order to be able to fetch it from a `stack_map` pointer.
  return reinterpret_cast<const uint32_t*>(stack_map)[-1];
}

static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots,
                                bool is_shared_region)
    REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
  if (!kIsDebugBuild) {
    return;
  }
  // Verify each root before it is committed to the code cache.
  for (Handle<mirror::Object> object : roots) {
    // Ensure the string is strongly interned. b/32995596
    if (object->IsString()) {
      ObjPtr<mirror::String> str = object->AsString();
      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
      CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
    }
    // Ensure that we don't put movable objects in the shared region.
    if (is_shared_region) {
      CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
    }
  }
}

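// Return a pointer to the GC root table of the given compiled method. The
// table is stored immediately before the CodeInfo in the data region, with
// its length stored at its end (see GetNumberOfRoots above).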
static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
  uint32_t roots = GetNumberOfRoots(data);
  if (number_of_roots != nullptr) {
    *number_of_roots = roots;
  }
  return data - ComputeRootTableSize(roots);
}

// Use a sentinel for marking entries in the JIT table that have been cleared.
// This helps diagnose the case where compiled code wrongly accesses such
// entries.
static mirror::Class* const weak_sentinel =
    reinterpret_cast<mirror::Class*>(Context::kBadGprBase + 0xff);

// Helper for the GC to process a weak class in a JIT root table.
static inline void ProcessWeakClass(GcRoot<mirror::Class>* root_ptr,
                                    IsMarkedVisitor* visitor,
                                    mirror::Class* update)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // This does not need a read barrier because this is called by GC.
  mirror::Class* cls = root_ptr->Read<kWithoutReadBarrier>();
  if (cls != nullptr && cls != weak_sentinel) {
    DCHECK((cls->IsClass<kDefaultVerifyFlags>()));
    // Look at the classloader of the class to know if it has been unloaded.
    // This does not need a read barrier because this is called by GC.
    ObjPtr<mirror::Object> class_loader =
        cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
    if (class_loader == nullptr || visitor->IsMarked(class_loader.Ptr()) != nullptr) {
      // The class loader is live, update the entry if the class has moved.
      mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
      // Note that new_object can be null for CMS and newly allocated objects.
      if (new_cls != nullptr && new_cls != cls) {
        *root_ptr = GcRoot<mirror::Class>(new_cls);
      }
    } else {
      // The class loader is not live, clear the entry.
      *root_ptr = GcRoot<mirror::Class>(update);
    }
  }
}

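// Called by the GC: visit the weak roots embedded in JIT root tables and
// inline caches, updating entries for moved objects and clearing entries
// whose classes have been unloaded.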
void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  for (const auto& entry : method_code_map_) {
    uint32_t number_of_roots = 0;
    uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
    GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
    for (uint32_t i = 0; i < number_of_roots; ++i) {
      // This does not need a read barrier because this is called by GC.
      mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
      if (object == nullptr || object == weak_sentinel) {
        // Entry got deleted in a previous sweep.
      } else if (object->IsString<kDefaultVerifyFlags>()) {
        mirror::Object* new_object = visitor->IsMarked(object);
        // We know the string is marked because it's a strongly-interned string that
        // is always alive. The IsMarked implementation of the CMS collector returns
        // null for newly allocated objects, but we know those haven't moved. Therefore,
        // only update the entry if we get a different non-null string.
        // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
        // out of the weak access/creation pause. b/32167580
        if (new_object != nullptr && new_object != object) {
          DCHECK(new_object->IsString());
          roots[i] = GcRoot<mirror::Object>(new_object);
        }
      } else {
        ProcessWeakClass(
            reinterpret_cast<GcRoot<mirror::Class>*>(&roots[i]), visitor, weak_sentinel);
      }
    }
  }
  // Walk over inline caches to clear entries containing unloaded classes.
  for (ProfilingInfo* info : profiling_infos_) {
    for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
      InlineCache* cache = &info->cache_[i];
      for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
        ProcessWeakClass(&cache->classes_[j], visitor, nullptr);
      }
    }
  }
}

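// Free the code and data allocations backing `code_ptr`, and optionally its
// compressed mini-debug info. Code in the zygote's shared exec space is never
// freed.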
void JitCodeCache::FreeCodeAndData(const void* code_ptr, bool free_debug_info) {
  if (IsInZygoteExecSpace(code_ptr)) {
    // No need to free, this is shared memory.
    return;
  }
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  if (free_debug_info) {
    // Remove compressed mini-debug info for the method.
    // TODO: This is expensive, so we should always do it in the caller in bulk.
    RemoveNativeDebugInfoForJit(ArrayRef<const void*>(&code_ptr, 1));
  }
  if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
    private_region_.FreeData(GetRootTable(code_ptr));
  }  // else this is a JNI stub without any data.

  private_region_.FreeCode(reinterpret_cast<uint8_t*>(allocation));
}

void JitCodeCache::FreeAllMethodHeaders(
    const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
  // We need to remove entries in method_headers from CHA dependencies
  // first since once we do FreeCode() below, the memory can be reused
  // so it's possible for the same method_header to start representing
  // different compiled code.
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  {
    MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
    Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
        ->RemoveDependentsWithMethodHeaders(method_headers);
  }

  // Remove compressed mini-debug info for the methods.
  std::vector<const void*> removed_symbols;
  removed_symbols.reserve(method_headers.size());
  for (const OatQuickMethodHeader* method_header : method_headers) {
    removed_symbols.push_back(method_header->GetCode());
  }
  std::sort(removed_symbols.begin(), removed_symbols.end());
  RemoveNativeDebugInfoForJit(ArrayRef<const void*>(removed_symbols));

  ScopedCodeCacheWrite scc(private_region_);
  for (const OatQuickMethodHeader* method_header : method_headers) {
    FreeCodeAndData(method_header->GetCode(), /*free_debug_info=*/ false);
  }
}

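// Remove all compiled code and profiling info for methods allocated in
// `alloc`, typically because their class loader is being unloaded.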
void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // We use a set to first collect all method_headers whose code needs to be
  // removed. We need to free the underlying code after we remove CHA dependencies
  // for entries in this set. And it's more efficient to iterate through
  // the CHA dependency map just once with an unordered_set.
  std::unordered_set<OatQuickMethodHeader*> method_headers;
  {
    MutexLock mu(self, *Locks::jit_lock_);
    // We do not check if a code cache GC is in progress, as this method comes
    // with the classlinker_classes_lock_ held, and suspending ourselves could
    // lead to a deadlock.
    {
      for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
        it->second.RemoveMethodsIn(alloc);
        if (it->second.GetMethods().empty()) {
          method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->second.GetCode()));
          it = jni_stubs_map_.erase(it);
        } else {
          it->first.UpdateShorty(it->second.GetMethods().front());
          ++it;
        }
      }
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        if (alloc.ContainsUnsafe(it->second)) {
          method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
          it = method_code_map_.erase(it);
        } else {
          ++it;
        }
      }
    }
    for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->first)) {
        // Note that the code has already been pushed to method_headers in the loop
        // above and is going to be removed in FreeCode() below.
        it = osr_code_map_.erase(it);
      } else {
        ++it;
      }
    }
    for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
      ProfilingInfo* info = *it;
      if (alloc.ContainsUnsafe(info->GetMethod())) {
        info->GetMethod()->SetProfilingInfo(nullptr);
        private_region_.FreeData(reinterpret_cast<uint8_t*>(info));
        it = profiling_infos_.erase(it);
      } else {
        ++it;
      }
    }
  }
  FreeAllMethodHeaders(method_headers);
}

bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
  return kUseReadBarrier
      ? self->GetWeakRefAccessEnabled()
      : is_weak_access_enabled_.load(std::memory_order_seq_cst);
}

void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) {
  if (IsWeakAccessEnabled(self)) {
    return;
  }
  ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
  MutexLock mu(self, *Locks::jit_lock_);
  while (!IsWeakAccessEnabled(self)) {
    inline_cache_cond_.Wait(self);
  }
}

void JitCodeCache::BroadcastForInlineCacheAccess() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::jit_lock_);
  inline_cache_cond_.Broadcast(self);
}

void JitCodeCache::AllowInlineCacheAccess() {
  DCHECK(!kUseReadBarrier);
  is_weak_access_enabled_.store(true, std::memory_order_seq_cst);
  BroadcastForInlineCacheAccess();
}

void JitCodeCache::DisallowInlineCacheAccess() {
  DCHECK(!kUseReadBarrier);
  is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
}

void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
                                       Handle<mirror::ObjectArray<mirror::Class>> array) {
  WaitUntilInlineCacheAccessible(Thread::Current());
  // Note that we don't need to hold `Locks::jit_lock_` here, the compiler calling
  // this method has already ensured the inline cache will not be deleted.
  for (size_t in_cache = 0, in_array = 0;
       in_cache < InlineCache::kIndividualCacheSize;
       ++in_cache) {
    mirror::Class* object = ic.classes_[in_cache].Read();
    if (object != nullptr) {
      array->Set(in_array++, object);
    }
  }
}

static void ClearMethodCounter(ArtMethod* method, bool was_warm)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (was_warm) {
    method->SetPreviouslyWarm();
  }
  // We reset the counter to 1 so that the profile knows that the method was executed at least once.
  // This is required for layout purposes.
  // We also need to make sure we'll pass the warmup threshold again, so we set to 0 if
  // the warmup threshold is 1.
  uint16_t jit_warmup_threshold = Runtime::Current()->GetJITOptions()->GetWarmupThreshold();
  method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
}

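// Wait for any in-progress collection while remaining runnable: temporarily
// releases Locks::jit_lock_ and suspends so the collector can proceed, then
// re-acquires the lock before returning.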
void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
  while (collection_in_progress_) {
    Locks::jit_lock_->Unlock(self);
    {
      ScopedThreadSuspension sts(self, kSuspended);
      MutexLock mu(self, *Locks::jit_lock_);
      WaitForPotentialCollectionToComplete(self);
    }
    Locks::jit_lock_->Lock(self);
  }
}

uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          JitMemoryRegion* region,
                                          ArtMethod* method,
                                          const uint8_t* code,
                                          size_t code_size,
                                          const uint8_t* stack_map,
                                          size_t stack_map_size,
                                          uint8_t* roots_data,
                                          const std::vector<Handle<mirror::Object>>& roots,
                                          bool osr,
                                          bool has_should_deoptimize_flag,
                                          const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
  DCHECK(!method->IsNative() || !osr);

  if (!method->IsNative()) {
    // We need to do this before grabbing `Locks::jit_lock_` because it needs to be able to see
    // the string InternTable. Native methods do not have roots.
    DCheckRootsAreValid(roots, IsSharedRegion(*region));
  }

  size_t root_table_size = ComputeRootTableSize(roots.size());
  uint8_t* stack_map_data = roots_data + root_table_size;

  MutexLock mu(self, *Locks::jit_lock_);
  // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
  // finish.
  WaitForPotentialCollectionToCompleteRunnable(self);
  const uint8_t* code_ptr = region->AllocateCode(
      code, code_size, stack_map_data, has_should_deoptimize_flag);
  if (code_ptr == nullptr) {
    return nullptr;
  }
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);

  // Commit roots and stack maps before updating the entry point.
  if (!region->CommitData(roots_data, roots, stack_map, stack_map_size)) {
    ScopedCodeCacheWrite ccw(*region);
    uintptr_t allocation = FromCodeToAllocation(code_ptr);
    region->FreeCode(reinterpret_cast<uint8_t*>(allocation));
    return nullptr;
  }

  number_of_compilations_++;

  // We need to update the entry point in the runnable state for the instrumentation.
  {
    // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
    // compiled code is considered invalidated by some class linking, but below we still make the
    // compiled code valid for the method. Need cha_lock_ for checking all single-implementation
    // flags and register dependencies.
    MutexLock cha_mu(self, *Locks::cha_lock_);
    bool single_impl_still_valid = true;
    for (ArtMethod* single_impl : cha_single_implementation_list) {
      if (!single_impl->HasSingleImplementation()) {
        // Simply discard the compiled code. Clear the counter so that it may be recompiled later.
        // Hopefully the class hierarchy will be more stable when compilation is retried.
        single_impl_still_valid = false;
        ClearMethodCounter(method, /*was_warm=*/ false);
        break;
      }
    }

    // Discard the code if any single-implementation assumptions are now invalid.
    if (UNLIKELY(!single_impl_still_valid)) {
      VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
      ScopedCodeCacheWrite ccw(*region);
      uintptr_t allocation = FromCodeToAllocation(code_ptr);
      region->FreeCode(reinterpret_cast<uint8_t*>(allocation));
      return nullptr;
    }
    DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
        << "Should not be using cha on debuggable apps/runs!";

    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    for (ArtMethod* single_impl : cha_single_implementation_list) {
      class_linker->GetClassHierarchyAnalysis()->AddDependency(single_impl, method, method_header);
    }

    if (UNLIKELY(method->IsNative())) {
      auto it = jni_stubs_map_.find(JniStubKey(method));
      DCHECK(it != jni_stubs_map_.end())
          << "Entry inserted in NotifyCompilationOf() should be alive.";
      JniStubData* data = &it->second;
      DCHECK(ContainsElement(data->GetMethods(), method))
          << "Entry inserted in NotifyCompilationOf() should contain this method.";
      data->SetCode(code_ptr);
      instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
      for (ArtMethod* m : data->GetMethods()) {
        if (!class_linker->IsQuickResolutionStub(m->GetEntryPointFromQuickCompiledCode())) {
          instrum->UpdateMethodsCode(m, method_header->GetEntryPoint());
        }
      }
    } else {
      if (method->IsPreCompiled() && IsSharedRegion(*region)) {
        zygote_map_.Put(code_ptr, method);
      } else {
        method_code_map_.Put(code_ptr, method);
      }
      if (osr) {
        number_of_osr_compilations_++;
        osr_code_map_.Put(method, code_ptr);
      } else if (class_linker->IsQuickResolutionStub(
                     method->GetEntryPointFromQuickCompiledCode())) {
        // This situation currently only occurs in the jit-zygote mode.
        DCHECK(Runtime::Current()->IsUsingApexBootImageLocation());
        DCHECK(!garbage_collect_code_);
        DCHECK(method->IsPreCompiled());
        // The shared region can easily be queried. For the private region, we
        // use a side map.
        if (!IsSharedRegion(*region)) {
          saved_compiled_methods_map_.Put(method, code_ptr);
        }
      } else {
        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
            method, method_header->GetEntryPoint());
      }
    }
    VLOG(jit)
        << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
        << ArtMethod::PrettyMethod(method) << "@" << method
        << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
        << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
        << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
        << reinterpret_cast<const void*>(method_header->GetEntryPoint() +
                                         method_header->GetCodeSize());
    histogram_code_memory_use_.AddValue(code_size);
    if (code_size > kCodeSizeLogThreshold) {
      LOG(INFO) << "JIT allocated "
                << PrettySize(code_size)
                << " for compiled code of "
                << ArtMethod::PrettyMethod(method);
    }
  }

  return reinterpret_cast<uint8_t*>(method_header);
}
| 805 | |
| 806 | size_t JitCodeCache::CodeCacheSize() { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 807 | MutexLock mu(Thread::Current(), *Locks::jit_lock_); |
Nicolas Geoffray | a5891e8 | 2015-11-06 14:18:27 +0000 | [diff] [blame] | 808 | return CodeCacheSizeLocked(); |
| 809 | } |
| 810 | |
Orion Hodson | eced692 | 2017-06-01 10:54:28 +0100 | [diff] [blame] | 811 | bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) { |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 812 | // This function is used only for testing and only with non-native methods. |
| 813 | CHECK(!method->IsNative()); |
| 814 | |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 815 | MutexLock mu(Thread::Current(), *Locks::jit_lock_); |
Orion Hodson | eced692 | 2017-06-01 10:54:28 +0100 | [diff] [blame] | 816 | |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 817 | bool osr = osr_code_map_.find(method) != osr_code_map_.end(); |
| 818 | bool in_cache = RemoveMethodLocked(method, release_memory); |
Orion Hodson | eced692 | 2017-06-01 10:54:28 +0100 | [diff] [blame] | 819 | |
| 820 | if (!in_cache) { |
| 821 | return false; |
| 822 | } |
| 823 | |
David Srbecky | e36e7f2 | 2018-11-14 14:21:23 +0000 | [diff] [blame] | 824 | method->SetCounter(0); |
Orion Hodson | eced692 | 2017-06-01 10:54:28 +0100 | [diff] [blame] | 825 | Runtime::Current()->GetInstrumentation()->UpdateMethodsCode( |
| 826 | method, GetQuickToInterpreterBridge()); |
| 827 | VLOG(jit) |
| 828 | << "JIT removed (osr=" << std::boolalpha << osr << std::noboolalpha << ") " |
| 829 | << ArtMethod::PrettyMethod(method) << "@" << method |
| 830 | << " ccache_size=" << PrettySize(CodeCacheSizeLocked())
| 831 | << " dcache_size=" << PrettySize(DataCacheSizeLocked());
| 832 | return true; |
| 833 | } |
| 834 | |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 835 | bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) { |
| 836 | if (LIKELY(!method->IsNative())) { |
| 837 | ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize); |
| 838 | if (info != nullptr) { |
| 839 | RemoveElement(profiling_infos_, info); |
| 840 | } |
| 841 | method->SetProfilingInfo(nullptr); |
| 842 | } |
| 843 | |
| 844 | bool in_cache = false; |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 845 | ScopedCodeCacheWrite ccw(private_region_); |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 846 | if (UNLIKELY(method->IsNative())) { |
| 847 | auto it = jni_stubs_map_.find(JniStubKey(method)); |
| 848 | if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) { |
| 849 | in_cache = true; |
| 850 | if (it->second.GetMethods().empty()) { |
| 851 | if (release_memory) { |
Orion Hodson | 607624f | 2018-05-11 10:10:46 +0100 | [diff] [blame] | 852 | FreeCodeAndData(it->second.GetCode()); |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 853 | } |
| 854 | jni_stubs_map_.erase(it); |
| 855 | } else { |
| 856 | it->first.UpdateShorty(it->second.GetMethods().front()); |
| 857 | } |
| 858 | } |
| 859 | } else { |
| 860 | for (auto it = method_code_map_.begin(); it != method_code_map_.end();) { |
| 861 | if (it->second == method) { |
| 862 | in_cache = true; |
| 863 | if (release_memory) { |
Orion Hodson | 607624f | 2018-05-11 10:10:46 +0100 | [diff] [blame] | 864 | FreeCodeAndData(it->first); |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 865 | } |
| 866 | it = method_code_map_.erase(it); |
| 867 | } else { |
| 868 | ++it; |
| 869 | } |
| 870 | } |
| 871 | |
| 872 | auto osr_it = osr_code_map_.find(method); |
| 873 | if (osr_it != osr_code_map_.end()) { |
| 874 | osr_code_map_.erase(osr_it); |
| 875 | } |
| 876 | } |
| 877 | |
| 878 | return in_cache; |
| 879 | } |
| 880 | |
Alex Light | dba6148 | 2016-12-21 08:20:29 -0800 | [diff] [blame] | 881 | // This notifies the code cache that the given method has been redefined and that it should remove |
| 882 | // any cached information it has on the method. All threads must be suspended before calling this |
| 883 | // method. The compiled code for the method (if there is any) must not be in any thread's call stack.
| 884 | void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 885 | MutexLock mu(Thread::Current(), *Locks::jit_lock_); |
Andreas Gampe | 98ea9d9 | 2018-10-19 14:06:15 -0700 | [diff] [blame] | 886 | RemoveMethodLocked(method, /* release_memory= */ true); |
Alex Light | dba6148 | 2016-12-21 08:20:29 -0800 | [diff] [blame] | 887 | } |
| 888 | |
| 889 | // This invalidates old_method. Once this function returns, one can no longer use old_method to
| 890 | // execute code unless it is fixed up. This fixup will happen later in the process of installing a |
| 891 | // class redefinition. |
| 892 | // TODO: We should add some info to ArtMethod to note that 'old_method' has been invalidated and
| 893 | // shouldn't be used, since it is no longer logically in the jit code cache.
| 894 | // TODO: We should add DCHECKs that validate that the JIT is paused when this method is entered.
| 895 | void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 896 | MutexLock mu(Thread::Current(), *Locks::jit_lock_); |
Alex Light | eee0bd4 | 2017-02-14 15:31:45 +0000 | [diff] [blame] | 897 | if (old_method->IsNative()) { |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 898 | // Update methods in jni_stubs_map_. |
| 899 | for (auto& entry : jni_stubs_map_) { |
| 900 | JniStubData& data = entry.second; |
| 901 | data.MoveObsoleteMethod(old_method, new_method); |
| 902 | } |
Alex Light | eee0bd4 | 2017-02-14 15:31:45 +0000 | [diff] [blame] | 903 | return; |
| 904 | } |
Alex Light | dba6148 | 2016-12-21 08:20:29 -0800 | [diff] [blame] | 905 | // Update ProfilingInfo to the new one and remove it from the old_method. |
| 906 | if (old_method->GetProfilingInfo(kRuntimePointerSize) != nullptr) { |
| 907 | DCHECK_EQ(old_method->GetProfilingInfo(kRuntimePointerSize)->GetMethod(), old_method); |
| 908 | ProfilingInfo* info = old_method->GetProfilingInfo(kRuntimePointerSize); |
| 909 | old_method->SetProfilingInfo(nullptr); |
| 910 | // Since the JIT should be paused and all threads suspended by the time this is called,
| 911 | // these checks should always pass.
| 912 | DCHECK(!info->IsInUseByCompiler()); |
| 913 | new_method->SetProfilingInfo(info); |
Alex Light | 2d441b1 | 2018-06-08 15:33:21 -0700 | [diff] [blame] | 914 | // Get rid of the old saved entrypoint if it is there. |
| 915 | info->SetSavedEntryPoint(nullptr); |
Alex Light | dba6148 | 2016-12-21 08:20:29 -0800 | [diff] [blame] | 916 | info->method_ = new_method; |
| 917 | } |
| 918 | // Update method_code_map_ to point to the new method. |
| 919 | for (auto& it : method_code_map_) { |
| 920 | if (it.second == old_method) { |
| 921 | it.second = new_method; |
| 922 | } |
| 923 | } |
| 924 | // Update osr_code_map_ to point to the new method. |
| 925 | auto code_map = osr_code_map_.find(old_method); |
| 926 | if (code_map != osr_code_map_.end()) { |
| 927 | osr_code_map_.Put(new_method, code_map->second); |
| 928 | osr_code_map_.erase(old_method); |
| 929 | } |
| 930 | } |
| 931 | |
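// Illustrative sketch (not compiled; helper names are hypothetical): the
// expected caller-side sequence during class redefinition. SuspendAllSketch()
// and PauseJitSketch() stand in for the real runtime machinery; the point is
// that MoveObsoleteMethod() re-points all cache bookkeeping while the world
// is stopped.
#if 0
void RedefineMethodSketch(JitCodeCache* cache, ArtMethod* old_m, ArtMethod* new_m) {
  SuspendAllSketch();                       // All threads must be suspended.
  PauseJitSketch();                         // No concurrent JIT compilation.
  cache->MoveObsoleteMethod(old_m, new_m);  // Profiling info and code maps now name new_m.
  // old_m is invalid from here on until the redefinition machinery fixes it up.
}
#endif
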
Nicolas Geoffray | 226805d | 2018-12-14 10:59:02 +0000 | [diff] [blame] | 932 | void JitCodeCache::ClearEntryPointsInZygoteExecSpace() { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 933 | MutexLock mu(Thread::Current(), *Locks::jit_lock_); |
Nicolas Geoffray | af213cc | 2019-07-01 10:50:55 +0100 | [diff] [blame] | 934 | for (const auto& it : method_code_map_) { |
| 935 | ArtMethod* method = it.second; |
Nicolas Geoffray | 226805d | 2018-12-14 10:59:02 +0000 | [diff] [blame] | 936 | if (IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode())) { |
| 937 | method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge()); |
| 938 | } |
Nicolas Geoffray | 226805d | 2018-12-14 10:59:02 +0000 | [diff] [blame] | 939 | } |
| 940 | } |
| 941 | |
Nicolas Geoffray | a5891e8 | 2015-11-06 14:18:27 +0000 | [diff] [blame] | 942 | size_t JitCodeCache::CodeCacheSizeLocked() { |
Nicolas Geoffray | f2dcba0 | 2019-07-22 13:59:24 +0100 | [diff] [blame] | 943 | return GetCurrentRegion()->GetUsedMemoryForCode(); |
Nicolas Geoffray | 0c3c266 | 2015-10-15 13:53:04 +0100 | [diff] [blame] | 944 | } |
| 945 | |
| 946 | size_t JitCodeCache::DataCacheSize() { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 947 | MutexLock mu(Thread::Current(), *Locks::jit_lock_); |
Nicolas Geoffray | a5891e8 | 2015-11-06 14:18:27 +0000 | [diff] [blame] | 948 | return DataCacheSizeLocked(); |
| 949 | } |
| 950 | |
| 951 | size_t JitCodeCache::DataCacheSizeLocked() { |
Nicolas Geoffray | f2dcba0 | 2019-07-22 13:59:24 +0100 | [diff] [blame] | 952 | return GetCurrentRegion()->GetUsedMemoryForData(); |
Mathieu Chartier | e5f13e5 | 2015-02-24 09:37:21 -0800 | [diff] [blame] | 953 | } |
| 954 | |
Nicolas Geoffray | f46501c | 2016-11-22 13:45:36 +0000 | [diff] [blame] | 955 | void JitCodeCache::ClearData(Thread* self, |
Nicolas Geoffray | 7f7539b | 2019-06-06 16:20:54 +0100 | [diff] [blame] | 956 | JitMemoryRegion* region, |
Nicolas Geoffray | f46501c | 2016-11-22 13:45:36 +0000 | [diff] [blame] | 957 | uint8_t* roots_data) { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 958 | MutexLock mu(self, *Locks::jit_lock_); |
Nicolas Geoffray | 7f7539b | 2019-06-06 16:20:54 +0100 | [diff] [blame] | 959 | region->FreeData(reinterpret_cast<uint8_t*>(roots_data)); |
Nicolas Geoffray | d28b969 | 2015-11-04 14:36:55 +0000 | [diff] [blame] | 960 | } |
| 961 | |
Nicolas Geoffray | 00a37ff | 2019-06-20 14:27:22 +0100 | [diff] [blame] | 962 | uint8_t* JitCodeCache::ReserveData(Thread* self, |
| 963 | JitMemoryRegion* region, |
| 964 | size_t stack_map_size, |
| 965 | size_t number_of_roots, |
| 966 | ArtMethod* method) { |
Nicolas Geoffray | 132d836 | 2016-11-16 09:19:42 +0000 | [diff] [blame] | 967 | size_t table_size = ComputeRootTableSize(number_of_roots); |
David Srbecky | 8cd5454 | 2018-07-15 23:58:44 +0100 | [diff] [blame] | 968 | size_t size = RoundUp(stack_map_size + table_size, sizeof(void*)); |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 969 | uint8_t* result = nullptr; |
| 970 | |
| 971 | { |
| 972 | ScopedThreadSuspension sts(self, kSuspended); |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 973 | MutexLock mu(self, *Locks::jit_lock_); |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 974 | WaitForPotentialCollectionToComplete(self); |
Nicolas Geoffray | 7f7539b | 2019-06-06 16:20:54 +0100 | [diff] [blame] | 975 | result = region->AllocateData(size); |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 976 | } |
| 977 | |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 978 | MutexLock mu(self, *Locks::jit_lock_); |
Nicolas Geoffray | 933330a | 2016-03-16 14:20:06 +0000 | [diff] [blame] | 979 | histogram_stack_map_memory_use_.AddValue(size); |
| 980 | if (size > kStackMapSizeLogThreshold) { |
| 981 | LOG(INFO) << "JIT allocated " |
| 982 | << PrettySize(size) |
| 983 | << " for stack maps of " |
David Sehr | 709b070 | 2016-10-13 09:12:37 -0700 | [diff] [blame] | 984 | << ArtMethod::PrettyMethod(method); |
Mathieu Chartier | e5f13e5 | 2015-02-24 09:37:21 -0800 | [diff] [blame] | 985 | } |
Nicolas Geoffray | 00a37ff | 2019-06-20 14:27:22 +0100 | [diff] [blame] | 986 | return result; |
Mathieu Chartier | e5f13e5 | 2015-02-24 09:37:21 -0800 | [diff] [blame] | 987 | } |
| 988 | |
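// A minimal standalone sketch of the size computation above, assuming only
// that the alignment is a power of two: the GC root table and the stack maps
// share a single data allocation, rounded up to pointer alignment.
#if 0
#include <cstddef>
constexpr size_t RoundUpSketch(size_t x, size_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);  // Requires power-of-two alignment.
}
static_assert(RoundUpSketch(13, 8) == 16, "13 bytes round up to 16 with 8-byte alignment");
#endif
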
Roland Levillain | bbc6e7e | 2018-08-24 16:58:47 +0100 | [diff] [blame] | 989 | class MarkCodeClosure final : public Closure { |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 990 | public: |
Nicolas Geoffray | b9f1af5 | 2018-11-16 10:30:29 +0000 | [diff] [blame] | 991 | MarkCodeClosure(JitCodeCache* code_cache, CodeCacheBitmap* bitmap, Barrier* barrier) |
| 992 | : code_cache_(code_cache), bitmap_(bitmap), barrier_(barrier) {} |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 993 | |
Roland Levillain | bbc6e7e | 2018-08-24 16:58:47 +0100 | [diff] [blame] | 994 | void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) { |
Mathieu Chartier | 32ce2ad | 2016-03-04 14:58:03 -0800 | [diff] [blame] | 995 | ScopedTrace trace(__PRETTY_FUNCTION__); |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 996 | DCHECK(thread == Thread::Current() || thread->IsSuspended()); |
Andreas Gampe | c7d878d | 2018-11-19 18:42:06 +0000 | [diff] [blame] | 997 | StackVisitor::WalkStack( |
| 998 | [&](const art::StackVisitor* stack_visitor) { |
| 999 | const OatQuickMethodHeader* method_header = |
| 1000 | stack_visitor->GetCurrentOatQuickMethodHeader(); |
| 1001 | if (method_header == nullptr) { |
| 1002 | return true; |
| 1003 | } |
| 1004 | const void* code = method_header->GetCode(); |
Nicolas Geoffray | ce9ed36 | 2018-11-29 03:19:28 +0000 | [diff] [blame] | 1005 | if (code_cache_->ContainsPc(code) && !code_cache_->IsInZygoteExecSpace(code)) { |
Andreas Gampe | c7d878d | 2018-11-19 18:42:06 +0000 | [diff] [blame] | 1006 | // Use the atomic set version, as multiple threads are executing this code. |
| 1007 | bitmap_->AtomicTestAndSet(FromCodeToAllocation(code)); |
| 1008 | } |
| 1009 | return true; |
| 1010 | }, |
| 1011 | thread, |
| 1012 | /* context= */ nullptr, |
| 1013 | art::StackVisitor::StackWalkKind::kSkipInlinedFrames); |
| 1014 | |
Nicolas Geoffray | 5a23d2e | 2015-11-03 18:58:57 +0000 | [diff] [blame] | 1015 | if (kIsDebugBuild) { |
| 1016 | // The stack walking code queries the side instrumentation stack if it |
| 1017 | // sees an instrumentation exit pc, so the JIT code of methods in that stack |
| 1018 | // must have been seen. We sanity check this below. |
| 1019 | for (const instrumentation::InstrumentationStackFrame& frame |
| 1020 | : *thread->GetInstrumentationStack()) { |
| 1021 | // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
| 1022 | // its stack frame; it is not the method owning return_pc_. We just pass null to
| 1023 | // LookupMethodHeader: the method is only used there for a debug-build check.
| 1024 | OatQuickMethodHeader* method_header = |
Andreas Gampe | 98ea9d9 | 2018-10-19 14:06:15 -0700 | [diff] [blame] | 1025 | code_cache_->LookupMethodHeader(frame.return_pc_, /* method= */ nullptr); |
Nicolas Geoffray | 5a23d2e | 2015-11-03 18:58:57 +0000 | [diff] [blame] | 1026 | if (method_header != nullptr) { |
| 1027 | const void* code = method_header->GetCode(); |
Nicolas Geoffray | b9f1af5 | 2018-11-16 10:30:29 +0000 | [diff] [blame] | 1028 | CHECK(bitmap_->Test(FromCodeToAllocation(code))); |
Nicolas Geoffray | 5a23d2e | 2015-11-03 18:58:57 +0000 | [diff] [blame] | 1029 | } |
| 1030 | } |
| 1031 | } |
Mathieu Chartier | 10d2508 | 2015-10-28 18:36:09 -0700 | [diff] [blame] | 1032 | barrier_->Pass(Thread::Current()); |
Mathieu Chartier | e5f13e5 | 2015-02-24 09:37:21 -0800 | [diff] [blame] | 1033 | } |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 1034 | |
| 1035 | private: |
| 1036 | JitCodeCache* const code_cache_; |
Nicolas Geoffray | b9f1af5 | 2018-11-16 10:30:29 +0000 | [diff] [blame] | 1037 | CodeCacheBitmap* const bitmap_; |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 1038 | Barrier* const barrier_; |
| 1039 | }; |
| 1040 | |
Nicolas Geoffray | 0a3be16 | 2015-11-18 11:15:22 +0000 | [diff] [blame] | 1041 | void JitCodeCache::NotifyCollectionDone(Thread* self) { |
| 1042 | collection_in_progress_ = false; |
| 1043 | lock_cond_.Broadcast(self); |
| 1044 | } |
| 1045 | |
Nicolas Geoffray | 8d37250 | 2016-02-23 13:56:43 +0000 | [diff] [blame] | 1046 | void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) { |
| 1047 | Barrier barrier(0); |
| 1048 | size_t threads_running_checkpoint = 0; |
Nicolas Geoffray | b9f1af5 | 2018-11-16 10:30:29 +0000 | [diff] [blame] | 1049 | MarkCodeClosure closure(this, GetLiveBitmap(), &barrier); |
Nicolas Geoffray | 8d37250 | 2016-02-23 13:56:43 +0000 | [diff] [blame] | 1050 | threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure); |
| 1051 | // Now that we have run our checkpoint, move to a suspended state and wait |
| 1052 | // for other threads to run the checkpoint. |
| 1053 | ScopedThreadSuspension sts(self, kSuspended); |
| 1054 | if (threads_running_checkpoint != 0) { |
| 1055 | barrier.Increment(self, threads_running_checkpoint); |
| 1056 | } |
| 1057 | } |
| 1058 | |
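// A minimal standalone sketch of the checkpoint/barrier handshake above, with
// the runtime's Barrier replaced by std:: primitives (names are illustrative,
// not the runtime's API). Each thread that runs the checkpoint closure calls
// Pass(); the requesting thread blocks in Wait() until all of them have.
#if 0
#include <condition_variable>
#include <mutex>

class BarrierSketch {
 public:
  explicit BarrierSketch(int count) : count_(count) {}
  void Pass() {  // Called by each thread after running the closure.
    std::lock_guard<std::mutex> lg(mu_);
    if (--count_ == 0) {
      cv_.notify_all();
    }
  }
  void Wait() {  // Called by the thread that requested the checkpoint.
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [this] { return count_ == 0; });
  }
 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int count_;
};
#endif
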
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1059 | bool JitCodeCache::ShouldDoFullCollection() { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1060 | if (private_region_.GetCurrentCapacity() == private_region_.GetMaxCapacity()) { |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1061 | // Always do a full collection when the code cache is full. |
| 1062 | return true; |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1063 | } else if (private_region_.GetCurrentCapacity() < kReservedCapacity) { |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1064 | // Always do partial collection when the code cache size is below the reserved |
| 1065 | // capacity. |
| 1066 | return false; |
| 1067 | } else if (last_collection_increased_code_cache_) { |
| 1068 | // This time do a full collection. |
| 1069 | return true; |
| 1070 | } else { |
| 1071 | // This time do a partial collection. |
| 1072 | return false; |
Nicolas Geoffray | 8d37250 | 2016-02-23 13:56:43 +0000 | [diff] [blame] | 1073 | } |
| 1074 | } |
| 1075 | |
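// Summary of the policy above, derived from the code:
//   code cache at max capacity          -> full collection
//   code cache below reserved capacity  -> partial collection
//   last collection grew the cache      -> full collection
//   otherwise                           -> partial collection
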
Nicolas Geoffray | 0a3be16 | 2015-11-18 11:15:22 +0000 | [diff] [blame] | 1076 | void JitCodeCache::GarbageCollectCache(Thread* self) { |
Mathieu Chartier | 32ce2ad | 2016-03-04 14:58:03 -0800 | [diff] [blame] | 1077 | ScopedTrace trace(__FUNCTION__); |
Nicolas Geoffray | a5891e8 | 2015-11-06 14:18:27 +0000 | [diff] [blame] | 1078 | // Wait for an existing collection, or let everyone know we are starting one. |
| 1079 | { |
| 1080 | ScopedThreadSuspension sts(self, kSuspended); |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1081 | MutexLock mu(self, *Locks::jit_lock_); |
Nicolas Geoffray | 226805d | 2018-12-14 10:59:02 +0000 | [diff] [blame] | 1082 | if (!garbage_collect_code_) { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1083 | private_region_.IncreaseCodeCacheCapacity(); |
Nicolas Geoffray | 226805d | 2018-12-14 10:59:02 +0000 | [diff] [blame] | 1084 | return; |
| 1085 | } else if (WaitForPotentialCollectionToComplete(self)) { |
Nicolas Geoffray | a5891e8 | 2015-11-06 14:18:27 +0000 | [diff] [blame] | 1086 | return; |
| 1087 | } else { |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1088 | number_of_collections_++; |
Nicolas Geoffray | 0a3be16 | 2015-11-18 11:15:22 +0000 | [diff] [blame] | 1089 | live_bitmap_.reset(CodeCacheBitmap::Create( |
| 1090 | "code-cache-bitmap", |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1091 | reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()), |
| 1092 | reinterpret_cast<uintptr_t>( |
| 1093 | private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2))); |
Nicolas Geoffray | 8d37250 | 2016-02-23 13:56:43 +0000 | [diff] [blame] | 1094 | collection_in_progress_ = true; |
| 1095 | } |
| 1096 | } |
| 1097 | |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1098 | TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit)); |
Nicolas Geoffray | 8d37250 | 2016-02-23 13:56:43 +0000 | [diff] [blame] | 1099 | { |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1100 | TimingLogger::ScopedTiming st("Code cache collection", &logger); |
Nicolas Geoffray | 0a3be16 | 2015-11-18 11:15:22 +0000 | [diff] [blame] | 1101 | |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1102 | bool do_full_collection = false; |
| 1103 | { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1104 | MutexLock mu(self, *Locks::jit_lock_); |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1105 | do_full_collection = ShouldDoFullCollection(); |
Nicolas Geoffray | a96917a | 2016-03-01 22:18:02 +0000 | [diff] [blame] | 1106 | } |
| 1107 | |
Nicolas Geoffray | 646d638 | 2017-08-09 10:50:00 +0100 | [diff] [blame] | 1108 | VLOG(jit) << "Do " |
| 1109 | << (do_full_collection ? "full" : "partial") |
| 1110 | << " code cache collection, code=" |
| 1111 | << PrettySize(CodeCacheSize()) |
| 1112 | << ", data=" << PrettySize(DataCacheSize()); |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1113 | |
Andreas Gampe | 98ea9d9 | 2018-10-19 14:06:15 -0700 | [diff] [blame] | 1114 | DoCollection(self, /* collect_profiling_info= */ do_full_collection); |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1115 | |
Nicolas Geoffray | 646d638 | 2017-08-09 10:50:00 +0100 | [diff] [blame] | 1116 | VLOG(jit) << "After code cache collection, code=" |
| 1117 | << PrettySize(CodeCacheSize()) |
| 1118 | << ", data=" << PrettySize(DataCacheSize()); |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1119 | |
| 1120 | { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1121 | MutexLock mu(self, *Locks::jit_lock_); |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1122 | |
| 1123 | // Increase the code cache only when we do partial collections. |
| 1124 | // TODO: base this strategy on how full the code cache is? |
| 1125 | if (do_full_collection) { |
| 1126 | last_collection_increased_code_cache_ = false; |
| 1127 | } else { |
| 1128 | last_collection_increased_code_cache_ = true; |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1129 | private_region_.IncreaseCodeCacheCapacity(); |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1130 | } |
| 1131 | |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1132 | bool next_collection_will_be_full = ShouldDoFullCollection(); |
| 1133 | |
| 1134 | // Start polling the liveness of compiled code to prepare for the next full collection. |
Nicolas Geoffray | 480d510 | 2016-04-18 12:09:30 +0100 | [diff] [blame] | 1135 | if (next_collection_will_be_full) { |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1136 | // Save the entry point of methods we have compiled, and update the entry |
| 1137 | // point of those methods to the interpreter. If the method is invoked, the |
| 1138 | // interpreter will update its entry point to the compiled code and call it. |
| 1139 | for (ProfilingInfo* info : profiling_infos_) { |
| 1140 | const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode(); |
Nicolas Geoffray | ce9ed36 | 2018-11-29 03:19:28 +0000 | [diff] [blame] | 1141 | if (!IsInZygoteDataSpace(info) && ContainsPc(entry_point)) { |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1142 | info->SetSavedEntryPoint(entry_point); |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1143 | // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring |
Nicolas Geoffray | 3b1a7f4 | 2017-02-22 10:21:00 +0000 | [diff] [blame] | 1144 | // class of the method. We may be concurrently running a GC, which makes accessing
| 1145 | // the class unsafe. We know it is OK to bypass the instrumentation as we've just |
| 1146 | // checked that the current entry point is JIT compiled code. |
| 1147 | info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge()); |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1148 | } |
| 1149 | } |
| 1150 | |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1151 | // Change entry points of native methods back to the GenericJNI entrypoint. |
| 1152 | for (const auto& entry : jni_stubs_map_) { |
| 1153 | const JniStubData& data = entry.second; |
Nicolas Geoffray | ce9ed36 | 2018-11-29 03:19:28 +0000 | [diff] [blame] | 1154 | if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) { |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1155 | continue; |
| 1156 | } |
| 1157 | // Make sure a single invocation of the GenericJNI trampoline tries to recompile. |
| 1158 | uint16_t new_counter = Runtime::Current()->GetJit()->HotMethodThreshold() - 1u; |
| 1159 | const OatQuickMethodHeader* method_header = |
| 1160 | OatQuickMethodHeader::FromCodePointer(data.GetCode()); |
| 1161 | for (ArtMethod* method : data.GetMethods()) { |
| 1162 | if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) { |
| 1163 | // Don't call Instrumentation::UpdateMethodsCode(), same as for normal methods above. |
| 1164 | method->SetCounter(new_counter); |
| 1165 | method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub()); |
| 1166 | } |
| 1167 | } |
| 1168 | } |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1169 | } |
| 1170 | live_bitmap_.reset(nullptr); |
| 1171 | NotifyCollectionDone(self); |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1172 | } |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1173 | } |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1174 | Runtime::Current()->GetJit()->AddTimingLogger(logger); |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1175 | } |
| 1176 | |
Nicolas Geoffray | 9abb297 | 2016-03-04 14:32:59 +0000 | [diff] [blame] | 1177 | void JitCodeCache::RemoveUnmarkedCode(Thread* self) { |
Mathieu Chartier | 32ce2ad | 2016-03-04 14:58:03 -0800 | [diff] [blame] | 1178 | ScopedTrace trace(__FUNCTION__); |
Mingyao Yang | 063fc77 | 2016-08-02 11:02:54 -0700 | [diff] [blame] | 1179 | std::unordered_set<OatQuickMethodHeader*> method_headers; |
| 1180 | { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1181 | MutexLock mu(self, *Locks::jit_lock_); |
Mingyao Yang | 063fc77 | 2016-08-02 11:02:54 -0700 | [diff] [blame] | 1182 | // Iterate over all compiled code and remove entries that are not marked. |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1183 | for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) { |
| 1184 | JniStubData* data = &it->second; |
Nicolas Geoffray | ce9ed36 | 2018-11-29 03:19:28 +0000 | [diff] [blame] | 1185 | if (IsInZygoteExecSpace(data->GetCode()) || |
| 1186 | !data->IsCompiled() || |
| 1187 | GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) { |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1188 | ++it; |
| 1189 | } else { |
| 1190 | method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode())); |
| 1191 | it = jni_stubs_map_.erase(it); |
| 1192 | } |
| 1193 | } |
Mingyao Yang | 063fc77 | 2016-08-02 11:02:54 -0700 | [diff] [blame] | 1194 | for (auto it = method_code_map_.begin(); it != method_code_map_.end();) { |
| 1195 | const void* code_ptr = it->first; |
| 1196 | uintptr_t allocation = FromCodeToAllocation(code_ptr); |
Nicolas Geoffray | ce9ed36 | 2018-11-29 03:19:28 +0000 | [diff] [blame] | 1197 | if (IsInZygoteExecSpace(code_ptr) || GetLiveBitmap()->Test(allocation)) { |
Mingyao Yang | 063fc77 | 2016-08-02 11:02:54 -0700 | [diff] [blame] | 1198 | ++it; |
| 1199 | } else { |
Alex Light | 2d441b1 | 2018-06-08 15:33:21 -0700 | [diff] [blame] | 1200 | OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr); |
| 1201 | method_headers.insert(header); |
Mingyao Yang | 063fc77 | 2016-08-02 11:02:54 -0700 | [diff] [blame] | 1202 | it = method_code_map_.erase(it); |
| 1203 | } |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1204 | } |
| 1205 | } |
Mingyao Yang | 063fc77 | 2016-08-02 11:02:54 -0700 | [diff] [blame] | 1206 | FreeAllMethodHeaders(method_headers); |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1207 | } |
| 1208 | |
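// Both loops above use the map erase-while-iterating idiom. A minimal
// standalone sketch, with the removal predicate left abstract:
#if 0
#include <iterator>
#include <map>
template <typename K, typename V, typename Pred>
void EraseIfSketch(std::map<K, V>& m, Pred pred) {
  for (auto it = m.begin(); it != m.end();) {
    // erase() invalidates only the erased iterator and returns the next one.
    it = pred(*it) ? m.erase(it) : std::next(it);
  }
}
#endif
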
Nicolas Geoffray | 226805d | 2018-12-14 10:59:02 +0000 | [diff] [blame] | 1209 | bool JitCodeCache::GetGarbageCollectCode() { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1210 | MutexLock mu(Thread::Current(), *Locks::jit_lock_); |
Nicolas Geoffray | 226805d | 2018-12-14 10:59:02 +0000 | [diff] [blame] | 1211 | return garbage_collect_code_; |
| 1212 | } |
| 1213 | |
| 1214 | void JitCodeCache::SetGarbageCollectCode(bool value) { |
| 1215 | Thread* self = Thread::Current(); |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1216 | MutexLock mu(self, *Locks::jit_lock_); |
Nicolas Geoffray | 226805d | 2018-12-14 10:59:02 +0000 | [diff] [blame] | 1217 | if (garbage_collect_code_ != value) { |
| 1218 | if (garbage_collect_code_) { |
| 1219 | // When dynamically disabling the garbage collection, we need
| 1220 | // to make sure that a potential current collection is finished, and also |
| 1221 | // clear the saved entry point in profiling infos to avoid dangling pointers. |
| 1222 | WaitForPotentialCollectionToComplete(self); |
| 1223 | for (ProfilingInfo* info : profiling_infos_) { |
| 1224 | info->SetSavedEntryPoint(nullptr); |
| 1225 | } |
| 1226 | } |
| 1227 | // Update the flag while holding the lock to ensure no thread will try to GC. |
| 1228 | garbage_collect_code_ = value; |
| 1229 | } |
| 1230 | } |
| 1231 | |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1232 | void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) { |
Mathieu Chartier | 32ce2ad | 2016-03-04 14:58:03 -0800 | [diff] [blame] | 1233 | ScopedTrace trace(__FUNCTION__); |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1234 | { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1235 | MutexLock mu(self, *Locks::jit_lock_); |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1236 | if (collect_profiling_info) { |
| 1237 | // Clear the profiling info of methods that do not have compiled code as their entry point.
| 1238 | // Also remove the saved entry point from the ProfilingInfo objects. |
| 1239 | for (ProfilingInfo* info : profiling_infos_) { |
| 1240 | const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode(); |
Nicolas Geoffray | ce9ed36 | 2018-11-29 03:19:28 +0000 | [diff] [blame] | 1241 | if (!ContainsPc(ptr) && !info->IsInUseByCompiler() && !IsInZygoteDataSpace(info)) { |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1242 | info->GetMethod()->SetProfilingInfo(nullptr); |
| 1243 | } |
Nicolas Geoffray | b9a639d | 2016-03-22 11:25:20 +0000 | [diff] [blame] | 1244 | |
| 1245 | if (info->GetSavedEntryPoint() != nullptr) { |
| 1246 | info->SetSavedEntryPoint(nullptr); |
| 1247 | // We are going to move this method back to interpreter. Clear the counter now to |
Mathieu Chartier | f044c22 | 2017-05-31 15:27:54 -0700 | [diff] [blame] | 1248 | // give it a chance to be hot again. |
Andreas Gampe | 98ea9d9 | 2018-10-19 14:06:15 -0700 | [diff] [blame] | 1249 | ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true); |
Nicolas Geoffray | b9a639d | 2016-03-22 11:25:20 +0000 | [diff] [blame] | 1250 | } |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1251 | } |
| 1252 | } else if (kIsDebugBuild) { |
| 1253 | // Sanity check that the profiling infos do not have a dangling entry point. |
| 1254 | for (ProfilingInfo* info : profiling_infos_) { |
David Srbecky | 605a5fe | 2019-04-24 14:05:21 +0100 | [diff] [blame] | 1255 | DCHECK(!Runtime::Current()->IsZygote()); |
| 1256 | const void* entry_point = info->GetSavedEntryPoint(); |
| 1257 | DCHECK(entry_point == nullptr || IsInZygoteExecSpace(entry_point)); |
Nicolas Geoffray | 73be1e8 | 2015-09-17 15:22:56 +0100 | [diff] [blame] | 1258 | } |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1259 | } |
Nicolas Geoffray | b331feb | 2016-02-05 16:51:53 +0000 | [diff] [blame] | 1260 | |
Nicolas Geoffray | 9abb297 | 2016-03-04 14:32:59 +0000 | [diff] [blame] | 1261 | // Mark compiled code that is the entry point of an ArtMethod. Compiled code that is not
| 1262 | // an entry point is either:
| 1263 | // - OSR compiled code, which will be removed if not in a thread's call stack.
| 1264 | // - discarded compiled code, which will be removed if not in a thread's call stack.
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1265 | for (const auto& entry : jni_stubs_map_) { |
| 1266 | const JniStubData& data = entry.second; |
| 1267 | const void* code_ptr = data.GetCode(); |
Nicolas Geoffray | ce9ed36 | 2018-11-29 03:19:28 +0000 | [diff] [blame] | 1268 | if (IsInZygoteExecSpace(code_ptr)) { |
| 1269 | continue; |
| 1270 | } |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1271 | const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr); |
| 1272 | for (ArtMethod* method : data.GetMethods()) { |
| 1273 | if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) { |
| 1274 | GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr)); |
| 1275 | break; |
| 1276 | } |
| 1277 | } |
| 1278 | } |
Nicolas Geoffray | 9abb297 | 2016-03-04 14:32:59 +0000 | [diff] [blame] | 1279 | for (const auto& it : method_code_map_) { |
| 1280 | ArtMethod* method = it.second; |
| 1281 | const void* code_ptr = it.first; |
Nicolas Geoffray | ce9ed36 | 2018-11-29 03:19:28 +0000 | [diff] [blame] | 1282 | if (IsInZygoteExecSpace(code_ptr)) { |
| 1283 | continue; |
| 1284 | } |
Nicolas Geoffray | 9abb297 | 2016-03-04 14:32:59 +0000 | [diff] [blame] | 1285 | const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr); |
| 1286 | if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) { |
| 1287 | GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr)); |
| 1288 | } |
| 1289 | } |
| 1290 | |
Nicolas Geoffray | d9994f0 | 2016-02-11 17:35:55 +0000 | [diff] [blame] | 1291 | // Empty the OSR method map, as OSR compiled code will be deleted (except entries
Nicolas Geoffray | b331feb | 2016-02-05 16:51:53 +0000 | [diff] [blame] | 1292 | // on thread stacks).
| 1293 | osr_code_map_.clear(); |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 1294 | } |
| 1295 | |
| 1296 | // Run a checkpoint on all threads to mark the JIT compiled code they are running. |
Nicolas Geoffray | 8d37250 | 2016-02-23 13:56:43 +0000 | [diff] [blame] | 1297 | MarkCompiledCodeOnThreadStacks(self); |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 1298 | |
Nicolas Geoffray | 9abb297 | 2016-03-04 14:32:59 +0000 | [diff] [blame] | 1299 | // At this point, mutator threads are still running, and entrypoints of methods can |
| 1300 | // change. We do know they cannot change to a code cache entry that is not marked, |
| 1301 | // therefore we can safely remove those entries. |
| 1302 | RemoveUnmarkedCode(self); |
Nicolas Geoffray | a96917a | 2016-03-01 22:18:02 +0000 | [diff] [blame] | 1303 | |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1304 | if (collect_profiling_info) { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1305 | MutexLock mu(self, *Locks::jit_lock_); |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1306 | // Free all profiling infos of methods not compiled nor being compiled. |
Nicolas Geoffray | 73be1e8 | 2015-09-17 15:22:56 +0100 | [diff] [blame] | 1307 | auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(), |
Nicolas Geoffray | 38ea9bd | 2016-02-19 16:25:57 +0000 | [diff] [blame] | 1308 | [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS { |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1309 | const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode(); |
Nicolas Geoffray | 511e41b | 2016-03-02 17:09:35 +0000 | [diff] [blame] | 1310 | // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope |
| 1311 | // that the compiled code would not get revived. As mutator threads run concurrently, |
| 1312 | // they may have revived the compiled code, and now we are in the situation where |
| 1313 | // a method has compiled code but no ProfilingInfo. |
| 1314 | // We make sure compiled methods have a ProfilingInfo object. It is needed for |
| 1315 | // code cache collection. |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 1316 | if (ContainsPc(ptr) && |
| 1317 | info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) == nullptr) { |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1318 | info->GetMethod()->SetProfilingInfo(info); |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 1319 | } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) { |
Nicolas Geoffray | 3512244 | 2016-03-02 12:05:30 +0000 | [diff] [blame] | 1320 | // No need for this ProfilingInfo object anymore. |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1321 | private_region_.FreeData(reinterpret_cast<uint8_t*>(info)); |
Nicolas Geoffray | 73be1e8 | 2015-09-17 15:22:56 +0100 | [diff] [blame] | 1322 | return true; |
| 1323 | } |
| 1324 | return false; |
| 1325 | }); |
| 1326 | profiling_infos_.erase(profiling_kept_end, profiling_infos_.end()); |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 1327 | } |
Mathieu Chartier | e5f13e5 | 2015-02-24 09:37:21 -0800 | [diff] [blame] | 1328 | } |
| 1329 | |
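// The profiling-info cleanup above is the standard erase-remove idiom (with a
// side effect in the predicate). A minimal standalone sketch of the idiom:
#if 0
#include <algorithm>
#include <vector>
template <typename T, typename Pred>
void EraseRemoveSketch(std::vector<T>& v, Pred pred) {
  // remove_if compacts the kept elements to the front; erase chops the tail.
  v.erase(std::remove_if(v.begin(), v.end(), pred), v.end());
}
#endif
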
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 1330 | OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) { |
Vladimir Marko | 33bff25 | 2017-11-01 14:35:42 +0000 | [diff] [blame] | 1331 | static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA"); |
| 1332 | if (kRuntimeISA == InstructionSet::kArm) { |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 1333 | // On Thumb-2, the pc is offset by one. |
| 1334 | --pc; |
Mathieu Chartier | e5f13e5 | 2015-02-24 09:37:21 -0800 | [diff] [blame] | 1335 | } |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 1336 | if (!ContainsPc(reinterpret_cast<const void*>(pc))) { |
| 1337 | return nullptr; |
| 1338 | } |
| 1339 | |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1340 | if (!kIsDebugBuild) { |
| 1341 | // Called with null `method` only from MarkCodeClosure::Run() in debug build. |
| 1342 | CHECK(method != nullptr); |
Vladimir Marko | 47d3185 | 2017-11-28 18:36:12 +0000 | [diff] [blame] | 1343 | } |
Vladimir Marko | e744163 | 2017-11-29 13:00:56 +0000 | [diff] [blame] | 1344 | |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1345 | MutexLock mu(Thread::Current(), *Locks::jit_lock_); |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1346 | OatQuickMethodHeader* method_header = nullptr; |
| 1347 | ArtMethod* found_method = nullptr; // Only for DCHECK(), not for JNI stubs. |
| 1348 | if (method != nullptr && UNLIKELY(method->IsNative())) { |
| 1349 | auto it = jni_stubs_map_.find(JniStubKey(method)); |
| 1350 | if (it == jni_stubs_map_.end() || !ContainsElement(it->second.GetMethods(), method)) { |
| 1351 | return nullptr; |
| 1352 | } |
| 1353 | const void* code_ptr = it->second.GetCode(); |
| 1354 | method_header = OatQuickMethodHeader::FromCodePointer(code_ptr); |
| 1355 | if (!method_header->Contains(pc)) { |
| 1356 | return nullptr; |
| 1357 | } |
| 1358 | } else { |
Nicolas Geoffray | e32d24c | 2019-07-05 10:28:59 +0100 | [diff] [blame] | 1359 | if (shared_region_.IsInExecSpace(reinterpret_cast<const void*>(pc))) { |
| 1360 | const void* code_ptr = zygote_map_.GetCodeFor(method, pc); |
| 1361 | if (code_ptr != nullptr) { |
| 1362 | return OatQuickMethodHeader::FromCodePointer(code_ptr); |
| 1363 | } |
| 1364 | } |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1365 | auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc)); |
| 1366 | if (it != method_code_map_.begin()) { |
| 1367 | --it; |
| 1368 | const void* code_ptr = it->first; |
| 1369 | if (OatQuickMethodHeader::FromCodePointer(code_ptr)->Contains(pc)) { |
| 1370 | method_header = OatQuickMethodHeader::FromCodePointer(code_ptr); |
| 1371 | found_method = it->second; |
| 1372 | } |
| 1373 | } |
| 1374 | if (method_header == nullptr && method == nullptr) { |
| 1375 | // Scan all compiled JNI stubs as well. This slow search is used only
| 1376 | // for checks in debug builds; in release builds the `method` is not null.
| 1377 | for (auto&& entry : jni_stubs_map_) { |
| 1378 | const JniStubData& data = entry.second; |
| 1379 | if (data.IsCompiled() && |
| 1380 | OatQuickMethodHeader::FromCodePointer(data.GetCode())->Contains(pc)) { |
| 1381 | method_header = OatQuickMethodHeader::FromCodePointer(data.GetCode()); |
| 1382 | } |
| 1383 | } |
| 1384 | } |
| 1385 | if (method_header == nullptr) { |
| 1386 | return nullptr; |
| 1387 | } |
Nicolas Geoffray | 056d775 | 2017-11-30 09:12:13 +0000 | [diff] [blame] | 1388 | } |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1389 | |
| 1390 | if (kIsDebugBuild && method != nullptr && !method->IsNative()) { |
Vladimir Marko | eab0248 | 2019-05-09 10:28:17 +0100 | [diff] [blame] | 1391 | DCHECK_EQ(found_method, method) |
| 1392 | << ArtMethod::PrettyMethod(method) << " " |
| 1393 | << ArtMethod::PrettyMethod(found_method) << " " |
David Sehr | 709b070 | 2016-10-13 09:12:37 -0700 | [diff] [blame] | 1394 | << std::hex << pc; |
Nicolas Geoffray | 5a23d2e | 2015-11-03 18:58:57 +0000 | [diff] [blame] | 1395 | } |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 1396 | return method_header; |
Mathieu Chartier | e5f13e5 | 2015-02-24 09:37:21 -0800 | [diff] [blame] | 1397 | } |
| 1398 | |
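// The method_code_map_ lookup above finds the entry whose code address is the
// greatest one not above pc, then verifies containment. A minimal standalone
// sketch of that pattern (formulated with upper_bound; the code above uses
// lower_bound plus a step back, which is equivalent for pcs strictly inside a
// method body):
#if 0
#include <map>
template <typename V>
const V* FindCoveringSketch(const std::map<const void*, V>& m, const void* pc) {
  auto it = m.upper_bound(pc);  // First entry strictly above pc.
  if (it == m.begin()) {
    return nullptr;             // pc is below every mapped region.
  }
  --it;                         // Greatest entry at or below pc.
  return &it->second;           // Caller must still check that the region contains pc.
}
#endif
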
Nicolas Geoffray | b331feb | 2016-02-05 16:51:53 +0000 | [diff] [blame] | 1399 | OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1400 | MutexLock mu(Thread::Current(), *Locks::jit_lock_); |
Nicolas Geoffray | b331feb | 2016-02-05 16:51:53 +0000 | [diff] [blame] | 1401 | auto it = osr_code_map_.find(method); |
| 1402 | if (it == osr_code_map_.end()) { |
| 1403 | return nullptr; |
| 1404 | } |
| 1405 | return OatQuickMethodHeader::FromCodePointer(it->second); |
| 1406 | } |
| 1407 | |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1408 | ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self, |
| 1409 | ArtMethod* method, |
| 1410 | const std::vector<uint32_t>& entries, |
Nicolas Geoffray | 1e7da9b | 2016-03-01 14:11:40 +0000 | [diff] [blame] | 1411 | bool retry_allocation) |
| 1412 | // No thread safety analysis as we are using TryLock/Unlock explicitly. |
| 1413 | NO_THREAD_SAFETY_ANALYSIS { |
Nicolas Geoffray | a48c3df | 2019-06-27 13:11:12 +0000 | [diff] [blame] | 1414 | DCHECK(CanAllocateProfilingInfo()); |
Nicolas Geoffray | 1e7da9b | 2016-03-01 14:11:40 +0000 | [diff] [blame] | 1415 | ProfilingInfo* info = nullptr; |
| 1416 | if (!retry_allocation) { |
| 1417 | // If we are allocating for the interpreter, just try to lock, to avoid |
| 1418 | // lock contention with the JIT. |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1419 | if (Locks::jit_lock_->ExclusiveTryLock(self)) { |
Nicolas Geoffray | 1e7da9b | 2016-03-01 14:11:40 +0000 | [diff] [blame] | 1420 | info = AddProfilingInfoInternal(self, method, entries); |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1421 | Locks::jit_lock_->ExclusiveUnlock(self); |
Nicolas Geoffray | 1e7da9b | 2016-03-01 14:11:40 +0000 | [diff] [blame] | 1422 | } |
| 1423 | } else { |
| 1424 | { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1425 | MutexLock mu(self, *Locks::jit_lock_); |
Nicolas Geoffray | 1e7da9b | 2016-03-01 14:11:40 +0000 | [diff] [blame] | 1426 | info = AddProfilingInfoInternal(self, method, entries); |
| 1427 | } |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1428 | |
Nicolas Geoffray | 1e7da9b | 2016-03-01 14:11:40 +0000 | [diff] [blame] | 1429 | if (info == nullptr) { |
| 1430 | GarbageCollectCache(self); |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1431 | MutexLock mu(self, *Locks::jit_lock_); |
Nicolas Geoffray | 1e7da9b | 2016-03-01 14:11:40 +0000 | [diff] [blame] | 1432 | info = AddProfilingInfoInternal(self, method, entries); |
| 1433 | } |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1434 | } |
| 1435 | return info; |
| 1436 | } |
| 1437 | |
Nicolas Geoffray | 1e7da9b | 2016-03-01 14:11:40 +0000 | [diff] [blame] | 1438 | ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED, |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1439 | ArtMethod* method, |
| 1440 | const std::vector<uint32_t>& entries) { |
| 1441 | size_t profile_info_size = RoundUp( |
Nicolas Geoffray | 73be1e8 | 2015-09-17 15:22:56 +0100 | [diff] [blame] | 1442 | sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(), |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1443 | sizeof(void*)); |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1444 | |
| 1445 | // Check whether some other thread has concurrently created it. |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 1446 | ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize); |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1447 | if (info != nullptr) { |
| 1448 | return info; |
| 1449 | } |
| 1450 | |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1451 | uint8_t* data = private_region_.AllocateData(profile_info_size); |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1452 | if (data == nullptr) { |
| 1453 | return nullptr; |
| 1454 | } |
| 1455 | info = new (data) ProfilingInfo(method, entries); |
Nicolas Geoffray | 07f3564 | 2016-01-04 16:06:51 +0000 | [diff] [blame] | 1456 | |
| 1457 | // Make sure other threads see the data in the profiling info object before the |
| 1458 | // store in the ArtMethod's ProfilingInfo pointer. |
Orion Hodson | 27b9676 | 2018-03-13 16:06:57 +0000 | [diff] [blame] | 1459 | std::atomic_thread_fence(std::memory_order_release); |
Nicolas Geoffray | 07f3564 | 2016-01-04 16:06:51 +0000 | [diff] [blame] | 1460 | |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1461 | method->SetProfilingInfo(info); |
| 1462 | profiling_infos_.push_back(info); |
Nicolas Geoffray | 933330a | 2016-03-16 14:20:06 +0000 | [diff] [blame] | 1463 | histogram_profiling_info_memory_use_.AddValue(profile_info_size); |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1464 | return info; |
| 1465 | } |
| 1466 | |
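// The release fence above implements a publication pattern: fully construct
// the object, fence, then store the pointer that other threads will read. A
// standalone sketch with std::atomic (types and names are illustrative):
#if 0
#include <atomic>
#include <new>

struct InfoSketch { int payload; };
std::atomic<InfoSketch*> g_slot{nullptr};

void PublishSketch(void* storage) {
  InfoSketch* info = new (storage) InfoSketch{42};      // Initialize completely first.
  std::atomic_thread_fence(std::memory_order_release);  // Order the init before the publish.
  g_slot.store(info, std::memory_order_relaxed);        // Readers pair with an acquire load/fence.
}
#endif
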
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1467 | void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) { |
Nicolas Geoffray | a48c3df | 2019-06-27 13:11:12 +0000 | [diff] [blame] | 1468 | return shared_region_.OwnsSpace(mspace) |
| 1469 | ? shared_region_.MoreCore(mspace, increment) |
| 1470 | : private_region_.MoreCore(mspace, increment); |
Nicolas Geoffray | 0a3be16 | 2015-11-18 11:15:22 +0000 | [diff] [blame] | 1471 | } |
| 1472 | |
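// Note: MoreCore() is the sbrk-style callback that dlmalloc invokes when one
// of its mspaces needs more (or, for a negative increment, less) backing
// memory. The dispatch above simply forwards the request to whichever
// JitMemoryRegion owns the given mspace.
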
Calin Juravle | 9962962 | 2016-04-19 16:33:46 +0100 | [diff] [blame] | 1473 | void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations, |
Calin Juravle | 940eb0c | 2017-01-30 19:30:44 -0800 | [diff] [blame] | 1474 | std::vector<ProfileMethodInfo>& methods) { |
Nicolas Geoffray | 1afdfe6 | 2018-11-21 09:38:10 +0000 | [diff] [blame] | 1475 | Thread* self = Thread::Current(); |
| 1476 | WaitUntilInlineCacheAccessible(self); |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1477 | MutexLock mu(self, *Locks::jit_lock_); |
Mathieu Chartier | 32ce2ad | 2016-03-04 14:58:03 -0800 | [diff] [blame] | 1478 | ScopedTrace trace(__FUNCTION__); |
Calin Juravle | a39fd98 | 2017-05-18 10:15:52 -0700 | [diff] [blame] | 1479 | uint16_t jit_compile_threshold = Runtime::Current()->GetJITOptions()->GetCompileThreshold(); |
Calin Juravle | 9962962 | 2016-04-19 16:33:46 +0100 | [diff] [blame] | 1480 | for (const ProfilingInfo* info : profiling_infos_) { |
| 1481 | ArtMethod* method = info->GetMethod(); |
| 1482 | const DexFile* dex_file = method->GetDexFile(); |
Mathieu Chartier | 79c87da | 2017-10-10 11:54:29 -0700 | [diff] [blame] | 1483 | const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation()); |
| 1484 | if (!ContainsElement(dex_base_locations, base_location)) { |
Calin Juravle | 940eb0c | 2017-01-30 19:30:44 -0800 | [diff] [blame] | 1485 | // Skip dex files which are not profiled. |
| 1486 | continue; |
Calin Juravle | 31f2c15 | 2015-10-23 17:56:15 +0100 | [diff] [blame] | 1487 | } |
Calin Juravle | 940eb0c | 2017-01-30 19:30:44 -0800 | [diff] [blame] | 1488 | std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches; |
Calin Juravle | a39fd98 | 2017-05-18 10:15:52 -0700 | [diff] [blame] | 1489 | |
| 1490 | // If the method didn't reach the compilation threshold don't save the inline caches. |
| 1491 | // They might be incomplete and cause unnecessary deoptimizations. |
| 1492 | // If the inline cache is empty the compiler will generate a regular invoke virtual/interface. |
| 1493 | if (method->GetCounter() < jit_compile_threshold) { |
| 1494 | methods.emplace_back(/*ProfileMethodInfo*/ |
Mathieu Chartier | bbe3a5e | 2017-06-13 16:36:17 -0700 | [diff] [blame] | 1495 | MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches); |
Calin Juravle | a39fd98 | 2017-05-18 10:15:52 -0700 | [diff] [blame] | 1496 | continue; |
| 1497 | } |
| 1498 | |
Calin Juravle | 940eb0c | 2017-01-30 19:30:44 -0800 | [diff] [blame] | 1499 | for (size_t i = 0; i < info->number_of_inline_caches_; ++i) { |
Mathieu Chartier | dbddc22 | 2017-05-24 12:04:13 -0700 | [diff] [blame] | 1500 | std::vector<TypeReference> profile_classes; |
Calin Juravle | 940eb0c | 2017-01-30 19:30:44 -0800 | [diff] [blame] | 1501 | const InlineCache& cache = info->cache_[i]; |
Calin Juravle | 13439f0 | 2017-02-21 01:17:21 -0800 | [diff] [blame] | 1502 | ArtMethod* caller = info->GetMethod(); |
Calin Juravle | 589e71e | 2017-03-03 16:05:05 -0800 | [diff] [blame] | 1503 | bool is_missing_types = false; |
Calin Juravle | 940eb0c | 2017-01-30 19:30:44 -0800 | [diff] [blame] | 1504 | for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) { |
| 1505 | mirror::Class* cls = cache.classes_[k].Read(); |
| 1506 | if (cls == nullptr) { |
| 1507 | break; |
| 1508 | } |
Calin Juravle | 4ca70a3 | 2017-02-21 16:22:24 -0800 | [diff] [blame] | 1509 | |
Calin Juravle | 13439f0 | 2017-02-21 01:17:21 -0800 | [diff] [blame] | 1510 | // Check if the receiver is in the boot class path or if it's in the |
| 1511 | // same class loader as the caller. If not, skip it, as there is not |
| 1512 | // much we can do during AOT. |
| 1513 | if (!cls->IsBootStrapClassLoaded() && |
| 1514 | caller->GetClassLoader() != cls->GetClassLoader()) { |
| 1515 | is_missing_types = true; |
| 1516 | continue; |
| 1517 | } |
| 1518 | |
Calin Juravle | 4ca70a3 | 2017-02-21 16:22:24 -0800 | [diff] [blame] | 1519 | const DexFile* class_dex_file = nullptr; |
| 1520 | dex::TypeIndex type_index; |
| 1521 | |
| 1522 | if (cls->GetDexCache() == nullptr) { |
| 1523 | DCHECK(cls->IsArrayClass()) << cls->PrettyClass(); |
Calin Juravle | e21806f | 2017-02-22 11:49:43 -0800 | [diff] [blame] | 1524 | // Make a best effort to find the type index in the method's dex file.
| 1525 | // We could search all open dex files, but that could be expensive and
| 1526 | // is probably not worth it.
Calin Juravle | 4ca70a3 | 2017-02-21 16:22:24 -0800 | [diff] [blame] | 1527 | class_dex_file = dex_file; |
| 1528 | type_index = cls->FindTypeIndexInOtherDexFile(*dex_file); |
| 1529 | } else { |
| 1530 | class_dex_file = &(cls->GetDexFile()); |
| 1531 | type_index = cls->GetDexTypeIndex(); |
| 1532 | } |
| 1533 | if (!type_index.IsValid()) { |
| 1534 | // Could be a proxy class or an array for which we couldn't find the type index. |
Calin Juravle | 589e71e | 2017-03-03 16:05:05 -0800 | [diff] [blame] | 1535 | is_missing_types = true; |
Calin Juravle | 4ca70a3 | 2017-02-21 16:22:24 -0800 | [diff] [blame] | 1536 | continue; |
| 1537 | } |
Mathieu Chartier | 79c87da | 2017-10-10 11:54:29 -0700 | [diff] [blame] | 1538 | if (ContainsElement(dex_base_locations, |
| 1539 | DexFileLoader::GetBaseLocation(class_dex_file->GetLocation()))) { |
Calin Juravle | 940eb0c | 2017-01-30 19:30:44 -0800 | [diff] [blame] | 1540 | // Only consider classes from the same apk (including multidex). |
| 1541 | profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/ |
Calin Juravle | 4ca70a3 | 2017-02-21 16:22:24 -0800 | [diff] [blame] | 1542 | class_dex_file, type_index); |
Calin Juravle | 589e71e | 2017-03-03 16:05:05 -0800 | [diff] [blame] | 1543 | } else { |
| 1544 | is_missing_types = true; |
Calin Juravle | 940eb0c | 2017-01-30 19:30:44 -0800 | [diff] [blame] | 1545 | } |
| 1546 | } |
| 1547 | if (!profile_classes.empty()) { |
| 1548 | inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/ |
Calin Juravle | 589e71e | 2017-03-03 16:05:05 -0800 | [diff] [blame] | 1549 | cache.dex_pc_, is_missing_types, profile_classes); |
Calin Juravle | 940eb0c | 2017-01-30 19:30:44 -0800 | [diff] [blame] | 1550 | } |
| 1551 | } |
| 1552 | methods.emplace_back(/*ProfileMethodInfo*/ |
Mathieu Chartier | bbe3a5e | 2017-06-13 16:36:17 -0700 | [diff] [blame] | 1553 | MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches); |
Calin Juravle | 31f2c15 | 2015-10-23 17:56:15 +0100 | [diff] [blame] | 1554 | } |
| 1555 | } |
| 1556 | |
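// Summary of the filtering above: methods below the compile threshold are
// recorded without inline caches, since incomplete caches could cause
// unnecessary deoptimizations. A receiver class in an InlineCache contributes
// a profile entry only if its dex file belongs to the profiled apk (including
// multidex); receivers outside both the boot class path and the caller's
// class loader, types whose index cannot be resolved (proxies, some arrays),
// and classes from other apks set is_missing_types instead.
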
Nicolas Geoffray | 71cd50f | 2016-04-14 15:00:33 +0100 | [diff] [blame] | 1557 | bool JitCodeCache::IsOsrCompiled(ArtMethod* method) { |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1558 | MutexLock mu(Thread::Current(), *Locks::jit_lock_); |
Nicolas Geoffray | 71cd50f | 2016-04-14 15:00:33 +0100 | [diff] [blame] | 1559 | return osr_code_map_.find(method) != osr_code_map_.end(); |
| 1560 | } |
| 1561 | |
Nicolas Geoffray | a48c3df | 2019-06-27 13:11:12 +0000 | [diff] [blame] | 1562 | bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, |
| 1563 | Thread* self, |
| 1564 | bool osr, |
| 1565 | bool prejit, |
| 1566 | JitMemoryRegion* region) { |
Nicolas Geoffray | b331feb | 2016-02-05 16:51:53 +0000 | [diff] [blame] | 1567 | if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) { |
Nicolas Geoffray | 73be1e8 | 2015-09-17 15:22:56 +0100 | [diff] [blame] | 1568 | return false; |
| 1569 | } |
Nicolas Geoffray | a42363f | 2015-12-17 14:57:09 +0000 | [diff] [blame] | 1570 | |
Nicolas Geoffray | d03e8dd | 2019-04-10 23:13:20 +0100 | [diff] [blame] | 1571 | ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); |
| 1572 | if (class_linker->IsQuickResolutionStub(method->GetEntryPointFromQuickCompiledCode())) { |
Nicolas Geoffray | d2f13ba | 2019-06-04 16:48:58 +0100 | [diff] [blame] | 1573 | if (!prejit) { |
| 1574 | // Unless we're pre-jitting, we currently don't save the JIT compiled code if we cannot |
| 1575 | // update the entrypoint due to having the resolution stub. |
Nicolas Geoffray | 7989ac9 | 2019-04-10 12:42:30 +0100 | [diff] [blame] | 1576 | VLOG(jit) << "Not compiling " |
| 1577 | << method->PrettyMethod() |
| 1578 | << " because it has the resolution stub"; |
| 1579 | // Give it a new chance to be hot. |
| 1580 | ClearMethodCounter(method, /*was_warm=*/ false); |
| 1581 | return false; |
| 1582 | } |
Nicolas Geoffray | d03e8dd | 2019-04-10 23:13:20 +0100 | [diff] [blame] | 1583 | } |
| 1584 | |
Nicolas Geoffray | 2a905b2 | 2019-06-06 09:04:07 +0100 | [diff] [blame] | 1585 | MutexLock mu(self, *Locks::jit_lock_); |
Nicolas Geoffray | b331feb | 2016-02-05 16:51:53 +0000 | [diff] [blame] | 1586 | if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) { |
| 1587 | return false; |
| 1588 | } |
Nicolas Geoffray | bcd94c8 | 2016-03-03 13:23:33 +0000 | [diff] [blame] | 1589 | |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1590 | if (UNLIKELY(method->IsNative())) { |
| 1591 | JniStubKey key(method); |
| 1592 | auto it = jni_stubs_map_.find(key); |
| 1593 | bool new_compilation = false; |
| 1594 | if (it == jni_stubs_map_.end()) { |
| 1595 | // Create a new entry to mark the stub as being compiled. |
| 1596 | it = jni_stubs_map_.Put(key, JniStubData{}); |
| 1597 | new_compilation = true; |
| 1598 | } |
| 1599 | JniStubData* data = &it->second; |
| 1600 | data->AddMethod(method); |
| 1601 | if (data->IsCompiled()) { |
| 1602 | OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(data->GetCode()); |
| 1603 | const void* entrypoint = method_header->GetEntryPoint(); |
| 1604 | // Update also entrypoints of other methods held by the JniStubData. |
| 1605 | // We could simply update the entrypoint of `method`, but if the last JIT GC
| 1606 | // changed these entrypoints to GenericJNI in preparation for a full GC, we may
| 1607 | // as well change them back: this stub will not be collected anyway, and doing
| 1608 | // so avoids a few expensive GenericJNI calls.
| 1609 | instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); |
| 1610 | for (ArtMethod* m : data->GetMethods()) { |
Nicolas Geoffray | a6e0e7d | 2018-01-26 13:16:50 +0000 | [diff] [blame] | 1611 | // Call the dedicated method instead of the more generic UpdateMethodsCode, because |
| 1612 | // `m` might be in the process of being deleted. |
Nicolas Geoffray | 7989ac9 | 2019-04-10 12:42:30 +0100 | [diff] [blame] | 1613 | if (!class_linker->IsQuickResolutionStub(m->GetEntryPointFromQuickCompiledCode())) { |
| 1614 | instrumentation->UpdateNativeMethodsCodeToJitCode(m, entrypoint); |
| 1615 | } |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 1616 | } |
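      // If a code cache collection is running concurrently, mark this stub's code
      // as live so the ongoing collection does not free the code we just handed
      // out. (Zygote exec space is never collected, so it needs no marking.)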
      if (collection_in_progress_) {
        if (!IsInZygoteExecSpace(data->GetCode())) {
          GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
        }
      }
    }
    return new_compilation;
  } else {
    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
    if (info == nullptr) {
      // When prejitting, we don't allocate a profiling info.
      if (!prejit && !IsSharedRegion(*region)) {
        VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
        // Because the counter is not atomic, there are some rare cases where we may not hit the
        // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
        ClearMethodCounter(method, /*was_warm=*/ false);
        return false;
      }
    } else {
      if (info->IsMethodBeingCompiled(osr)) {
        return false;
      }
      info->SetIsMethodBeingCompiled(true, osr);
    }
    return true;
  }
}

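// NotifyCompilerUse() and DoneCompilerUse() below bracket the compiler's use of
// a method's ProfilingInfo, tracked via an inline-use count. A minimal usage
// sketch (illustrative; `code_cache` and the inlining step are hypothetical):
//
//   ProfilingInfo* info = code_cache->NotifyCompilerUse(method, self);
//   if (info != nullptr) {
//     // ... inline through `method` using the info's inline caches ...
//     code_cache->DoneCompilerUse(method, self);
//   }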
ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
  MutexLock mu(self, *Locks::jit_lock_);
  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
  if (info != nullptr) {
    if (!info->IncrementInlineUse()) {
      // Overflow of inlining uses, just bail.
      return nullptr;
    }
  }
  return info;
}

void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
  MutexLock mu(self, *Locks::jit_lock_);
  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
  DCHECK(info != nullptr);
  info->DecrementInlineUse();
}

void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr) {
  DCHECK_EQ(Thread::Current(), self);
  MutexLock mu(self, *Locks::jit_lock_);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    DCHECK(it != jni_stubs_map_.end());
    JniStubData* data = &it->second;
    DCHECK(ContainsElement(data->GetMethods(), method));
    if (UNLIKELY(!data->IsCompiled())) {
      // Failed to compile; the JNI compiler never fails, but the cache may be full.
      jni_stubs_map_.erase(it);  // Remove the entry added in NotifyCompilationOf().
    }  // else CommitCodeInternal() updated entrypoints of all methods in the JniStubData.
  } else {
    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
    if (info != nullptr) {
      DCHECK(info->IsMethodBeingCompiled(osr));
      info->SetIsMethodBeingCompiled(false, osr);
    }
  }
}

void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
                                             const OatQuickMethodHeader* header) {
  DCHECK(!method->IsNative());
  ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
  const void* method_entrypoint = method->GetEntryPointFromQuickCompiledCode();
  if ((profiling_info != nullptr) &&
      (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
    // When instrumentation is set, the actual entrypoint is the one in the profiling info.
    method_entrypoint = profiling_info->GetSavedEntryPoint();
    // Prevent future uses of the compiled code.
    profiling_info->SetSavedEntryPoint(nullptr);
  }

  // Clear the method counter if we are running the JIT-compiled code, since we might
  // want to JIT it again in the future.
  if (method_entrypoint == header->GetEntryPoint()) {
    // The entrypoint is the one to invalidate, so we just update it to the interpreter
    // entry point and clear the counter so the method can get JITted again.
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, GetQuickToInterpreterBridge());
    ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr);
  } else {
    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
    auto it = osr_code_map_.find(method);
    if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
      // Remove the OSR method, to avoid using it again.
      osr_code_map_.erase(it);
    }
  }

  // In case the method was pre-compiled, clear that information so we
  // can recompile it ourselves.
  if (method->IsPreCompiled()) {
    method->ClearPreCompiled();
  }
}

void JitCodeCache::Dump(std::ostream& os) {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  os << "Current JIT code cache size (used / resident): "
     << GetCurrentRegion()->GetUsedMemoryForCode() / KB << "KB / "
     << GetCurrentRegion()->GetResidentMemoryForCode() / KB << "KB\n"
     << "Current JIT data cache size (used / resident): "
     << GetCurrentRegion()->GetUsedMemoryForData() / KB << "KB / "
     << GetCurrentRegion()->GetResidentMemoryForData() / KB << "KB\n";
  if (!Runtime::Current()->IsZygote()) {
    os << "Zygote JIT code cache size (at point of fork): "
       << shared_region_.GetUsedMemoryForCode() / KB << "KB / "
       << shared_region_.GetResidentMemoryForCode() / KB << "KB\n"
       << "Zygote JIT data cache size (at point of fork): "
       << shared_region_.GetUsedMemoryForData() / KB << "KB / "
       << shared_region_.GetResidentMemoryForData() / KB << "KB\n";
  }
  os << "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
     << "Current JIT capacity: " << PrettySize(GetCurrentRegion()->GetCurrentCapacity()) << "\n"
     << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
     << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
     << "Total number of JIT compilations: " << number_of_compilations_ << "\n"
     << "Total number of JIT compilations for on stack replacement: "
     << number_of_osr_compilations_ << "\n"
     << "Total number of JIT code cache collections: " << number_of_collections_ << std::endl;
  histogram_stack_map_memory_use_.PrintMemoryUse(os);
  histogram_code_memory_use_.PrintMemoryUse(os);
  histogram_profiling_info_memory_use_.PrintMemoryUse(os);
}

void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
  Thread* self = Thread::Current();

  // Remove potential tasks that have been inherited from the zygote.
  // We do this now and not in Jit::PostForkChildAction, as system server calls
  // JitCodeCache::PostForkChildAction first, and then does some code loading
  // that may result in new JIT tasks that we want to keep.
  ThreadPool* pool = Runtime::Current()->GetJit()->GetThreadPool();
  if (pool != nullptr) {
    pool->RemoveAllTasks(self);
  }

  MutexLock mu(self, *Locks::jit_lock_);

  // Reset potential writable MemMaps inherited from the zygote. We never want
  // to write to them.
  shared_region_.ResetWritableMappings();

  if (is_zygote || Runtime::Current()->IsSafeMode()) {
    // Don't create a private region for a child zygote. Regions are usually mapped shared
    // (to satisfy dual-view), and we don't want children of a child zygote to inherit it.
    return;
  }

  // Reset all statistics to be specific to this process.
  number_of_compilations_ = 0;
  number_of_osr_compilations_ = 0;
  number_of_collections_ = 0;
  histogram_stack_map_memory_use_.Reset();
  histogram_code_memory_use_.Reset();
  histogram_profiling_info_memory_use_.Reset();

  size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
  size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
  std::string error_msg;
  if (!private_region_.Initialize(initial_capacity,
                                  max_capacity,
                                  /* rwx_memory_allowed= */ !is_system_server,
                                  is_zygote,
                                  &error_msg)) {
    LOG(WARNING) << "Could not create private region after zygote fork: " << error_msg;
  }
}

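// Descriptive note: the zygote allocates from the shared region so that code it
// compiles before forking can be shared with child processes; every other
// process allocates from its private region.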
JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
  return Runtime::Current()->IsZygote() ? &shared_region_ : &private_region_;
}

void ZygoteMap::Initialize(uint32_t number_of_methods) {
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
  // Size the array so the load factor stays between 40% and 80%: divide by 0.8,
  // then round up to a power of two. This gives OK lookup times while keeping at
  // least 20% of the slots empty, which guarantees that the probing loops below
  // terminate.
  size_t capacity = RoundUpToPowerOfTwo(number_of_methods * 100 / 80);
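  // For example (illustrative arithmetic, not from the source): with 1000
  // methods, 1000 * 100 / 80 == 1250, which rounds up to a capacity of 2048,
  // i.e. a load factor of ~49%; 1638 methods also give a capacity of 2048, for
  // a load factor of ~80%, the worst case.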
  Entry* data = reinterpret_cast<Entry*>(region_->AllocateData(capacity * sizeof(Entry)));
  if (data != nullptr) {
    region_->FillData(data, capacity, Entry { nullptr, nullptr });
    map_ = ArrayRef(data, capacity);
  }
}

const void* ZygoteMap::GetCodeFor(ArtMethod* method, uintptr_t pc) const {
  if (map_.empty()) {
    return nullptr;
  }

  if (method == nullptr) {
    // Do a linear search. This should only be used in debug builds.
    CHECK(kIsDebugBuild);
    for (const Entry& entry : map_) {
      const void* code_ptr = entry.code_ptr;
      if (code_ptr != nullptr) {
        OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        if (method_header->Contains(pc)) {
          return code_ptr;
        }
      }
    }
    return nullptr;
  }

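  // The capacity is a power of two (see ZygoteMap::Initialize), so masking with
  // `map_.size() - 1u` is equivalent to `hf(method) % map_.size()`.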
  std::hash<ArtMethod*> hf;
  size_t index = hf(method) & (map_.size() - 1u);
  size_t original_index = index;
  // Loop over the array: we know this loop terminates, as we will either
  // encounter the given method or a null entry; either case ends the loop.
  // Note that the zygote may concurrently write new entries to the map. That's OK,
  // as the map is never resized.
  while (true) {
    const Entry& entry = map_[index];
    if (entry.method == nullptr) {
      // Not compiled yet.
      return nullptr;
    }
    if (entry.method == method) {
      if (entry.code_ptr == nullptr) {
        // This is a race with the zygote, which wrote the method but hasn't written
        // the code yet. Just bail and wait for the next time we need the method.
        return nullptr;
      }
      if (pc != 0 && !OatQuickMethodHeader::FromCodePointer(entry.code_ptr)->Contains(pc)) {
        return nullptr;
      }
      return entry.code_ptr;
    }
    index = (index + 1) & (map_.size() - 1);
    DCHECK_NE(original_index, index);
  }
}

void ZygoteMap::Put(const void* code, ArtMethod* method) {
  if (map_.empty()) {
    return;
  }
  CHECK(Runtime::Current()->IsZygote());
  std::hash<ArtMethod*> hf;
  size_t index = hf(method) & (map_.size() - 1);
  size_t original_index = index;
  // Because the size of the map is bigger than the number of methods that will
  // be added, we are guaranteed to find a free slot in the array, and
  // therefore this loop terminates.
  while (true) {
    Entry* entry = &map_[index];
    if (entry->method == nullptr) {
      // Note that readers can read this memory concurrently, but that's OK: we only
      // write pointers, and a reader that observes the `method` field before the
      // `code` field is handled by GetCodeFor(), which bails and retries later.
      region_->WriteData(entry, Entry { method, code });
      break;
    }
    index = (index + 1) & (map_.size() - 1);
    DCHECK_NE(original_index, index);
  }
  DCHECK_EQ(GetCodeFor(method), code);
}

}  // namespace jit
}  // namespace art