/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "android-base/unique_fd.h"

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/membarrier.h"
#include "base/memfd.h"
#include "base/mem_map.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "cha.h"
#include "debugger_interface.h"
#include "dex/dex_file_loader.h"
#include "dex/method_reference.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle.h"
#include "instrumentation.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "oat_file-inl.h"
#include "oat_quick_method_header.h"
#include "object_callbacks.h"
#include "profile/profile_compilation_info.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"

using android::base::unique_fd;

namespace art {
namespace jit {

static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;

static constexpr int kProtR = PROT_READ;
static constexpr int kProtRW = PROT_READ | PROT_WRITE;
static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtRX = PROT_READ | PROT_EXEC;

namespace {

// Translate an address belonging to one memory map into an address in a second. This is useful
// when there are two virtual memory ranges for the same physical memory range.
template <typename T>
T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
  CHECK(src.HasAddress(src_ptr));
  uint8_t* const raw_src_ptr = reinterpret_cast<uint8_t*>(src_ptr);
  return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
}
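
// Worked example (hypothetical addresses, purely illustrative): if `src` covers
// [0x1000, 0x2000) and `dst` maps the same physical pages at [0x5000, 0x6000), then
// TranslateAddress(reinterpret_cast<uint8_t*>(0x1234), src, dst) returns 0x5234: the same
// offset (0x234) rebased onto the second view.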

}  // namespace

class JitCodeCache::JniStubKey {
 public:
  explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
      : shorty_(method->GetShorty()),
        is_static_(method->IsStatic()),
        is_fast_native_(method->IsFastNative()),
        is_critical_native_(method->IsCriticalNative()),
        is_synchronized_(method->IsSynchronized()) {
    DCHECK(!(is_fast_native_ && is_critical_native_));
  }

  bool operator<(const JniStubKey& rhs) const {
    if (is_static_ != rhs.is_static_) {
      return rhs.is_static_;
    }
    if (is_synchronized_ != rhs.is_synchronized_) {
      return rhs.is_synchronized_;
    }
    if (is_fast_native_ != rhs.is_fast_native_) {
      return rhs.is_fast_native_;
    }
    if (is_critical_native_ != rhs.is_critical_native_) {
      return rhs.is_critical_native_;
    }
    return strcmp(shorty_, rhs.shorty_) < 0;
  }

  // Update the shorty to point to another method's shorty. Call this function when removing
  // the method that references the old shorty from JniCodeData and not removing the entire
  // JniCodeData; the old shorty may become a dangling pointer when that method is unloaded.
  void UpdateShorty(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* shorty = method->GetShorty();
    DCHECK_STREQ(shorty_, shorty);
    shorty_ = shorty;
  }

 private:
  // The shorty points to a DexFile data and may need to change
  // to point to the same shorty in a different DexFile.
  mutable const char* shorty_;

  const bool is_static_;
  const bool is_fast_native_;
  const bool is_critical_native_;
  const bool is_synchronized_;
};
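
// Illustration (hypothetical methods): instance methods `native int foo(int)` and
// `native int bar(int)` declared in different classes produce equal JniStubKeys (same shorty
// "II", same flags), so under operator< neither orders before the other and both map to a
// single entry in the JNI stubs map.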

class JitCodeCache::JniStubData {
 public:
  JniStubData() : code_(nullptr), methods_() {}

  void SetCode(const void* code) {
    DCHECK(code != nullptr);
    code_ = code;
  }

  const void* GetCode() const {
    return code_;
  }

  bool IsCompiled() const {
    return GetCode() != nullptr;
  }

  void AddMethod(ArtMethod* method) {
    if (!ContainsElement(methods_, method)) {
      methods_.push_back(method);
    }
  }

  const std::vector<ArtMethod*>& GetMethods() const {
    return methods_;
  }

  void RemoveMethodsIn(const LinearAlloc& alloc) {
    auto kept_end = std::remove_if(
        methods_.begin(),
        methods_.end(),
        [&alloc](ArtMethod* method) { return alloc.ContainsUnsafe(method); });
    methods_.erase(kept_end, methods_.end());
  }

  bool RemoveMethod(ArtMethod* method) {
    auto it = std::find(methods_.begin(), methods_.end(), method);
    if (it != methods_.end()) {
      methods_.erase(it);
      return true;
    } else {
      return false;
    }
  }

  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
    std::replace(methods_.begin(), methods_.end(), old_method, new_method);
  }

 private:
  const void* code_;
  std::vector<ArtMethod*> methods_;
};
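
// Sketch of the intended lifecycle: the first compilation for a given JniStubKey creates a
// JniStubData entry, AddMethod() registers the triggering method, and SetCode() publishes the
// stub; later methods with an equal key just call AddMethod() and share the code returned by
// GetCode().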

JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
                                   size_t max_capacity,
                                   bool generate_debug_info,
                                   bool used_only_for_profile_data,
                                   std::string* error_msg) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  CHECK_GE(max_capacity, initial_capacity);

  // With 'perf', we want a 1-1 mapping between an address and a method.
  // We are not able to keep method pointers alive during the instrumentation method entry
  // trampoline, so we simply disable jit-gc while instrumentation exit stubs are installed.
  bool garbage_collect_code = !generate_debug_info &&
      !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();

  // We need to have 32 bit offsets from method headers in code cache which point to things
  // in the data cache. If the maps are more than 4GB apart, having multiple maps wouldn't work.
  // Ensure we're below 1 GB to be safe.
  if (max_capacity > 1 * GB) {
    std::ostringstream oss;
    oss << "Maximum code cache capacity is limited to 1 GB, "
        << PrettySize(max_capacity) << " is too big";
    *error_msg = oss.str();
    return nullptr;
  }

  // Register for membarrier expedited sync core if JIT will be generating code.
  if (!used_only_for_profile_data) {
    if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
      // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
      // flushed and it's used when adding code to the JIT. The memory used by the new code may
      // have just been released and, in theory, the old code could still be in a pipeline.
      VLOG(jit) << "Kernel does not support membarrier sync-core";
    }
  }

  // File descriptor enabling dual-view mapping of code section.
  unique_fd mem_fd;

  // Bionic supports memfd_create, but the call may fail on older kernels.
  mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags */ 0));
  if (mem_fd.get() < 0) {
    VLOG(jit) << "Failed to initialize dual view JIT. memfd_create() error: "
              << strerror(errno);
  }

  if (mem_fd.get() >= 0 && ftruncate(mem_fd, max_capacity) != 0) {
    std::ostringstream oss;
    oss << "Failed to initialize memory file: " << strerror(errno);
    *error_msg = oss.str();
    return nullptr;
  }

  // Data cache will be half of the initial allocation.
  // Code cache will be the other half of the initial allocation.
  // TODO: Make this variable?

  // Align both capacities to page size, as that's the unit mspaces use.
  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
  max_capacity = RoundDown(max_capacity, 2 * kPageSize);
  const size_t data_capacity = max_capacity / 2;
  const size_t exec_capacity = used_only_for_profile_data ? 0 : max_capacity - data_capacity;
  DCHECK_LE(data_capacity + exec_capacity, max_capacity);
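
  // Worked example (assuming a 4 KiB page size): max_capacity = 64 MB yields
  // data_capacity = 32 MB, and exec_capacity = 32 MB when code will be generated; in the
  // profile-data-only configuration exec_capacity is 0 and no code pages are mapped.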

  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  // Map in low 4gb to simplify accessing root tables for x86_64.
  // We could do PC-relative addressing to avoid this problem, but that
  // would require reserving code and data area before submitting, which
  // means more windows for the code memory to be RWX.
  int base_flags;
  MemMap data_pages;
  if (mem_fd.get() >= 0) {
    // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
    // for data and non-writable view of JIT code pages. We use the memory file descriptor to
    // enable dual mapping - we'll create a second mapping using the descriptor below. The
    // mappings will look like:
    //
    //  VA                  PA
    //
    //  +---------------+
    //  | non exec code |\
    //  +---------------+ \
    //  :               :\ \
    //  +---------------+.\.+---------------+
    //  |  exec code    |  \|     code      |
    //  +---------------+...+---------------+
    //  |      data     |   |     data      |
    //  +---------------+...+---------------+
    //
    // In this configuration code updates are written to the non-executable view of the code
    // cache, and the executable view of the code cache has fixed RX memory protections.
    //
    // This memory needs to be mapped shared as the code portions will have two mappings.
    base_flags = MAP_SHARED;
    data_pages = MemMap::MapFile(
        data_capacity + exec_capacity,
        kProtRW,
        base_flags,
        mem_fd,
        /* start */ 0,
        /* low_4gb */ true,
        "data-code-cache",
        &error_str);
  } else {
    // Single view of JIT code cache case. Create an initial mapping of data pages large enough
    // for data and JIT code pages. The mappings will look like:
    //
    //  VA                  PA
    //
    //  +---------------+...+---------------+
    //  |  exec code    |   |     code      |
    //  +---------------+...+---------------+
    //  |      data     |   |     data      |
    //  +---------------+...+---------------+
    //
    // In this configuration code updates are written to the executable view of the code cache,
    // and the executable view of the code cache transitions RX to RWX for the update and then
    // back to RX after the update.
    base_flags = MAP_PRIVATE | MAP_ANON;
    data_pages = MemMap::MapAnonymous(
        "data-code-cache",
        /* addr */ nullptr,
        data_capacity + exec_capacity,
        kProtRW,
        /* low_4gb */ true,
        /* reuse */ false,
        /* reservation */ nullptr,
        &error_str);
  }

  if (!data_pages.IsValid()) {
    std::ostringstream oss;
    oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  MemMap exec_pages;
  MemMap non_exec_pages;
  if (exec_capacity > 0) {
    uint8_t* const divider = data_pages.Begin() + data_capacity;
    // Set initial permission for executable view to catch any SELinux permission problems early
    // (for processes that cannot map WX pages). Otherwise, this region does not need to be
    // executable as there is no code in the cache yet.
    exec_pages = data_pages.RemapAtEnd(divider,
                                       "jit-code-cache",
                                       kProtRX,
                                       base_flags | MAP_FIXED,
                                       mem_fd.get(),
                                       (mem_fd.get() >= 0) ? data_capacity : 0,
                                       &error_str);
    if (!exec_pages.IsValid()) {
      std::ostringstream oss;
      oss << "Failed to create read execute code cache: " << error_str << " size=" << max_capacity;
      *error_msg = oss.str();
      return nullptr;
    }

    if (mem_fd.get() >= 0) {
      // For dual view, create the secondary view of code memory used for updating code. This view
      // is never executable.
      non_exec_pages = MemMap::MapFile(exec_capacity,
                                       kProtR,
                                       base_flags,
                                       mem_fd,
                                       /* start */ data_capacity,
                                       /* low_4gb */ false,
                                       "jit-code-cache-rw",
                                       &error_str);
      if (!non_exec_pages.IsValid()) {
        // Log and continue as single view JIT.
        VLOG(jit) << "Failed to map non-executable view of JIT code cache";
      }
    }
  } else {
    // Profiling only. No memory for code required.
    DCHECK(used_only_for_profile_data);
  }

  const size_t initial_data_capacity = initial_capacity / 2;
  const size_t initial_exec_capacity =
      (exec_capacity == 0) ? 0 : (initial_capacity - initial_data_capacity);

  return new JitCodeCache(
      std::move(data_pages),
      std::move(exec_pages),
      std::move(non_exec_pages),
      initial_data_capacity,
      initial_exec_capacity,
      max_capacity,
      garbage_collect_code);
}

JitCodeCache::JitCodeCache(MemMap&& data_pages,
                           MemMap&& exec_pages,
                           MemMap&& non_exec_pages,
                           size_t initial_data_capacity,
                           size_t initial_exec_capacity,
                           size_t max_capacity,
                           bool garbage_collect_code)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache condition variable", lock_),
      collection_in_progress_(false),
      data_pages_(std::move(data_pages)),
      exec_pages_(std::move(exec_pages)),
      non_exec_pages_(std::move(non_exec_pages)),
      max_capacity_(max_capacity),
      current_capacity_(initial_exec_capacity + initial_data_capacity),
      data_end_(initial_data_capacity),
      exec_end_(initial_exec_capacity),
      last_collection_increased_code_cache_(false),
      garbage_collect_code_(garbage_collect_code),
      used_memory_for_data_(0),
      used_memory_for_code_(0),
      number_of_compilations_(0),
      number_of_osr_compilations_(0),
      number_of_collections_(0),
      histogram_stack_map_memory_use_("Memory used for stack maps", 16),
      histogram_code_memory_use_("Memory used for compiled code", 16),
      histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
      is_weak_access_enabled_(true),
      inline_cache_cond_("Jit inline cache condition variable", lock_) {

  DCHECK_GE(max_capacity, initial_exec_capacity + initial_data_capacity);

  // Initialize the data heap.
  data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
  CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";

  // Initialize the code heap.
  MemMap* code_heap = nullptr;
  if (non_exec_pages_.IsValid()) {
    code_heap = &non_exec_pages_;
  } else if (exec_pages_.IsValid()) {
    code_heap = &exec_pages_;
  }
  if (code_heap != nullptr) {
    // Make all pages reserved for the code heap writable. The mspace allocator, which manages
    // the heap, will take and initialize pages in create_mspace_with_base().
    CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
    exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
    CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
    SetFootprintLimit(current_capacity_);
    // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
    // perform the update and there are no other times write access is required.
    CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
  } else {
    exec_mspace_ = nullptr;
    SetFootprintLimit(current_capacity_);
  }

  VLOG(jit) << "Created jit code cache: initial data size="
            << PrettySize(initial_data_capacity)
            << ", initial code size="
            << PrettySize(initial_exec_capacity);
}

JitCodeCache::~JitCodeCache() {}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return exec_pages_.Begin() <= ptr && ptr < exec_pages_.End();
}
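
// Note that only the executable view is checked here: with dual mapping enabled, a pointer
// into the non-executable view is not considered to be inside the cache by this predicate.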

bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
  ScopedObjectAccess soa(art::Thread::Current());
  ScopedAssertNoThreadSuspension sants(__FUNCTION__);
  if (ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return true;
  } else if (method->GetEntryPointFromQuickCompiledCode() == GetQuickInstrumentationEntryPoint()) {
    return FindCompiledCodeForInstrumentation(method) != nullptr;
  }
  return false;
}

bool JitCodeCache::ContainsMethod(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it != jni_stubs_map_.end() &&
        it->second.IsCompiled() &&
        ContainsElement(it->second.GetMethods(), method)) {
      return true;
    }
  } else {
    for (const auto& it : method_code_map_) {
      if (it.second == method) {
        return true;
      }
    }
  }
  return false;
}

const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
  DCHECK(method->IsNative());
  MutexLock mu(Thread::Current(), lock_);
  auto it = jni_stubs_map_.find(JniStubKey(method));
  if (it != jni_stubs_map_.end()) {
    JniStubData& data = it->second;
    if (data.IsCompiled() && ContainsElement(data.GetMethods(), method)) {
      return data.GetCode();
    }
  }
  return nullptr;
}

const void* JitCodeCache::FindCompiledCodeForInstrumentation(ArtMethod* method) {
  // If jit-gc is still on, the GC itself uses the SavedEntryPoint field, so we cannot use that
  // field to find the instrumentation entrypoint.
  if (LIKELY(GetGarbageCollectCode())) {
    return nullptr;
  }
  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
  if (info == nullptr) {
    return nullptr;
  }
  // When GC is disabled for trampoline tracing we will use SavedEntrypoint to hold the actual
  // jit-compiled version of the method. If jit-gc is disabled for other reasons this will just be
  // nullptr.
  return info->GetSavedEntryPoint();
}

class ScopedCodeCacheWrite : ScopedTrace {
 public:
  explicit ScopedCodeCacheWrite(const JitCodeCache* const code_cache)
      : ScopedTrace("ScopedCodeCacheWrite"),
        code_cache_(code_cache) {
    ScopedTrace trace("mprotect all");
    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
    if (updatable_pages != nullptr) {
      int prot = code_cache_->HasDualCodeMapping() ? kProtRW : kProtRWX;
      CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
    }
  }

  ~ScopedCodeCacheWrite() {
    ScopedTrace trace("mprotect code");
    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
    if (updatable_pages != nullptr) {
      int prot = code_cache_->HasDualCodeMapping() ? kProtR : kProtRX;
      CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
    }
  }

 private:
  const JitCodeCache* const code_cache_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};
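
// Usage sketch: construct on the stack around any write to the code cache, e.g.
//
//   ScopedCodeCacheWrite scc(this);
//   std::copy(code, code + code_size, code_ptr);
//
// The constructor opens the updatable view for writing (RW under dual mapping, RWX for the
// single view) and the destructor restores the read-only protection (R or RX respectively).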

uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  uint8_t* stack_map,
                                  uint8_t* roots_data,
                                  const uint8_t* code,
                                  size_t code_size,
                                  size_t data_size,
                                  bool osr,
                                  const std::vector<Handle<mirror::Object>>& roots,
                                  bool has_should_deoptimize_flag,
                                  const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       stack_map,
                                       roots_data,
                                       code,
                                       code_size,
                                       data_size,
                                       osr,
                                       roots,
                                       has_should_deoptimize_flag,
                                       cha_single_implementation_list);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                stack_map,
                                roots_data,
                                code,
                                code_size,
                                data_size,
                                osr,
                                roots,
                                has_should_deoptimize_flag,
                                cha_single_implementation_list);
  }
  return result;
}

bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}
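
// Allocation layout assumed here (see CommitCodeInternal below): the method header is placed
// directly before the code at instruction-set alignment, so stepping back one rounded-up
// header size from `code` recovers the allocation start:
//
//   [ OatQuickMethodHeader, padded to alignment ][ code ... ]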

static uint32_t ComputeRootTableSize(uint32_t number_of_roots) {
  return sizeof(uint32_t) + number_of_roots * sizeof(GcRoot<mirror::Object>);
}

static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
  // The length of the table is stored just before the stack map (and therefore at the end of
  // the table itself), in order to be able to fetch it from a `stack_map` pointer.
  return reinterpret_cast<const uint32_t*>(stack_map)[-1];
}

static void FillRootTableLength(uint8_t* roots_data, uint32_t length) {
  // Store the length of the table at the end. This will allow fetching it from a `stack_map`
  // pointer.
  reinterpret_cast<uint32_t*>(roots_data)[length] = length;
}

static const uint8_t* FromStackMapToRoots(const uint8_t* stack_map_data) {
  return stack_map_data - ComputeRootTableSize(GetNumberOfRoots(stack_map_data));
}
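
// Data-region layout implied by the helpers above, with the root table placed directly before
// the stack maps:
//
//   [ GcRoot 0 ][ GcRoot 1 ] ... [ GcRoot length-1 ][ uint32_t length ][ stack maps ... ]
//
// This is why GetNumberOfRoots() reads the length at stack_map[-1], and why
// FillRootTableLength() can index the length slot as a uint32_t array element: it relies on
// sizeof(GcRoot<mirror::Object>) == sizeof(uint32_t) (compressed references).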

static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
    REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
  if (!kIsDebugBuild) {
    return;
  }
  // Check the validity of each root.
  for (Handle<mirror::Object> object : roots) {
    // Ensure the string is strongly interned. b/32995596
    if (object->IsString()) {
      ObjPtr<mirror::String> str = object->AsString();
      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
      CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
    }
  }
}

void JitCodeCache::FillRootTable(uint8_t* roots_data,
                                 const std::vector<Handle<mirror::Object>>& roots) {
  GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
  const uint32_t length = roots.size();
  // Put all roots in `roots_data`.
  for (uint32_t i = 0; i < length; ++i) {
    ObjPtr<mirror::Object> object = roots[i].Get();
    gc_roots[i] = GcRoot<mirror::Object>(object);
  }
}

static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
  uint32_t roots = GetNumberOfRoots(data);
  if (number_of_roots != nullptr) {
    *number_of_roots = roots;
  }
  return data - ComputeRootTableSize(roots);
}

// Use a sentinel for marking entries in the JIT table that have been cleared.
// This helps diagnose cases where the compiled code wrongly accesses such
// entries.
static mirror::Class* const weak_sentinel =
    reinterpret_cast<mirror::Class*>(Context::kBadGprBase + 0xff);

// Helper for the GC to process a weak class in a JIT root table.
static inline void ProcessWeakClass(GcRoot<mirror::Class>* root_ptr,
                                    IsMarkedVisitor* visitor,
                                    mirror::Class* update)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // This does not need a read barrier because this is called by GC.
  mirror::Class* cls = root_ptr->Read<kWithoutReadBarrier>();
  if (cls != nullptr && cls != weak_sentinel) {
    DCHECK((cls->IsClass<kDefaultVerifyFlags>()));
    // Look at the classloader of the class to know if it has been unloaded.
    // This does not need a read barrier because this is called by GC.
    mirror::Object* class_loader =
        cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
    if (class_loader == nullptr || visitor->IsMarked(class_loader) != nullptr) {
      // The class loader is live, update the entry if the class has moved.
      mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
      // Note that new_cls can be null for CMS and newly allocated objects.
      if (new_cls != nullptr && new_cls != cls) {
        *root_ptr = GcRoot<mirror::Class>(new_cls);
      }
    } else {
      // The class loader is not live, clear the entry.
      *root_ptr = GcRoot<mirror::Class>(update);
    }
  }
}

void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), lock_);
  for (const auto& entry : method_code_map_) {
    uint32_t number_of_roots = 0;
    uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
    GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
    for (uint32_t i = 0; i < number_of_roots; ++i) {
      // This does not need a read barrier because this is called by GC.
      mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
      if (object == nullptr || object == weak_sentinel) {
        // Entry got deleted in a previous sweep.
      } else if (object->IsString<kDefaultVerifyFlags, kWithoutReadBarrier>()) {
        mirror::Object* new_object = visitor->IsMarked(object);
        // We know the string is marked because it's a strongly-interned string that
        // is always alive. The IsMarked implementation of the CMS collector returns
        // null for newly allocated objects, but we know those haven't moved. Therefore,
        // only update the entry if we get a different non-null string.
        // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
        // out of the weak access/creation pause. b/32167580
        if (new_object != nullptr && new_object != object) {
          DCHECK(new_object->IsString());
          roots[i] = GcRoot<mirror::Object>(new_object);
        }
      } else {
        ProcessWeakClass(
            reinterpret_cast<GcRoot<mirror::Class>*>(&roots[i]), visitor, weak_sentinel);
      }
    }
  }
  // Walk over inline caches to clear entries containing unloaded classes.
  for (ProfilingInfo* info : profiling_infos_) {
    for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
      InlineCache* cache = &info->cache_[i];
      for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
        ProcessWeakClass(&cache->classes_[j], visitor, nullptr);
      }
    }
  }
}

void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  // Notify the native debugger that we are about to remove the code.
  // It does nothing if we are not using a native debugger.
  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
  RemoveNativeDebugInfoForJit(code_ptr);
  if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
    FreeData(GetRootTable(code_ptr));
  }  // else this is a JNI stub without any data.

  uint8_t* code_allocation = reinterpret_cast<uint8_t*>(allocation);
  if (HasDualCodeMapping()) {
    code_allocation = TranslateAddress(code_allocation, exec_pages_, non_exec_pages_);
  }

  FreeCode(code_allocation);
}

void JitCodeCache::FreeAllMethodHeaders(
    const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
  // We need to remove entries in method_headers from CHA dependencies
  // first since once we do FreeCode() below, the memory can be reused
  // so it's possible for the same method_header to start representing
  // different compiled code.
  MutexLock mu(Thread::Current(), lock_);
  {
    MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
    Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
        ->RemoveDependentsWithMethodHeaders(method_headers);
  }

  ScopedCodeCacheWrite scc(this);
  for (const OatQuickMethodHeader* method_header : method_headers) {
    FreeCodeAndData(method_header->GetCode());
  }
}

void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // We use a set to first collect all method_headers whose code needs to be
  // removed. We need to free the underlying code after we remove CHA dependencies
  // for entries in this set. And it's more efficient to iterate through
  // the CHA dependency map just once with an unordered_set.
  std::unordered_set<OatQuickMethodHeader*> method_headers;
  {
    MutexLock mu(self, lock_);
    // We do not check if a code cache GC is in progress, as this method comes
    // with the classlinker_classes_lock_ held, and suspending ourselves could
    // lead to a deadlock.
    {
      ScopedCodeCacheWrite scc(this);
      for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
        it->second.RemoveMethodsIn(alloc);
        if (it->second.GetMethods().empty()) {
          method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->second.GetCode()));
          it = jni_stubs_map_.erase(it);
        } else {
          it->first.UpdateShorty(it->second.GetMethods().front());
          ++it;
        }
      }
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        if (alloc.ContainsUnsafe(it->second)) {
          method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
          it = method_code_map_.erase(it);
        } else {
          ++it;
        }
      }
    }
    for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->first)) {
        // Note that the code has already been pushed to method_headers in the loop
        // above and is going to be removed in FreeCode() below.
        it = osr_code_map_.erase(it);
      } else {
        ++it;
      }
    }
    for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
      ProfilingInfo* info = *it;
      if (alloc.ContainsUnsafe(info->GetMethod())) {
        info->GetMethod()->SetProfilingInfo(nullptr);
        FreeData(reinterpret_cast<uint8_t*>(info));
        it = profiling_infos_.erase(it);
      } else {
        ++it;
      }
    }
  }
  FreeAllMethodHeaders(method_headers);
}

bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
  return kUseReadBarrier
      ? self->GetWeakRefAccessEnabled()
      : is_weak_access_enabled_.load(std::memory_order_seq_cst);
}

void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) {
  if (IsWeakAccessEnabled(self)) {
    return;
  }
  ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
  MutexLock mu(self, lock_);
  while (!IsWeakAccessEnabled(self)) {
    inline_cache_cond_.Wait(self);
  }
}

void JitCodeCache::BroadcastForInlineCacheAccess() {
  Thread* self = Thread::Current();
  MutexLock mu(self, lock_);
  inline_cache_cond_.Broadcast(self);
}

void JitCodeCache::AllowInlineCacheAccess() {
  DCHECK(!kUseReadBarrier);
  is_weak_access_enabled_.store(true, std::memory_order_seq_cst);
  BroadcastForInlineCacheAccess();
}

void JitCodeCache::DisallowInlineCacheAccess() {
  DCHECK(!kUseReadBarrier);
  is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
}

void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
                                       Handle<mirror::ObjectArray<mirror::Class>> array) {
  WaitUntilInlineCacheAccessible(Thread::Current());
  // Note that we don't need to lock `lock_` here, the compiler calling
  // this method has already ensured the inline cache will not be deleted.
  for (size_t in_cache = 0, in_array = 0;
       in_cache < InlineCache::kIndividualCacheSize;
       ++in_cache) {
    mirror::Class* object = ic.classes_[in_cache].Read();
    if (object != nullptr) {
      array->Set(in_array++, object);
    }
  }
}

static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
  if (was_warm) {
    method->SetPreviouslyWarm();
  }
  // We reset the counter to 1 so that the profile knows that the method was executed at least
  // once. This is required for layout purposes.
  // We also need to make sure we'll pass the warmup threshold again, so we set to 0 if
  // the warmup threshold is 1.
  uint16_t jit_warmup_threshold = Runtime::Current()->GetJITOptions()->GetWarmupThreshold();
  method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
}
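
// Example: with a warmup threshold of, say, 2 (hypothetical value), the counter is reset to
// std::min(2 - 1, 1) = 1, recording one execution while still requiring a full re-warmup;
// with a threshold of 1 it is reset to 0 so the threshold can be crossed again.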

void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
  while (collection_in_progress_) {
    lock_.Unlock(self);
    {
      ScopedThreadSuspension sts(self, kSuspended);
      MutexLock mu(self, lock_);
      WaitForPotentialCollectionToComplete(self);
    }
    lock_.Lock(self);
  }
}

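// Returns the view through which code may be written: the non-executable mapping when dual
// views are in use, the executable mapping for the single-view case, or nullptr when the
// cache holds no code pages (profile-data-only configuration).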
Orion Hodson | 1d3fd08 | 2018-09-28 09:38:35 +0100 | [diff] [blame^] | 885 | const MemMap* JitCodeCache::GetUpdatableCodeMapping() const { |
| 886 | if (HasDualCodeMapping()) { |
| 887 | return &non_exec_pages_; |
| 888 | } else if (HasCodeMapping()) { |
| 889 | return &exec_pages_; |
| 890 | } else { |
| 891 | return nullptr; |
| 892 | } |
| 893 | } |
| 894 | |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 895 | uint8_t* JitCodeCache::CommitCodeInternal(Thread* self, |
| 896 | ArtMethod* method, |
Nicolas Geoffray | 132d836 | 2016-11-16 09:19:42 +0000 | [diff] [blame] | 897 | uint8_t* stack_map, |
| 898 | uint8_t* roots_data, |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 899 | const uint8_t* code, |
Nicolas Geoffray | b331feb | 2016-02-05 16:51:53 +0000 | [diff] [blame] | 900 | size_t code_size, |
Orion Hodson | dbd05fe | 2017-08-10 11:41:35 +0100 | [diff] [blame] | 901 | size_t data_size, |
Nicolas Geoffray | 132d836 | 2016-11-16 09:19:42 +0000 | [diff] [blame] | 902 | bool osr, |
Vladimir Marko | ac3ac68 | 2018-09-20 11:01:43 +0100 | [diff] [blame] | 903 | const std::vector<Handle<mirror::Object>>& roots, |
Mingyao Yang | 063fc77 | 2016-08-02 11:02:54 -0700 | [diff] [blame] | 904 | bool has_should_deoptimize_flag, |
| 905 | const ArenaSet<ArtMethod*>& |
| 906 | cha_single_implementation_list) { |
Vladimir Marko | 2196c65 | 2017-11-30 16:16:07 +0000 | [diff] [blame] | 907 | DCHECK(!method->IsNative() || !osr); |
Alex Light | 33b7b5d | 2018-08-07 19:13:51 +0000 | [diff] [blame] | 908 | |
| 909 | if (!method->IsNative()) { |
| 910 | // We need to do this before grabbing the lock_ because it needs to be able to see the string |
| 911 | // InternTable. Native methods do not have roots. |
| 912 | DCheckRootsAreValid(roots); |
| 913 | } |
| 914 | |
  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;

  MutexLock mu(self, lock_);
  // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
  // finish.
  WaitForPotentialCollectionToCompleteRunnable(self);
  {
    ScopedCodeCacheWrite scc(this);

    size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
    // Ensure the header ends up at the expected instruction alignment.
    size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
    size_t total_size = header_size + code_size;

    // AllocateCode allocates memory in the non-executable region for both the header and the
    // code. The header size may include alignment padding.
    uint8_t* nox_memory = AllocateCode(total_size);
    if (nox_memory == nullptr) {
      return nullptr;
    }

    // code_ptr points to the non-executable copy of the code.
    code_ptr = nox_memory + header_size;
    std::copy(code, code + code_size, code_ptr);
    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);

    // From here on, code_ptr points to executable code.
    if (HasDualCodeMapping()) {
      code_ptr = TranslateAddress(code_ptr, non_exec_pages_, exec_pages_);
    }

    new (method_header) OatQuickMethodHeader(
        (stack_map != nullptr) ? code_ptr - stack_map : 0u,
        code_size);

    DCHECK(!Runtime::Current()->IsAotCompiler());
    if (has_should_deoptimize_flag) {
      method_header->SetHasShouldDeoptimizeFlag();
    }

    // Update the method_header pointer to the executable code region.
    if (HasDualCodeMapping()) {
      method_header = TranslateAddress(method_header, non_exec_pages_, exec_pages_);
    }

    // Both instruction and data caches need flushing to the point of unification where both share
    // a common view of memory. Flushing the data cache ensures the dirty cachelines from the
    // newly added code are written out to the point of unification. Flushing the instruction
    // cache ensures the newly written code will be fetched from the point of unification before
    // use. Memory in the code cache is recycled as code is added and removed. The flushes
    // prevent stale code from residing in the instruction cache.
    //
    // Caches are flushed before write permission is removed because some ARMv8 Qualcomm kernels
    // may trigger a segfault if a page fault occurs when requesting a cache maintenance
    // operation. This is a kernel bug that we need to work around until affected devices
    // (e.g. Nexus 5X and 6P) stop being supported or their kernels are fixed.
    //
    // For reference, this behavior is caused by this commit:
    // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
    //
    if (HasDualCodeMapping()) {
      // Flush the data cache lines associated with the non-executable copy of the code just
      // added.
      FlushDataCache(nox_memory, nox_memory + total_size);
    }
    // FlushInstructionCache() flushes both data and instruction cache lines. The cacheline range
    // flushed is for the executable mapping of the code just added.
    FlushInstructionCache(code_ptr, code_ptr + code_size);

    // Ensure CPU instruction pipelines are flushed for all cores. This is necessary for
    // correctness as code may still be in instruction pipelines despite the i-cache flush. It is
    // not safe to assume that changing permissions with mprotect (RX->RWX->RX) will cause a TLB
    // shootdown (incidentally invalidating the CPU pipelines by sending an IPI to all cores to
    // notify them of the TLB invalidation). Some architectures, notably ARM and ARM64, have
    // hardware support that broadcasts TLB invalidations and so their kernels have no software
    // based TLB shootdown. The sync-core flavor of membarrier was introduced in Linux 4.16 to
    // address this (see membarrier(2)). The membarrier here will fail on prior kernels and on
    // platforms lacking the appropriate support.
    art::membarrier(art::MembarrierCommand::kPrivateExpeditedSyncCore);

    number_of_compilations_++;
  }

  // We need to update the entry point in the runnable state for the instrumentation.
  {
    // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
    // compiled code is considered invalidated by some class linking, but below we still make the
    // compiled code valid for the method. We need cha_lock_ for checking all
    // single-implementation flags and for registering dependencies.
    MutexLock cha_mu(self, *Locks::cha_lock_);
    bool single_impl_still_valid = true;
    for (ArtMethod* single_impl : cha_single_implementation_list) {
      if (!single_impl->HasSingleImplementation()) {
        // Simply discard the compiled code. Clear the counter so that it may be recompiled later.
        // Hopefully the class hierarchy will be more stable when compilation is retried.
        single_impl_still_valid = false;
        ClearMethodCounter(method, /*was_warm*/ false);
        break;
      }
    }

    // Discard the code if any single-implementation assumptions are now invalid.
    if (!single_impl_still_valid) {
      VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
      return nullptr;
    }
    DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
        << "Should not be using cha on debuggable apps/runs!";

    for (ArtMethod* single_impl : cha_single_implementation_list) {
      Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()->AddDependency(
          single_impl, method, method_header);
    }

    if (UNLIKELY(method->IsNative())) {
      auto it = jni_stubs_map_.find(JniStubKey(method));
      DCHECK(it != jni_stubs_map_.end())
          << "Entry inserted in NotifyCompilationOf() should be alive.";
      JniStubData* data = &it->second;
      DCHECK(ContainsElement(data->GetMethods(), method))
          << "Entry inserted in NotifyCompilationOf() should contain this method.";
      data->SetCode(code_ptr);
      instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
      for (ArtMethod* m : data->GetMethods()) {
        instrum->UpdateMethodsCode(m, method_header->GetEntryPoint());
      }
    } else {
      // Fill the root table before updating the entry point.
      DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
      DCHECK_LE(roots_data, stack_map);
      FillRootTable(roots_data, roots);
      {
        // Flush the data cache, as the compiled code references literals in it.
        FlushDataCache(roots_data, roots_data + data_size);
      }
      method_code_map_.Put(code_ptr, method);
      if (osr) {
        number_of_osr_compilations_++;
        osr_code_map_.Put(method, code_ptr);
      } else {
        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
            method, method_header->GetEntryPoint());
      }
    }
    VLOG(jit)
        << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
        << ArtMethod::PrettyMethod(method) << "@" << method
        << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
        << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
        << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
        << reinterpret_cast<const void*>(method_header->GetEntryPoint() +
                                         method_header->GetCodeSize());
    histogram_code_memory_use_.AddValue(code_size);
    if (code_size > kCodeSizeLogThreshold) {
      LOG(INFO) << "JIT allocated "
                << PrettySize(code_size)
                << " for compiled code of "
                << ArtMethod::PrettyMethod(method);
    }
  }

  return reinterpret_cast<uint8_t*>(method_header);
}

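// Standalone sketch of the publish sequence discussed above (not ART code; assumes Linux
// with <linux/membarrier.h>, <sys/syscall.h> and <unistd.h>, and a compiler providing
// __builtin___clear_cache; the function name is hypothetical). After instructions are
// written through a writable view, the producer flushes the caches for the executable
// range and then broadcasts a sync-core membarrier so no core keeps executing stale
// instructions from its pipeline.
static void PublishCodeSketch(uint8_t* exec_view, size_t code_size) {
  // Write out dirty data-cache lines and invalidate stale instruction-cache lines.
  __builtin___clear_cache(reinterpret_cast<char*>(exec_view),
                          reinterpret_cast<char*>(exec_view + code_size));
  // Serialize instruction streams on all cores of this process. Requires a prior
  // MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE and a 4.16+ kernel; real
  // code must check the return value of both calls.
  syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0);
}
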
size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return CodeCacheSizeLocked();
}

bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
  // This function is used only for testing and only with non-native methods.
  CHECK(!method->IsNative());

  MutexLock mu(Thread::Current(), lock_);

  bool osr = osr_code_map_.find(method) != osr_code_map_.end();
  bool in_cache = RemoveMethodLocked(method, release_memory);

  if (!in_cache) {
    return false;
  }

  method->ClearCounter();
  Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
      method, GetQuickToInterpreterBridge());
  VLOG(jit)
      << "JIT removed (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
      << ArtMethod::PrettyMethod(method) << "@" << method
      << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
      << " dcache_size=" << PrettySize(DataCacheSizeLocked());
  return true;
}

bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
  if (LIKELY(!method->IsNative())) {
    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
    if (info != nullptr) {
      RemoveElement(profiling_infos_, info);
    }
    method->SetProfilingInfo(nullptr);
  }

  bool in_cache = false;
  ScopedCodeCacheWrite ccw(this);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
      in_cache = true;
      if (it->second.GetMethods().empty()) {
        if (release_memory) {
          FreeCodeAndData(it->second.GetCode());
        }
        jni_stubs_map_.erase(it);
      } else {
        it->first.UpdateShorty(it->second.GetMethods().front());
      }
    }
  } else {
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (it->second == method) {
        in_cache = true;
        if (release_memory) {
          FreeCodeAndData(it->first);
        }
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }

    auto osr_it = osr_code_map_.find(method);
    if (osr_it != osr_code_map_.end()) {
      osr_code_map_.erase(osr_it);
    }
  }

  return in_cache;
}

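// Side note, illustrated standalone (assumes <map>; hypothetical names): the loop above
// uses the only safe way to erase from a std::map while iterating over it --
// std::map::erase returns the iterator following the erased element, so the iterator is
// re-assigned on erase and only advanced otherwise.
static void EraseMatchingSketch(std::map<const void*, int>* m, int victim) {
  for (auto it = m->begin(); it != m->end();) {
    if (it->second == victim) {
      it = m->erase(it);  // Valid: erase invalidates only the erased iterator.
    } else {
      ++it;
    }
  }
}
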
// This notifies the code cache that the given method has been redefined and that it should remove
// any cached information it has on the method. All threads must be suspended before calling this
// method. The compiled code for the method (if there is any) must not be on any thread's call
// stack.
void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  RemoveMethodLocked(method, /* release_memory */ true);
}

// This invalidates old_method. Once this function returns one can no longer use old_method to
// execute code unless it is fixed up. This fixup will happen later in the process of installing a
// class redefinition.
// TODO We should add some info to ArtMethod to note that 'old_method' has been invalidated and
// shouldn't be used since it is no longer logically in the jit code cache.
// TODO We should add DCHECKs that validate that the JIT is paused when this method is entered.
void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
  MutexLock mu(Thread::Current(), lock_);
  if (old_method->IsNative()) {
    // Update methods in jni_stubs_map_.
    for (auto& entry : jni_stubs_map_) {
      JniStubData& data = entry.second;
      data.MoveObsoleteMethod(old_method, new_method);
    }
    return;
  }
  // Update the ProfilingInfo to the new one and remove it from the old_method.
  if (old_method->GetProfilingInfo(kRuntimePointerSize) != nullptr) {
    DCHECK_EQ(old_method->GetProfilingInfo(kRuntimePointerSize)->GetMethod(), old_method);
    ProfilingInfo* info = old_method->GetProfilingInfo(kRuntimePointerSize);
    old_method->SetProfilingInfo(nullptr);
    // Since the JIT should be paused and all threads suspended by the time this is called these
    // checks should always pass.
    DCHECK(!info->IsInUseByCompiler());
    new_method->SetProfilingInfo(info);
    // Get rid of the old saved entrypoint if it is there.
    info->SetSavedEntryPoint(nullptr);
    info->method_ = new_method;
  }
  // Update method_code_map_ to point to the new method.
  for (auto& it : method_code_map_) {
    if (it.second == old_method) {
      it.second = new_method;
    }
  }
  // Update osr_code_map_ to point to the new method.
  auto code_map = osr_code_map_.find(old_method);
  if (code_map != osr_code_map_.end()) {
    osr_code_map_.Put(new_method, code_map->second);
    osr_code_map_.erase(old_method);
  }
}

size_t JitCodeCache::CodeCacheSizeLocked() {
  return used_memory_for_code_;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return DataCacheSizeLocked();
}

size_t JitCodeCache::DataCacheSizeLocked() {
  return used_memory_for_data_;
}

void JitCodeCache::ClearData(Thread* self,
                             uint8_t* stack_map_data,
                             uint8_t* roots_data) {
  DCHECK_EQ(FromStackMapToRoots(stack_map_data), roots_data);
  MutexLock mu(self, lock_);
  FreeData(reinterpret_cast<uint8_t*>(roots_data));
}

size_t JitCodeCache::ReserveData(Thread* self,
                                 size_t stack_map_size,
                                 size_t number_of_roots,
                                 ArtMethod* method,
                                 uint8_t** stack_map_data,
                                 uint8_t** roots_data) {
  size_t table_size = ComputeRootTableSize(number_of_roots);
  size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = AllocateData(size);
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = AllocateData(size);
  }

  MutexLock mu(self, lock_);
  histogram_stack_map_memory_use_.AddValue(size);
  if (size > kStackMapSizeLogThreshold) {
    LOG(INFO) << "JIT allocated "
              << PrettySize(size)
              << " for stack maps of "
              << ArtMethod::PrettyMethod(method);
  }
  if (result != nullptr) {
    *roots_data = result;
    *stack_map_data = result + table_size;
    FillRootTableLength(*roots_data, number_of_roots);
    return size;
  } else {
    *roots_data = nullptr;
    *stack_map_data = nullptr;
    return 0;
  }
}

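// Layout sketch for the reservation above (standalone; names are hypothetical). A single
// data allocation holds the GC-root table first and the stack maps immediately after it,
// which is why the code above sets stack_map_data to roots_data + table_size:
//
//   roots_data                      stack_map_data
//   |                               |
//   v                               v
//   +-------------------------------+---------------------------+
//   | root table (table_size bytes) | stack maps                |
//   +-------------------------------+---------------------------+
static void SplitReservationSketch(uint8_t* chunk, size_t table_size,
                                   uint8_t** roots_out, uint8_t** stack_maps_out) {
  *roots_out = chunk;                    // Root table lives at the start.
  *stack_maps_out = chunk + table_size;  // Stack maps follow immediately.
}
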
class MarkCodeVisitor final : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(code_cache_->GetLiveBitmap()) {}

  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

class MarkCodeClosure final : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
      : code_cache_(code_cache), barrier_(barrier) {}

  void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
    ScopedTrace trace(__PRETTY_FUNCTION__);
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_);
    visitor.WalkStack();
    if (kIsDebugBuild) {
      // The stack walking code queries the side instrumentation stack if it
      // sees an instrumentation exit pc, so the JIT code of methods in that stack
      // must have been seen. We sanity check this below.
      for (const instrumentation::InstrumentationStackFrame& frame
          : *thread->GetInstrumentationStack()) {
        // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
        // its stack frame; it is not the method owning return_pc_. We just pass null to
        // LookupMethodHeader: the method is only checked against in debug builds.
        OatQuickMethodHeader* method_header =
            code_cache_->LookupMethodHeader(frame.return_pc_, /* method */ nullptr);
        if (method_header != nullptr) {
          const void* code = method_header->GetCode();
          CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
        }
      }
    }
    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  Barrier* const barrier_;
};

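// Standalone sketch of the checkpoint/barrier handshake used above (assumes C++20 <latch>
// plus <thread> and <vector>; not ART's Barrier class, and simplified in that it spawns
// workers instead of interrupting existing threads). Each worker scans its own state and
// counts down; the coordinator blocks until every registered worker has run the checkpoint.
static void RunCheckpointSketch(int num_workers) {
  std::latch done(num_workers);
  std::vector<std::thread> workers;
  for (int i = 0; i < num_workers; ++i) {
    workers.emplace_back([&done] {
      // ... mark JIT code found on this thread's stack ...
      done.count_down();
    });
  }
  done.wait();  // All checkpoints have executed past this point.
  for (std::thread& t : workers) {
    t.join();
  }
}
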
void JitCodeCache::NotifyCollectionDone(Thread* self) {
  collection_in_progress_ = false;
  lock_cond_.Broadcast(self);
}

void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
  size_t per_space_footprint = new_footprint / 2;
  DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
  DCHECK_EQ(per_space_footprint * 2, new_footprint);
  mspace_set_footprint_limit(data_mspace_, per_space_footprint);
  if (HasCodeMapping()) {
    ScopedCodeCacheWrite scc(this);
    mspace_set_footprint_limit(exec_mspace_, per_space_footprint);
  }
}

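// Standalone sketch of the two-mspace arrangement (assumes dlmalloc built with MSPACES;
// the bases and sizes are hypothetical). One allocator is carved out of the data pages and
// one out of the code pages, and each is throttled with a footprint limit so that together
// they never exceed the current capacity, mirroring the half/half split above.
static void TwoMspacesSketch(void* data_base, void* code_base, size_t capacity) {
  mspace data_space = create_mspace_with_base(data_base, capacity / 2, /* locked= */ 0);
  mspace code_space = create_mspace_with_base(code_base, capacity / 2, /* locked= */ 0);
  // Each space may use at most half of the total budget.
  mspace_set_footprint_limit(data_space, capacity / 2);
  mspace_set_footprint_limit(code_space, capacity / 2);
  void* p = mspace_malloc(data_space, 128);  // Allocations come from the mapped pages.
  mspace_free(data_space, p);
  (void)code_space;
}
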
bool JitCodeCache::IncreaseCodeCacheCapacity() {
  if (current_capacity_ == max_capacity_) {
    return false;
  }

  // Double the capacity if we're below 1MB, or increase it by 1MB if
  // we're above.
  if (current_capacity_ < 1 * MB) {
    current_capacity_ *= 2;
  } else {
    current_capacity_ += 1 * MB;
  }
  if (current_capacity_ > max_capacity_) {
    current_capacity_ = max_capacity_;
  }

  VLOG(jit) << "Increasing code cache capacity to " << PrettySize(current_capacity_);

  SetFootprintLimit(current_capacity_);

  return true;
}

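// The growth policy above, restated as a pure function (standalone sketch with a
// hypothetical name; assumes <algorithm> and ART's MB constant): capacities double until
// 1MB, then grow linearly by 1MB, always clamped to the configured maximum.
// E.g. with max = 4MB: 64KB -> 128KB -> 256KB -> 512KB -> 1MB -> 2MB -> 3MB -> 4MB (stops).
static size_t NextCapacitySketch(size_t current, size_t max) {
  size_t next = (current < 1 * MB) ? current * 2 : current + 1 * MB;
  return std::min(next, max);
}
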
void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
  Barrier barrier(0);
  size_t threads_running_checkpoint = 0;
  MarkCodeClosure closure(this, &barrier);
  threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
  // Now that we have run our checkpoint, move to a suspended state and wait
  // for other threads to run the checkpoint.
  ScopedThreadSuspension sts(self, kSuspended);
  if (threads_running_checkpoint != 0) {
    barrier.Increment(self, threads_running_checkpoint);
  }
}

bool JitCodeCache::ShouldDoFullCollection() {
  if (current_capacity_ == max_capacity_) {
    // Always do a full collection when the code cache is full.
    return true;
  } else if (current_capacity_ < kReservedCapacity) {
    // Always do a partial collection when the code cache size is below the reserved
    // capacity.
    return false;
  } else if (last_collection_increased_code_cache_) {
    // This time do a full collection.
    return true;
  } else {
    // This time do a partial collection.
    return false;
  }
}

void JitCodeCache::GarbageCollectCache(Thread* self) {
  ScopedTrace trace(__FUNCTION__);
  if (!garbage_collect_code_) {
    MutexLock mu(self, lock_);
    IncreaseCodeCacheCapacity();
    return;
  }

  // Wait for an existing collection, or let everyone know we are starting one.
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    } else {
      number_of_collections_++;
      live_bitmap_.reset(CodeCacheBitmap::Create(
          "code-cache-bitmap",
          reinterpret_cast<uintptr_t>(exec_pages_.Begin()),
          reinterpret_cast<uintptr_t>(exec_pages_.Begin() + current_capacity_ / 2)));
      collection_in_progress_ = true;
    }
  }

  TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
  {
    TimingLogger::ScopedTiming st("Code cache collection", &logger);

    bool do_full_collection = false;
    {
      MutexLock mu(self, lock_);
      do_full_collection = ShouldDoFullCollection();
    }

    VLOG(jit) << "Do "
              << (do_full_collection ? "full" : "partial")
              << " code cache collection, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());

    DoCollection(self, /* collect_profiling_info */ do_full_collection);

    VLOG(jit) << "After code cache collection, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());

    {
      MutexLock mu(self, lock_);

      // Increase the code cache only when we do partial collections.
      // TODO: base this strategy on how full the code cache is?
      if (do_full_collection) {
        last_collection_increased_code_cache_ = false;
      } else {
        last_collection_increased_code_cache_ = true;
        IncreaseCodeCacheCapacity();
      }

      bool next_collection_will_be_full = ShouldDoFullCollection();

      // Start polling the liveness of compiled code to prepare for the next full collection.
      if (next_collection_will_be_full) {
        // Save the entry point of methods we have compiled, and update the entry
        // point of those methods to the interpreter. If the method is invoked, the
        // interpreter will update its entry point to the compiled code and call it.
        for (ProfilingInfo* info : profiling_infos_) {
          const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
          if (ContainsPc(entry_point)) {
            info->SetSavedEntryPoint(entry_point);
            // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
            // class of the method. We may be concurrently running a GC which makes accessing
            // the class unsafe. We know it is OK to bypass the instrumentation as we've just
            // checked that the current entry point is JIT compiled code.
            info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
          }
        }

        DCHECK(CheckLiveCompiledCodeHasProfilingInfo());

        // Change entry points of native methods back to the GenericJNI entrypoint.
        for (const auto& entry : jni_stubs_map_) {
          const JniStubData& data = entry.second;
          if (!data.IsCompiled()) {
            continue;
          }
          // Make sure a single invocation of the GenericJNI trampoline tries to recompile.
          uint16_t new_counter = Runtime::Current()->GetJit()->HotMethodThreshold() - 1u;
          const OatQuickMethodHeader* method_header =
              OatQuickMethodHeader::FromCodePointer(data.GetCode());
          for (ArtMethod* method : data.GetMethods()) {
            if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
              // Don't call Instrumentation::UpdateMethodsCode(), same as for normal methods above.
              method->SetCounter(new_counter);
              method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
            }
          }
        }
      }
      live_bitmap_.reset(nullptr);
      NotifyCollectionDone(self);
    }
  }
  Runtime::Current()->GetJit()->AddTimingLogger(logger);
}

void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
  ScopedTrace trace(__FUNCTION__);
  std::unordered_set<OatQuickMethodHeader*> method_headers;
  {
    MutexLock mu(self, lock_);
    ScopedCodeCacheWrite scc(this);
    // Iterate over all compiled code and remove entries that are not marked.
    for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
      JniStubData* data = &it->second;
      if (!data->IsCompiled() || GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
        ++it;
      } else {
        method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
        it = jni_stubs_map_.erase(it);
      }
    }
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      const void* code_ptr = it->first;
      uintptr_t allocation = FromCodeToAllocation(code_ptr);
      if (GetLiveBitmap()->Test(allocation)) {
        ++it;
      } else {
        OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        method_headers.insert(header);
        it = method_code_map_.erase(it);
      }
    }
  }
  FreeAllMethodHeaders(method_headers);
}

void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
  ScopedTrace trace(__FUNCTION__);
  {
    MutexLock mu(self, lock_);
    if (collect_profiling_info) {
      // Clear the profiling info of methods that do not have compiled code as entry point.
      // Also remove the saved entry point from the ProfilingInfo objects.
      for (ProfilingInfo* info : profiling_infos_) {
        const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
        if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) {
          info->GetMethod()->SetProfilingInfo(nullptr);
        }

        if (info->GetSavedEntryPoint() != nullptr) {
          info->SetSavedEntryPoint(nullptr);
          // We are going to move this method back to the interpreter. Clear the counter now to
          // give it a chance to be hot again.
          ClearMethodCounter(info->GetMethod(), /*was_warm*/ true);
        }
      }
    } else if (kIsDebugBuild) {
      // Sanity check that the profiling infos do not have a dangling entry point.
      for (ProfilingInfo* info : profiling_infos_) {
        DCHECK(info->GetSavedEntryPoint() == nullptr);
      }
    }

    // Mark compiled code that is the entry point of an ArtMethod. Compiled code that is not
    // an entry point is either:
    // - an osr compiled code, that will be removed if not in a thread call stack.
    // - discarded compiled code, that will be removed if not in a thread call stack.
    for (const auto& entry : jni_stubs_map_) {
      const JniStubData& data = entry.second;
      const void* code_ptr = data.GetCode();
      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      for (ArtMethod* method : data.GetMethods()) {
        if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
          GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
          break;
        }
      }
    }
    for (const auto& it : method_code_map_) {
      ArtMethod* method = it.second;
      const void* code_ptr = it.first;
      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
        GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
      }
    }

    // Empty the osr method map, as osr compiled code will be deleted (except the ones
    // on thread stacks).
    osr_code_map_.clear();
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  MarkCompiledCodeOnThreadStacks(self);

  // At this point, mutator threads are still running, and entrypoints of methods can
  // change. We do know they cannot change to a code cache entry that is not marked,
  // therefore we can safely remove those entries.
  RemoveUnmarkedCode(self);

  if (collect_profiling_info) {
    MutexLock mu(self, lock_);
    // Free all profiling infos of methods that are neither compiled nor being compiled.
    auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
      [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
        const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
        // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope
        // that the compiled code would not get revived. As mutator threads run concurrently,
        // they may have revived the compiled code, and now we are in the situation where
        // a method has compiled code but no ProfilingInfo.
        // We make sure compiled methods have a ProfilingInfo object. It is needed for
        // code cache collection.
        if (ContainsPc(ptr) &&
            info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
          info->GetMethod()->SetProfilingInfo(info);
        } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
          // No need for this ProfilingInfo object anymore.
          FreeData(reinterpret_cast<uint8_t*>(info));
          return true;
        }
        return false;
      });
    profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());
    DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
  }
}

bool JitCodeCache::CheckLiveCompiledCodeHasProfilingInfo() {
  ScopedTrace trace(__FUNCTION__);
  // Check that methods we have compiled do have a ProfilingInfo object. We would
  // have memory leaks of compiled code otherwise.
  for (const auto& it : method_code_map_) {
    ArtMethod* method = it.second;
    if (method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
      const void* code_ptr = it.first;
      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
        // If the code is not dead, then we have a problem. Note that this can even
        // happen just after a collection, as mutator threads are running in parallel
        // and could deoptimize an existing compiled code.
        return false;
      }
    }
  }
  return true;
}

OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
  static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
  if (kRuntimeISA == InstructionSet::kArm) {
    // On Thumb-2, the pc is offset by one.
    --pc;
  }
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  if (!kIsDebugBuild) {
    // Called with null `method` only from MarkCodeClosure::Run() in debug build.
    CHECK(method != nullptr);
  }

  MutexLock mu(Thread::Current(), lock_);
  OatQuickMethodHeader* method_header = nullptr;
  ArtMethod* found_method = nullptr;  // Only for DCHECK(), not for JNI stubs.
  if (method != nullptr && UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it == jni_stubs_map_.end() || !ContainsElement(it->second.GetMethods(), method)) {
      return nullptr;
    }
    const void* code_ptr = it->second.GetCode();
    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
    if (!method_header->Contains(pc)) {
      return nullptr;
    }
  } else {
    auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
    if (it != method_code_map_.begin()) {
      --it;
      const void* code_ptr = it->first;
      if (OatQuickMethodHeader::FromCodePointer(code_ptr)->Contains(pc)) {
        method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        found_method = it->second;
      }
    }
    if (method_header == nullptr && method == nullptr) {
      // Scan all compiled JNI stubs as well. This slow search is used only
      // for checks in debug build; for release builds the `method` is not null.
      for (auto&& entry : jni_stubs_map_) {
        const JniStubData& data = entry.second;
        if (data.IsCompiled() &&
            OatQuickMethodHeader::FromCodePointer(data.GetCode())->Contains(pc)) {
          method_header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
        }
      }
    }
    if (method_header == nullptr) {
      return nullptr;
    }
  }

  if (kIsDebugBuild && method != nullptr && !method->IsNative()) {
    // When we are walking the stack to redefine classes and creating obsolete methods it is
    // possible that we might have updated the method_code_map by making this method obsolete in a
    // previous frame. Therefore we should just check that the non-obsolete version of this method
    // is the one we expect. We change to the non-obsolete versions in the error message since the
    // obsolete version of the method might not be fully initialized yet. This situation can only
    // occur when we are in the process of allocating and setting up obsolete methods. Otherwise
    // method and it->second should be identical. (See openjdkjvmti/ti_redefine.cc for more
    // information.)
    DCHECK_EQ(found_method->GetNonObsoleteMethod(), method->GetNonObsoleteMethod())
        << ArtMethod::PrettyMethod(method->GetNonObsoleteMethod()) << " "
        << ArtMethod::PrettyMethod(found_method->GetNonObsoleteMethod()) << " "
        << std::hex << pc;
  }
  return method_header;
}

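// Standalone sketch of the lower_bound lookup above (assumes <map> and <cstddef>; the
// types and names are hypothetical). Code ranges are keyed by their start address, so the
// candidate owner of `pc` is the last entry whose start is <= pc: lower_bound returns the
// first entry with start >= pc, hence we take it directly on an exact match and otherwise
// step back one entry.
struct RangeSketch {
  const uint8_t* start;
  size_t size;
};

static const RangeSketch* FindRangeSketch(const std::map<const uint8_t*, RangeSketch>& ranges,
                                          const uint8_t* pc) {
  auto it = ranges.lower_bound(pc);
  if (it != ranges.end() && it->first == pc) {
    return &it->second;  // pc is the first byte of this range.
  }
  if (it == ranges.begin()) {
    return nullptr;  // Every range starts after pc.
  }
  --it;  // Last range starting before pc.
  const RangeSketch& range = it->second;
  return (pc < range.start + range.size) ? &range : nullptr;
}
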
OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  auto it = osr_code_map_.find(method);
  if (it == osr_code_map_.end()) {
    return nullptr;
  }
  return OatQuickMethodHeader::FromCodePointer(it->second);
}

ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                              ArtMethod* method,
                                              const std::vector<uint32_t>& entries,
                                              bool retry_allocation)
    // No thread safety analysis as we are using TryLock/Unlock explicitly.
    NO_THREAD_SAFETY_ANALYSIS {
  ProfilingInfo* info = nullptr;
  if (!retry_allocation) {
    // If we are allocating for the interpreter, just try to lock, to avoid
    // lock contention with the JIT.
    if (lock_.ExclusiveTryLock(self)) {
      info = AddProfilingInfoInternal(self, method, entries);
      lock_.ExclusiveUnlock(self);
    }
  } else {
    {
      MutexLock mu(self, lock_);
      info = AddProfilingInfoInternal(self, method, entries);
    }

    if (info == nullptr) {
      GarbageCollectCache(self);
      MutexLock mu(self, lock_);
      info = AddProfilingInfoInternal(self, method, entries);
    }
  }
  return info;
}

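// Standalone sketch of the opportunistic locking above (assumes <mutex>; hypothetical
// names). A caller that must not stall (here: the interpreter) attempts the lock and
// simply gives up on contention, while a caller that can afford to wait takes the lock
// unconditionally and may retry after freeing space.
static std::mutex gProfilingSketchLock;

static bool TryPublishSketch(bool can_block) {
  if (!can_block) {
    if (!gProfilingSketchLock.try_lock()) {
      return false;  // Contended: skip the work rather than stall.
    }
    // ... do the allocation ...
    gProfilingSketchLock.unlock();
    return true;
  }
  std::lock_guard<std::mutex> lock(gProfilingSketchLock);
  // ... do the allocation, possibly retrying after a collection ...
  return true;
}
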
Nicolas Geoffray | 1e7da9b | 2016-03-01 14:11:40 +0000 | [diff] [blame] | 1757 | ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED, |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1758 | ArtMethod* method, |
| 1759 | const std::vector<uint32_t>& entries) { |
| 1760 | size_t profile_info_size = RoundUp( |
Nicolas Geoffray | 73be1e8 | 2015-09-17 15:22:56 +0100 | [diff] [blame] | 1761 | sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(), |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1762 | sizeof(void*)); |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1763 | |
| 1764 | // Check whether some other thread has concurrently created it. |
Andreas Gampe | 542451c | 2016-07-26 09:02:02 -0700 | [diff] [blame] | 1765 | ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize); |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1766 | if (info != nullptr) { |
| 1767 | return info; |
| 1768 | } |
| 1769 | |
Nicolas Geoffray | 38ea9bd | 2016-02-19 16:25:57 +0000 | [diff] [blame] | 1770 | uint8_t* data = AllocateData(profile_info_size); |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1771 | if (data == nullptr) { |
| 1772 | return nullptr; |
| 1773 | } |
| 1774 | info = new (data) ProfilingInfo(method, entries); |
Nicolas Geoffray | 07f3564 | 2016-01-04 16:06:51 +0000 | [diff] [blame] | 1775 | |
| 1776 | // Make sure other threads see the data in the profiling info object before the |
| 1777 | // store in the ArtMethod's ProfilingInfo pointer. |
Orion Hodson | 27b9676 | 2018-03-13 16:06:57 +0000 | [diff] [blame] | 1778 | std::atomic_thread_fence(std::memory_order_release); |
Nicolas Geoffray | 07f3564 | 2016-01-04 16:06:51 +0000 | [diff] [blame] | 1779 | |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1780 | method->SetProfilingInfo(info); |
| 1781 | profiling_infos_.push_back(info); |
Nicolas Geoffray | 933330a | 2016-03-16 14:20:06 +0000 | [diff] [blame] | 1782 | histogram_profiling_info_memory_use_.AddValue(profile_info_size); |
Nicolas Geoffray | 26705e2 | 2015-10-28 12:50:11 +0000 | [diff] [blame] | 1783 | return info; |
| 1784 | } |

// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
// is already held.
void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
  if (mspace == exec_mspace_) {
    DCHECK(exec_mspace_ != nullptr);
    const MemMap* const code_pages = GetUpdatableCodeMapping();
    void* result = code_pages->Begin() + exec_end_;
    exec_end_ += increment;
    return result;
  } else {
    DCHECK_EQ(data_mspace_, mspace);
    void* result = data_pages_.Begin() + data_end_;
    data_end_ += increment;
    return result;
  }
}
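
// MoreCore() fills dlmalloc's morecore role for both mspaces: the allocator
// asks for `increment` more bytes and gets back address space that was already
// reserved when the code/data mappings were created, so no new pages are
// mapped here. A minimal sketch of the contract, assuming mspaces created with
// create_mspace_with_base() over reserved mappings and eliding the
// JitCodeCache receiver:
//
//   void* old_break = MoreCore(data_mspace_, 0);      // increment 0 queries the current break
//   void* chunk = MoreCore(data_mspace_, increment);  // grow; returns the old break
//   DCHECK_EQ(chunk, old_break);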

void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                                      std::vector<ProfileMethodInfo>& methods) {
  ScopedTrace trace(__FUNCTION__);
  MutexLock mu(Thread::Current(), lock_);
  uint16_t jit_compile_threshold = Runtime::Current()->GetJITOptions()->GetCompileThreshold();
  for (const ProfilingInfo* info : profiling_infos_) {
    ArtMethod* method = info->GetMethod();
    const DexFile* dex_file = method->GetDexFile();
    const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
    if (!ContainsElement(dex_base_locations, base_location)) {
      // Skip dex files which are not profiled.
      continue;
    }
    std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;

    // If the method didn't reach the compilation threshold, don't save the inline caches.
    // They might be incomplete and cause unnecessary deoptimizations.
    // If the inline cache is empty the compiler will generate a regular invoke virtual/interface.
    if (method->GetCounter() < jit_compile_threshold) {
      methods.emplace_back(/*ProfileMethodInfo*/
          MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
      continue;
    }

    for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
      std::vector<TypeReference> profile_classes;
      const InlineCache& cache = info->cache_[i];
      ArtMethod* caller = info->GetMethod();
      bool is_missing_types = false;
      for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) {
        mirror::Class* cls = cache.classes_[k].Read();
        if (cls == nullptr) {
          break;
        }

        // Check if the receiver is in the boot class path or if it's in the
        // same class loader as the caller. If not, skip it, as there is not
        // much we can do during AOT.
        if (!cls->IsBootStrapClassLoaded() &&
            caller->GetClassLoader() != cls->GetClassLoader()) {
          is_missing_types = true;
          continue;
        }

        const DexFile* class_dex_file = nullptr;
        dex::TypeIndex type_index;

        if (cls->GetDexCache() == nullptr) {
          DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
          // Make a best effort to find the type index in the method's dex file.
          // We could search all open dex files, but that might turn out to be
          // expensive and is probably not worth it.
          class_dex_file = dex_file;
          type_index = cls->FindTypeIndexInOtherDexFile(*dex_file);
        } else {
          class_dex_file = &(cls->GetDexFile());
          type_index = cls->GetDexTypeIndex();
        }
        if (!type_index.IsValid()) {
          // Could be a proxy class or an array for which we couldn't find the type index.
          is_missing_types = true;
          continue;
        }
        if (ContainsElement(dex_base_locations,
                            DexFileLoader::GetBaseLocation(class_dex_file->GetLocation()))) {
          // Only consider classes from the same apk (including multidex).
          profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
              class_dex_file, type_index);
        } else {
          is_missing_types = true;
        }
      }
      if (!profile_classes.empty()) {
        inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/
            cache.dex_pc_, is_missing_types, profile_classes);
      }
    }
    methods.emplace_back(/*ProfileMethodInfo*/
        MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
  }
}
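
// Hedged usage sketch (the caller shown is an assumption; this is the kind of
// pass a profile saver would make over an app's dex files):
//
//   std::set<std::string> locations;
//   locations.insert(DexFileLoader::GetBaseLocation(dex_file->GetLocation()));
//   std::vector<ProfileMethodInfo> methods;
//   Runtime::Current()->GetJit()->GetCodeCache()->GetProfiledMethods(locations, methods);
//
// Multidex entries share a base location, so one base location per apk is
// enough to match all of its dex files.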

bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  return osr_code_map_.find(method) != osr_code_map_.end();
}

bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr) {
  if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return false;
  }

  MutexLock mu(self, lock_);
  if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) {
    return false;
  }

  if (UNLIKELY(method->IsNative())) {
    JniStubKey key(method);
    auto it = jni_stubs_map_.find(key);
    bool new_compilation = false;
    if (it == jni_stubs_map_.end()) {
      // Create a new entry to mark the stub as being compiled.
      it = jni_stubs_map_.Put(key, JniStubData{});
      new_compilation = true;
    }
    JniStubData* data = &it->second;
    data->AddMethod(method);
    if (data->IsCompiled()) {
      OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(data->GetCode());
      const void* entrypoint = method_header->GetEntryPoint();
      // Also update the entrypoints of the other methods held by the JniStubData.
      // We could simply update the entrypoint of `method`, but if the last JIT GC has
      // changed these entrypoints to GenericJNI in preparation for a full GC, we may
      // as well change them back, as this stub shall not be collected anyway, and this
      // avoids a few expensive GenericJNI calls.
      instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
      for (ArtMethod* m : data->GetMethods()) {
        // Call the dedicated method instead of the more generic UpdateMethodsCode, because
        // `m` might be in the process of being deleted.
        instrumentation->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
      }
      if (collection_in_progress_) {
        GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
      }
    }
    return new_compilation;
  } else {
    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
    if (info == nullptr) {
      VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
      // Because the counter is not atomic, there are some rare cases where we may not hit the
      // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
      ClearMethodCounter(method, /*was_warm*/ false);
      return false;
    }

    if (info->IsMethodBeingCompiled(osr)) {
      return false;
    }

    info->SetIsMethodBeingCompiled(true, osr);
    return true;
  }
}
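
// The handshake implied above: a true return from NotifyCompilationOf() hands
// the caller responsibility for balancing it with DoneCompiling() once the
// attempt finishes, whether or not code was committed. A hedged sketch (the
// compiler call is hypothetical):
//
//   if (code_cache->NotifyCompilationOf(method, self, osr)) {
//     CompileMethod(method, self, osr);  // hypothetical; may or may not commit code
//     code_cache->DoneCompiling(method, self, osr);
//   }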

ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
  MutexLock mu(self, lock_);
  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
  if (info != nullptr) {
    if (!info->IncrementInlineUse()) {
      // Overflow of inlining uses, just bail.
      return nullptr;
    }
  }
  return info;
}

void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
  MutexLock mu(self, lock_);
  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
  DCHECK(info != nullptr);
  info->DecrementInlineUse();
}
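
// NotifyCompilerUse()/DoneCompilerUse() bracket the window in which the
// inliner may read a method's inline caches; the inline-use count they
// maintain is what keeps the ProfilingInfo alive across a concurrent cache
// collection. Expected pairing (a sketch, not a call site from this file):
//
//   ProfilingInfo* info = code_cache->NotifyCompilerUse(method, self);
//   if (info != nullptr) {
//     // ... read the inline caches ...
//     code_cache->DoneCompilerUse(method, self);
//   }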

void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr) {
  DCHECK_EQ(Thread::Current(), self);
  MutexLock mu(self, lock_);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    DCHECK(it != jni_stubs_map_.end());
    JniStubData* data = &it->second;
    DCHECK(ContainsElement(data->GetMethods(), method));
    if (UNLIKELY(!data->IsCompiled())) {
      // Failed to compile; the JNI compiler never fails, but the cache may be full.
      jni_stubs_map_.erase(it);  // Remove the entry added in NotifyCompilationOf().
    }  // else CommitCodeInternal() updated the entrypoints of all methods in the JniStubData.
  } else {
    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
    DCHECK(info->IsMethodBeingCompiled(osr));
    info->SetIsMethodBeingCompiled(false, osr);
  }
}

size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_usable_size(reinterpret_cast<const void*>(FromCodeToAllocation(ptr)));
}

void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
                                             const OatQuickMethodHeader* header) {
  DCHECK(!method->IsNative());
  ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
  const void* method_entrypoint = method->GetEntryPointFromQuickCompiledCode();
  if ((profiling_info != nullptr) &&
      (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
    // When instrumentation is set, the actual entrypoint is the one in the profiling info.
    method_entrypoint = profiling_info->GetSavedEntryPoint();
    // Prevent future uses of the compiled code.
    profiling_info->SetSavedEntryPoint(nullptr);
  }

  // Clear the method counter if we are running jitted code, since we might want to jit it
  // again in the future.
  if (method_entrypoint == header->GetEntryPoint()) {
    // The entrypoint is the one to invalidate, so we just update it to the interpreter entry
    // point and clear the counter to get the method jitted again.
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, GetQuickToInterpreterBridge());
    ClearMethodCounter(method, /*was_warm*/ profiling_info != nullptr);
  } else {
    MutexLock mu(Thread::Current(), lock_);
    auto it = osr_code_map_.find(method);
    if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
      // Remove the OSR method, to avoid using it again.
      osr_code_map_.erase(it);
    }
  }
}

uint8_t* JitCodeCache::AllocateCode(size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  uint8_t* result = reinterpret_cast<uint8_t*>(
      mspace_memalign(exec_mspace_, alignment, code_size));
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  // Ensure the header ends up at expected instruction alignment.
  DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
  used_memory_for_code_ += mspace_usable_size(result);
  return result;
}
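
// Resulting allocation layout, as an illustration (placing the header
// immediately before the code is an assumption based on how
// CommitCodeInternal() fills these allocations in):
//
//   result                                     result + header_size
//   |  padding  |  OatQuickMethodHeader  |  machine code ...
//                                        ^ code entry, instruction-aligned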

void JitCodeCache::FreeCode(uint8_t* code) {
  used_memory_for_code_ -= mspace_usable_size(code);
  mspace_free(exec_mspace_, code);
}

uint8_t* JitCodeCache::AllocateData(size_t data_size) {
  void* result = mspace_malloc(data_mspace_, data_size);
  used_memory_for_data_ += mspace_usable_size(result);
  return reinterpret_cast<uint8_t*>(result);
}

void JitCodeCache::FreeData(uint8_t* data) {
  used_memory_for_data_ -= mspace_usable_size(data);
  mspace_free(data_mspace_, data);
}
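
// Both the code and data accounting above charge mspace_usable_size() rather
// than the requested size, so used_memory_for_code_ and used_memory_for_data_
// stay balanced across Allocate*/Free* even when dlmalloc rounds a request up
// to its internal chunk size.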

void JitCodeCache::Dump(std::ostream& os) {
  MutexLock mu(Thread::Current(), lock_);
  MutexLock mu2(Thread::Current(), *Locks::native_debug_interface_lock_);
  os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
     << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
     << "Current JIT mini-debug-info size: " << PrettySize(GetJitNativeDebugInfoMemUsage()) << "\n"
     << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
     << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
     << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
     << "Total number of JIT compilations: " << number_of_compilations_ << "\n"
     << "Total number of JIT compilations for on stack replacement: "
     << number_of_osr_compilations_ << "\n"
     << "Total number of JIT code cache collections: " << number_of_collections_ << std::endl;
  histogram_stack_map_memory_use_.PrintMemoryUse(os);
  histogram_code_memory_use_.PrintMemoryUse(os);
  histogram_profiling_info_memory_use_.PrintMemoryUse(os);
}

}  // namespace jit
}  // namespace art