/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "android-base/unique_fd.h"

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/membarrier.h"
#include "base/memfd.h"
#include "base/mem_map.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "cha.h"
#include "debugger_interface.h"
#include "dex/dex_file_loader.h"
#include "dex/method_reference.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "gc/allocator/dlmalloc.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle.h"
#include "instrumentation.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "oat_file-inl.h"
#include "oat_quick_method_header.h"
#include "object_callbacks.h"
#include "profile/profile_compilation_info.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"

using android::base::unique_fd;

namespace art {
namespace jit {

static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;

// The data cache will be half of the capacity.
// The code cache will be the other half of the capacity.
// TODO: Make this variable?
static constexpr size_t kCodeAndDataCapacityDivider = 2;

static constexpr int kProtR = PROT_READ;
static constexpr int kProtRW = PROT_READ | PROT_WRITE;
static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtRX = PROT_READ | PROT_EXEC;

namespace {

// Translate an address belonging to one memory map into an address in a second. This is useful
// when there are two virtual memory ranges for the same physical memory range.
template <typename T>
T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
  CHECK(src.HasAddress(src_ptr));
  uint8_t* const raw_src_ptr = reinterpret_cast<uint8_t*>(src_ptr);
  return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
}
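
// Illustrative sketch only (names mirror the members used later in this
// file): with a dual view, new code is written through the writable mapping
// while execution happens through the read-execute one, e.g.
//
//   uint8_t* writable = TranslateAddress(code_ptr, exec_pages, non_exec_pages);
//   memcpy(writable, compiled_code, code_size);  // Patch via the RW view.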

}  // namespace

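// Key identifying a compiled JNI stub. Native methods with the same shorty
// and the same static/fast-native/critical-native/synchronized flags can
// share a single stub, so the key captures exactly those properties.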
class JitCodeCache::JniStubKey {
 public:
  explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
      : shorty_(method->GetShorty()),
        is_static_(method->IsStatic()),
        is_fast_native_(method->IsFastNative()),
        is_critical_native_(method->IsCriticalNative()),
        is_synchronized_(method->IsSynchronized()) {
    DCHECK(!(is_fast_native_ && is_critical_native_));
  }

  bool operator<(const JniStubKey& rhs) const {
    if (is_static_ != rhs.is_static_) {
      return rhs.is_static_;
    }
    if (is_synchronized_ != rhs.is_synchronized_) {
      return rhs.is_synchronized_;
    }
    if (is_fast_native_ != rhs.is_fast_native_) {
      return rhs.is_fast_native_;
    }
    if (is_critical_native_ != rhs.is_critical_native_) {
      return rhs.is_critical_native_;
    }
    return strcmp(shorty_, rhs.shorty_) < 0;
  }

  // Update the shorty to point to another method's shorty. Call this function when removing
  // the method that references the old shorty from JniStubData without removing the entire
  // JniStubData; the old shorty may become a dangling pointer when that method is unloaded.
  void UpdateShorty(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* shorty = method->GetShorty();
    DCHECK_STREQ(shorty_, shorty);
    shorty_ = shorty;
  }

 private:
  // The shorty points into a DexFile's data and may need to change
  // to point to the same shorty in a different DexFile.
  mutable const char* shorty_;

  const bool is_static_;
  const bool is_fast_native_;
  const bool is_critical_native_;
  const bool is_synchronized_;
};

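// Compiled code for one JNI stub, together with the methods that currently
// share it.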
class JitCodeCache::JniStubData {
 public:
  JniStubData() : code_(nullptr), methods_() {}

  void SetCode(const void* code) {
    DCHECK(code != nullptr);
    code_ = code;
  }

  const void* GetCode() const {
    return code_;
  }

  bool IsCompiled() const {
    return GetCode() != nullptr;
  }

  void AddMethod(ArtMethod* method) {
    if (!ContainsElement(methods_, method)) {
      methods_.push_back(method);
    }
  }

  const std::vector<ArtMethod*>& GetMethods() const {
    return methods_;
  }

  void RemoveMethodsIn(const LinearAlloc& alloc) {
    auto kept_end = std::remove_if(
        methods_.begin(),
        methods_.end(),
        [&alloc](ArtMethod* method) { return alloc.ContainsUnsafe(method); });
    methods_.erase(kept_end, methods_.end());
  }

  bool RemoveMethod(ArtMethod* method) {
    auto it = std::find(methods_.begin(), methods_.end(), method);
    if (it != methods_.end()) {
      methods_.erase(it);
      return true;
    } else {
      return false;
    }
  }

  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
    std::replace(methods_.begin(), methods_.end(), old_method, new_method);
  }

 private:
  const void* code_;
  std::vector<ArtMethod*> methods_;
};

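// Reserve the JIT data and code mappings. When a memfd is available (and the
// process is not the zygote), the code pages get a dual view: a writable
// mapping used for updates and a separate read-execute mapping used for
// execution, so the cache never needs pages that are writable and executable
// at the same time.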
bool JitCodeCache::InitializeMappings(bool rwx_memory_allowed,
                                      bool is_zygote,
                                      std::string* error_msg) {
  ScopedTrace trace(__PRETTY_FUNCTION__);

  const size_t capacity = max_capacity_;
  const size_t data_capacity = capacity / kCodeAndDataCapacityDivider;
  const size_t exec_capacity = capacity - data_capacity;

  // File descriptor enabling dual-view mapping of code section.
  unique_fd mem_fd;

  // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
  // for it.
  if (!is_zygote) {
    // Bionic supports memfd_create, but the call may fail on older kernels.
    mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
    if (mem_fd.get() < 0) {
      std::ostringstream oss;
      oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
      if (!rwx_memory_allowed) {
        // Without RWX page permissions, the JIT cannot fall back to a single mapping, as
        // that requires transitioning the code pages to RWX for updates.
        *error_msg = oss.str();
        return false;
      }
      VLOG(jit) << oss.str();
    }
  }

  if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
    std::ostringstream oss;
    oss << "Failed to initialize memory file: " << strerror(errno);
    *error_msg = oss.str();
    return false;
  }

  std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
  std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";

  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  // Map in low 4gb to simplify accessing root tables for x86_64.
  // We could do PC-relative addressing to avoid this problem, but that
  // would require reserving code and data area before submitting, which
  // means more windows for the code memory to be RWX.
  int base_flags;
  MemMap data_pages;
  if (mem_fd.get() >= 0) {
    // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
    // for data and non-writable view of JIT code pages. We use the memory file descriptor to
    // enable dual mapping - we'll create a second mapping using the descriptor below. The
    // mappings will look like:
    //
    //          VA                  PA
    //
    //          +---------------+
    //          | non exec code |\
    //          +---------------+ \
    //          :               :\ \
    //          +---------------+.\.+---------------+
    //          |   exec code   |  \|     code      |
    //          +---------------+...+---------------+
    //          |      data     |   |     data      |
    //          +---------------+...+---------------+
    //
    // In this configuration code updates are written to the non-executable view of the code
    // cache, and the executable view of the code cache has fixed RX memory protections.
    //
    // This memory needs to be mapped shared as the code portions will have two mappings.
    base_flags = MAP_SHARED;
    data_pages = MemMap::MapFile(
        data_capacity + exec_capacity,
        kProtRW,
        base_flags,
        mem_fd,
        /* start= */ 0,
        /* low_4gb= */ true,
        data_cache_name.c_str(),
        &error_str);
  } else {
    // Single view of JIT code cache case. Create an initial mapping of data pages large enough
    // for data and JIT code pages. The mappings will look like:
    //
    //          VA                  PA
    //
    //          +---------------+...+---------------+
    //          |   exec code   |   |     code      |
    //          +---------------+...+---------------+
    //          |      data     |   |     data      |
    //          +---------------+...+---------------+
    //
    // In this configuration code updates are written to the executable view of the code cache,
    // and the executable view of the code cache transitions RX to RWX for the update and then
    // back to RX after the update.
    base_flags = MAP_PRIVATE | MAP_ANON;
    data_pages = MemMap::MapAnonymous(
        data_cache_name.c_str(),
        data_capacity + exec_capacity,
        kProtRW,
        /* low_4gb= */ true,
        &error_str);
  }

  if (!data_pages.IsValid()) {
    std::ostringstream oss;
    oss << "Failed to create read write cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return false;
  }

  MemMap exec_pages;
  MemMap non_exec_pages;
  if (exec_capacity > 0) {
    uint8_t* const divider = data_pages.Begin() + data_capacity;
    // Set initial permission for executable view to catch any SELinux permission problems early
    // (for processes that cannot map WX pages). Otherwise, this region does not need to be
    // executable as there is no code in the cache yet.
    exec_pages = data_pages.RemapAtEnd(divider,
                                       exec_cache_name.c_str(),
                                       kProtRX,
                                       base_flags | MAP_FIXED,
                                       mem_fd.get(),
                                       (mem_fd.get() >= 0) ? data_capacity : 0,
                                       &error_str);
    if (!exec_pages.IsValid()) {
      std::ostringstream oss;
      oss << "Failed to create read execute code cache: " << error_str << " size=" << capacity;
      *error_msg = oss.str();
      return false;
    }

    if (mem_fd.get() >= 0) {
      // For dual view, create the secondary view of code memory used for updating code. This view
      // is never executable.
      std::string name = exec_cache_name + "-rw";
      non_exec_pages = MemMap::MapFile(exec_capacity,
                                       kProtR,
                                       base_flags,
                                       mem_fd,
                                       /* start= */ data_capacity,
                                       /* low_4gb= */ false,
                                       name.c_str(),
                                       &error_str);
      if (!non_exec_pages.IsValid()) {
        static const char* kFailedNxView = "Failed to map non-executable view of JIT code cache";
        if (rwx_memory_allowed) {
          // Log and continue as single view JIT (requires RWX memory).
          VLOG(jit) << kFailedNxView;
        } else {
          *error_msg = kFailedNxView;
          return false;
        }
      }
    }
  } else {
    // Profiling only. No memory for code required.
  }

  data_pages_ = std::move(data_pages);
  exec_pages_ = std::move(exec_pages);
  non_exec_pages_ = std::move(non_exec_pages);
  return true;
}

JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
                                   bool rwx_memory_allowed,
                                   bool is_zygote,
                                   std::string* error_msg) {
  // Register for membarrier expedited sync core if JIT will be generating code.
  if (!used_only_for_profile_data) {
    if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
      // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
      // flushed and it's used when adding code to the JIT. The memory used by the new code may
      // have just been released and, in theory, the old code could still be in a pipeline.
      VLOG(jit) << "Kernel does not support membarrier sync-core";
    }
  }

  // Check whether the provided max capacity in options is below 1GB.
  size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
  // We need to have 32 bit offsets from method headers in code cache which point to things
  // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
  // Ensure we're below 1 GB to be safe.
  if (max_capacity > 1 * GB) {
    std::ostringstream oss;
    oss << "Maximum code cache capacity is limited to 1 GB, "
        << PrettySize(max_capacity) << " is too big";
    *error_msg = oss.str();
    return nullptr;
  }

  size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();

  std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());

  MutexLock mu(Thread::Current(), jit_code_cache->lock_);
  jit_code_cache->InitializeState(initial_capacity, max_capacity);

  // Zygote should never collect code to share the memory with the children.
  if (is_zygote) {
    jit_code_cache->SetGarbageCollectCode(false);
  }

  if (!jit_code_cache->InitializeMappings(rwx_memory_allowed, is_zygote, error_msg)) {
    return nullptr;
  }

  jit_code_cache->InitializeSpaces();

  VLOG(jit) << "Created jit code cache: initial capacity="
            << PrettySize(initial_capacity)
            << ", maximum capacity="
            << PrettySize(max_capacity);

  return jit_code_cache.release();
}

JitCodeCache::JitCodeCache()
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache condition variable", lock_),
      collection_in_progress_(false),
      last_collection_increased_code_cache_(false),
      garbage_collect_code_(true),
      used_memory_for_data_(0),
      used_memory_for_code_(0),
      number_of_compilations_(0),
      number_of_osr_compilations_(0),
      number_of_collections_(0),
      histogram_stack_map_memory_use_("Memory used for stack maps", 16),
      histogram_code_memory_use_("Memory used for compiled code", 16),
      histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
      is_weak_access_enabled_(true),
      inline_cache_cond_("Jit inline cache condition variable", lock_),
      zygote_data_pages_(),
      zygote_exec_pages_(),
      zygote_data_mspace_(nullptr),
      zygote_exec_mspace_(nullptr) {
}

void JitCodeCache::InitializeState(size_t initial_capacity, size_t max_capacity) {
  CHECK_GE(max_capacity, initial_capacity);
  CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
  // Align both capacities to page size, as that's the unit mspaces use.
  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
  max_capacity = RoundDown(max_capacity, 2 * kPageSize);

  data_pages_ = MemMap();
  exec_pages_ = MemMap();
  non_exec_pages_ = MemMap();
  initial_capacity_ = initial_capacity;
  max_capacity_ = max_capacity;
  current_capacity_ = initial_capacity;
  data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
  exec_end_ = initial_capacity - data_end_;
}

void JitCodeCache::InitializeSpaces() {
  // Initialize the data heap.
  data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
  CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";

  // Initialize the code heap.
  MemMap* code_heap = nullptr;
  if (non_exec_pages_.IsValid()) {
    code_heap = &non_exec_pages_;
  } else if (exec_pages_.IsValid()) {
    code_heap = &exec_pages_;
  }
  if (code_heap != nullptr) {
    // Make all pages reserved for the code heap writable. The mspace allocator, which manages
    // the heap, will take and initialize pages in create_mspace_with_base().
    CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
    exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
    CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
    SetFootprintLimit(initial_capacity_);
    // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
    // perform the update and there are no other times write access is required.
    CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
  } else {
    exec_mspace_ = nullptr;
    SetFootprintLimit(initial_capacity_);
  }
}

JitCodeCache::~JitCodeCache() {}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return exec_pages_.Begin() <= ptr && ptr < exec_pages_.End();
}

bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
  ScopedObjectAccess soa(art::Thread::Current());
  ScopedAssertNoThreadSuspension sants(__FUNCTION__);
  if (ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return true;
  } else if (method->GetEntryPointFromQuickCompiledCode() == GetQuickInstrumentationEntryPoint()) {
    return FindCompiledCodeForInstrumentation(method) != nullptr;
  }
  return false;
}

bool JitCodeCache::ContainsMethod(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it != jni_stubs_map_.end() &&
        it->second.IsCompiled() &&
        ContainsElement(it->second.GetMethods(), method)) {
      return true;
    }
  } else {
    for (const auto& it : method_code_map_) {
      if (it.second == method) {
        return true;
      }
    }
  }
  return false;
}

const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
  DCHECK(method->IsNative());
  MutexLock mu(Thread::Current(), lock_);
  auto it = jni_stubs_map_.find(JniStubKey(method));
  if (it != jni_stubs_map_.end()) {
    JniStubData& data = it->second;
    if (data.IsCompiled() && ContainsElement(data.GetMethods(), method)) {
      return data.GetCode();
    }
  }
  return nullptr;
}

const void* JitCodeCache::FindCompiledCodeForInstrumentation(ArtMethod* method) {
  // If jit-gc is still on, the SavedEntryPoint field is in use by the GC, so we cannot use it
  // to find the instrumentation entrypoint.
  if (LIKELY(GetGarbageCollectCode())) {
    return nullptr;
  }
  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
  if (info == nullptr) {
    return nullptr;
  }
  // When GC is disabled for trampoline tracing we will use SavedEntrypoint to hold the actual
  // jit-compiled version of the method. If jit-gc is disabled for other reasons this will just be
  // nullptr.
  return info->GetSavedEntryPoint();
}

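// RAII helper: makes the updatable view of the code cache writable for the
// duration of its scope and restores the tighter protection on destruction
// (RW <-> R with a dual mapping, RWX <-> RX with a single mapping).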
class ScopedCodeCacheWrite : ScopedTrace {
 public:
  explicit ScopedCodeCacheWrite(const JitCodeCache* const code_cache)
      : ScopedTrace("ScopedCodeCacheWrite"),
        code_cache_(code_cache) {
    ScopedTrace trace("mprotect all");
    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
    if (updatable_pages != nullptr) {
      int prot = code_cache_->HasDualCodeMapping() ? kProtRW : kProtRWX;
      CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
    }
  }

  ~ScopedCodeCacheWrite() {
    ScopedTrace trace("mprotect code");
    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
    if (updatable_pages != nullptr) {
      int prot = code_cache_->HasDualCodeMapping() ? kProtR : kProtRX;
      CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
    }
  }

 private:
  const JitCodeCache* const code_cache_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};

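// Commit compiled code and its metadata for `method`. If the initial attempt
// fails because the cache is full, run one code cache collection and retry
// once before giving up.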
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  uint8_t* stack_map,
                                  uint8_t* roots_data,
                                  const uint8_t* code,
                                  size_t code_size,
                                  size_t data_size,
                                  bool osr,
                                  const std::vector<Handle<mirror::Object>>& roots,
                                  bool has_should_deoptimize_flag,
                                  const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       stack_map,
                                       roots_data,
                                       code,
                                       code_size,
                                       data_size,
                                       osr,
                                       roots,
                                       has_should_deoptimize_flag,
                                       cha_single_implementation_list);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                stack_map,
                                roots_data,
                                code,
                                code_size,
                                data_size,
                                osr,
                                roots,
                                has_should_deoptimize_flag,
                                cha_single_implementation_list);
  }
  return result;
}

bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

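// The OatQuickMethodHeader is allocated (at instruction-set alignment)
// immediately before the code it describes; stepping back over it yields the
// start of the underlying allocation.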
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

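// Layout of the JIT root table in `roots_data`, with n roots (a GcRoot is a
// 32-bit compressed reference, the same size as the uint32_t count):
//
//   [GcRoot 0] ... [GcRoot n-1] [uint32_t n] [stack maps / CodeInfo ...]
//
// Storing the count at the end lets it be read back as stack_map[-1].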
static uint32_t ComputeRootTableSize(uint32_t number_of_roots) {
  return sizeof(uint32_t) + number_of_roots * sizeof(GcRoot<mirror::Object>);
}

static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
  // The length of the table is stored just before the stack map (and therefore at the end of
  // the table itself), in order to be able to fetch it from a `stack_map` pointer.
  return reinterpret_cast<const uint32_t*>(stack_map)[-1];
}

static void FillRootTableLength(uint8_t* roots_data, uint32_t length) {
  // Store the length of the table at the end. This will allow fetching it from a `stack_map`
  // pointer.
  reinterpret_cast<uint32_t*>(roots_data)[length] = length;
}

static const uint8_t* FromStackMapToRoots(const uint8_t* stack_map_data) {
  return stack_map_data - ComputeRootTableSize(GetNumberOfRoots(stack_map_data));
}

static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
    REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
  if (!kIsDebugBuild) {
    return;
  }
  // Check the validity of all roots before they are committed.
  for (Handle<mirror::Object> object : roots) {
    // Ensure the string is strongly interned. b/32995596
    if (object->IsString()) {
      ObjPtr<mirror::String> str = object->AsString();
      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
      CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
    }
  }
}

void JitCodeCache::FillRootTable(uint8_t* roots_data,
                                 const std::vector<Handle<mirror::Object>>& roots) {
  GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
  const uint32_t length = roots.size();
  // Put all roots in `roots_data`.
  for (uint32_t i = 0; i < length; ++i) {
    ObjPtr<mirror::Object> object = roots[i].Get();
    gc_roots[i] = GcRoot<mirror::Object>(object);
  }
}

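// Given a pointer to compiled code, recover the root table that precedes the
// method's CodeInfo, optionally reporting how many roots it holds.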
static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
  uint32_t roots = GetNumberOfRoots(data);
  if (number_of_roots != nullptr) {
    *number_of_roots = roots;
  }
  return data - ComputeRootTableSize(roots);
}

// Use a sentinel for marking entries in the JIT table that have been cleared.
// This helps diagnosing in case the compiled code tries to wrongly access such
// entries.
static mirror::Class* const weak_sentinel =
    reinterpret_cast<mirror::Class*>(Context::kBadGprBase + 0xff);

// Helper for the GC to process a weak class in a JIT root table.
static inline void ProcessWeakClass(GcRoot<mirror::Class>* root_ptr,
                                    IsMarkedVisitor* visitor,
                                    mirror::Class* update)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // This does not need a read barrier because this is called by GC.
  mirror::Class* cls = root_ptr->Read<kWithoutReadBarrier>();
  if (cls != nullptr && cls != weak_sentinel) {
    DCHECK((cls->IsClass<kDefaultVerifyFlags>()));
    // Look at the classloader of the class to know if it has been unloaded.
    // This does not need a read barrier because this is called by GC.
    mirror::Object* class_loader =
        cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
    if (class_loader == nullptr || visitor->IsMarked(class_loader) != nullptr) {
      // The class loader is live, update the entry if the class has moved.
      mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
      // Note that new_object can be null for CMS and newly allocated objects.
      if (new_cls != nullptr && new_cls != cls) {
        *root_ptr = GcRoot<mirror::Class>(new_cls);
      }
    } else {
      // The class loader is not live, clear the entry.
      *root_ptr = GcRoot<mirror::Class>(update);
    }
  }
}

void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), lock_);
  for (const auto& entry : method_code_map_) {
    uint32_t number_of_roots = 0;
    uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
    GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
    for (uint32_t i = 0; i < number_of_roots; ++i) {
      // This does not need a read barrier because this is called by GC.
      mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
      if (object == nullptr || object == weak_sentinel) {
        // entry got deleted in a previous sweep.
      } else if (object->IsString<kDefaultVerifyFlags, kWithoutReadBarrier>()) {
        mirror::Object* new_object = visitor->IsMarked(object);
        // We know the string is marked because it's a strongly-interned string that
        // is always alive. The IsMarked implementation of the CMS collector returns
        // null for newly allocated objects, but we know those haven't moved. Therefore,
        // only update the entry if we get a different non-null string.
        // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
        // out of the weak access/creation pause. b/32167580
        if (new_object != nullptr && new_object != object) {
          DCHECK(new_object->IsString());
          roots[i] = GcRoot<mirror::Object>(new_object);
        }
      } else {
        ProcessWeakClass(
            reinterpret_cast<GcRoot<mirror::Class>*>(&roots[i]), visitor, weak_sentinel);
      }
    }
  }
  // Walk over inline caches to clear entries containing unloaded classes.
  for (ProfilingInfo* info : profiling_infos_) {
    for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
      InlineCache* cache = &info->cache_[i];
      for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
        ProcessWeakClass(&cache->classes_[j], visitor, nullptr);
      }
    }
  }
}

void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  // Notify the native debugger that we are about to remove the code.
  // It does nothing if we are not using a native debugger.
  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
  RemoveNativeDebugInfoForJit(code_ptr);
  if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
    FreeData(GetRootTable(code_ptr));
  }  // else this is a JNI stub without any data.

  uint8_t* code_allocation = reinterpret_cast<uint8_t*>(allocation);
  if (HasDualCodeMapping()) {
    code_allocation = TranslateAddress(code_allocation, exec_pages_, non_exec_pages_);
  }

  FreeCode(code_allocation);
}

void JitCodeCache::FreeAllMethodHeaders(
    const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
  // We need to remove entries in method_headers from CHA dependencies first,
  // since once we do FreeCode() below the memory can be reused, making it
  // possible for the same method_header to start representing different
  // compiled code.
  MutexLock mu(Thread::Current(), lock_);
  {
    MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
    Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
        ->RemoveDependentsWithMethodHeaders(method_headers);
  }

  ScopedCodeCacheWrite scc(this);
  for (const OatQuickMethodHeader* method_header : method_headers) {
    FreeCodeAndData(method_header->GetCode());
  }
}

void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // We use a set to first collect all method_headers whose code needs to be
  // removed. We need to free the underlying code after we remove CHA dependencies
  // for entries in this set, and it's more efficient to iterate through
  // the CHA dependency map just once with an unordered_set.
  std::unordered_set<OatQuickMethodHeader*> method_headers;
  {
    MutexLock mu(self, lock_);
    // We do not check if a code cache GC is in progress, as this method comes
    // with the classlinker_classes_lock_ held, and suspending ourselves could
    // lead to a deadlock.
    {
      ScopedCodeCacheWrite scc(this);
      for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
        it->second.RemoveMethodsIn(alloc);
        if (it->second.GetMethods().empty()) {
          method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->second.GetCode()));
          it = jni_stubs_map_.erase(it);
        } else {
          it->first.UpdateShorty(it->second.GetMethods().front());
          ++it;
        }
      }
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        if (alloc.ContainsUnsafe(it->second)) {
          method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
          it = method_code_map_.erase(it);
        } else {
          ++it;
        }
      }
    }
    for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->first)) {
        // Note that the code has already been pushed to method_headers in the loop
        // above and is going to be removed in FreeCode() below.
        it = osr_code_map_.erase(it);
      } else {
        ++it;
      }
    }
    for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
      ProfilingInfo* info = *it;
      if (alloc.ContainsUnsafe(info->GetMethod())) {
        info->GetMethod()->SetProfilingInfo(nullptr);
        FreeData(reinterpret_cast<uint8_t*>(info));
        it = profiling_infos_.erase(it);
      } else {
        ++it;
      }
    }
  }
  FreeAllMethodHeaders(method_headers);
}

bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
  return kUseReadBarrier
      ? self->GetWeakRefAccessEnabled()
      : is_weak_access_enabled_.load(std::memory_order_seq_cst);
}

void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) {
  if (IsWeakAccessEnabled(self)) {
    return;
  }
  ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
  MutexLock mu(self, lock_);
  while (!IsWeakAccessEnabled(self)) {
    inline_cache_cond_.Wait(self);
  }
}

void JitCodeCache::BroadcastForInlineCacheAccess() {
  Thread* self = Thread::Current();
  MutexLock mu(self, lock_);
  inline_cache_cond_.Broadcast(self);
}

void JitCodeCache::AllowInlineCacheAccess() {
  DCHECK(!kUseReadBarrier);
  is_weak_access_enabled_.store(true, std::memory_order_seq_cst);
  BroadcastForInlineCacheAccess();
}

void JitCodeCache::DisallowInlineCacheAccess() {
  DCHECK(!kUseReadBarrier);
  is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
}

void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
                                       Handle<mirror::ObjectArray<mirror::Class>> array) {
  WaitUntilInlineCacheAccessible(Thread::Current());
  // Note that we don't need to lock `lock_` here; the compiler calling
  // this method has already ensured the inline cache will not be deleted.
| 885 | for (size_t in_cache = 0, in_array = 0; |
| 886 | in_cache < InlineCache::kIndividualCacheSize; |
| 887 | ++in_cache) { |
| 888 | mirror::Class* object = ic.classes_[in_cache].Read(); |
| 889 | if (object != nullptr) { |
| 890 | array->Set(in_array++, object); |
Nicolas Geoffray | b6e20ae | 2016-03-07 14:29:04 +0000 | [diff] [blame] | 891 | } |
| 892 | } |
| 893 | } |
| 894 | |
Mathieu Chartier | f044c22 | 2017-05-31 15:27:54 -0700 | [diff] [blame] | 895 | static void ClearMethodCounter(ArtMethod* method, bool was_warm) { |
| 896 | if (was_warm) { |
Vladimir Marko | c945e0d | 2018-07-18 17:26:45 +0100 | [diff] [blame] | 897 | method->SetPreviouslyWarm(); |
Mathieu Chartier | f044c22 | 2017-05-31 15:27:54 -0700 | [diff] [blame] | 898 | } |
| 899 | // We reset the counter to 1 so that the profile knows that the method was executed at least once. |
| 900 | // This is required for layout purposes. |
Nicolas Geoffray | 88f50b1 | 2017-06-09 16:08:47 +0100 | [diff] [blame] | 901 | // We also need to make sure we'll pass the warmup threshold again, so we set to 0 if |
| 902 | // the warmup threshold is 1. |
  uint16_t jit_warmup_threshold = Runtime::Current()->GetJITOptions()->GetWarmupThreshold();
  method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
}

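// Waits for any in-progress code cache collection to finish while keeping the thread
// runnable overall: `lock_` is held on entry and on exit, and is released (with the thread
// suspended) around each wait so that the collector can make progress.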
void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
  while (collection_in_progress_) {
    lock_.Unlock(self);
    {
      ScopedThreadSuspension sts(self, kSuspended);
      MutexLock mu(self, lock_);
      WaitForPotentialCollectionToComplete(self);
    }
    lock_.Lock(self);
  }
}

const MemMap* JitCodeCache::GetUpdatableCodeMapping() const {
  if (HasDualCodeMapping()) {
    return &non_exec_pages_;
  } else if (HasCodeMapping()) {
    return &exec_pages_;
  } else {
    return nullptr;
  }
}

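// Commits compiled code for `method` to the cache. With a dual code mapping, the code is
// written through the non-executable mapping and the resulting pointers are translated to
// the executable mapping. Returns the address of the OatQuickMethodHeader on success, or
// nullptr if allocation fails or single-implementation assumptions became invalid.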
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          uint8_t* stack_map,
                                          uint8_t* roots_data,
                                          const uint8_t* code,
                                          size_t code_size,
                                          size_t data_size,
                                          bool osr,
                                          const std::vector<Handle<mirror::Object>>& roots,
                                          bool has_should_deoptimize_flag,
                                          const ArenaSet<ArtMethod*>&
                                              cha_single_implementation_list) {
  DCHECK(!method->IsNative() || !osr);

  if (!method->IsNative()) {
    // We need to do this before grabbing the lock_ because it needs to be able to see the string
    // InternTable. Native methods do not have roots.
    DCheckRootsAreValid(roots);
  }

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;

  MutexLock mu(self, lock_);
  // We need to make sure that there will be no JIT GCs going on and wait for any ongoing one to
  // finish.
  WaitForPotentialCollectionToCompleteRunnable(self);
  {
    ScopedCodeCacheWrite scc(this);

    size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
    // Ensure the header ends up at the expected instruction alignment.
    size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
    size_t total_size = header_size + code_size;

    // AllocateCode allocates memory in the non-executable region for the header and the code.
    // The header size may include alignment padding.
    uint8_t* nox_memory = AllocateCode(total_size);
    if (nox_memory == nullptr) {
      return nullptr;
    }

    // code_ptr points to non-executable code.
    code_ptr = nox_memory + header_size;
    std::copy(code, code + code_size, code_ptr);
    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
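    // The allocation is now laid out as
    //   [alignment padding][OatQuickMethodHeader][code]
    // with `code_ptr` instruction-aligned and the header immediately preceding the code.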

    // From here code_ptr points to executable code.
    if (HasDualCodeMapping()) {
      code_ptr = TranslateAddress(code_ptr, non_exec_pages_, exec_pages_);
    }

    new (method_header) OatQuickMethodHeader(
        (stack_map != nullptr) ? code_ptr - stack_map : 0u,
        code_size);

    DCHECK(!Runtime::Current()->IsAotCompiler());
    if (has_should_deoptimize_flag) {
      method_header->SetHasShouldDeoptimizeFlag();
    }

    // Update method_header pointer to executable code region.
    if (HasDualCodeMapping()) {
      method_header = TranslateAddress(method_header, non_exec_pages_, exec_pages_);
    }

    // Both instruction and data caches need flushing to the point of unification where both share
    // a common view of memory. Flushing the data cache ensures the dirty cachelines from the
    // newly added code are written out to the point of unification. Flushing the instruction
    // cache ensures the newly written code will be fetched from the point of unification before
    // use. Memory in the code cache is re-cycled as code is added and removed. The flushes
    // prevent stale code from residing in the instruction cache.
    //
    // Caches are flushed before write permission is removed because some ARMv8 Qualcomm kernels
    // may trigger a segfault if a page fault occurs when requesting a cache maintenance
    // operation. This is a kernel bug that we need to work around until affected devices
    // (e.g. Nexus 5X and 6P) stop being supported or their kernels are fixed.
    //
    // For reference, this behavior is caused by this commit:
    // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
    //
    if (HasDualCodeMapping()) {
      // Flush the data cache lines associated with the non-executable copy of the code just added.
      FlushDataCache(nox_memory, nox_memory + total_size);
    }
    // FlushInstructionCache() flushes both data and instruction cache lines. The cacheline range
    // flushed is for the executable mapping of the code just added.
    FlushInstructionCache(code_ptr, code_ptr + code_size);

    // Ensure CPU instruction pipelines are flushed for all cores. This is necessary for
    // correctness as code may still be in instruction pipelines despite the i-cache flush. It is
    // not safe to assume that changing permissions with mprotect (RX->RWX->RX) will cause a TLB
    // shootdown (incidentally invalidating the CPU pipelines by sending an IPI to all cores to
    // notify them of the TLB invalidation). Some architectures, notably ARM and ARM64, have
    // hardware support that broadcasts TLB invalidations and so their kernels have no software
    // based TLB shootdown. The sync-core flavor of membarrier was introduced in Linux 4.16 to
    // address this (see membarrier(2)). The membarrier here will fail on prior kernels and on
    // platforms lacking the appropriate support.
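    // The return value of the call below is not checked: if the kernel lacks the required
    // support, the cache flushes above are the only synchronization performed.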
    art::membarrier(art::MembarrierCommand::kPrivateExpeditedSyncCore);

    number_of_compilations_++;
  }

  // We need to update the entry point in the runnable state for the instrumentation.
  {
    // The following needs to be guarded by cha_lock_ as well. Otherwise it is possible that the
    // compiled code is considered invalidated by some class linking, while below we still make
    // the compiled code valid for the method. We need cha_lock_ to check all
    // single-implementation flags and to register dependencies.
    MutexLock cha_mu(self, *Locks::cha_lock_);
    bool single_impl_still_valid = true;
    for (ArtMethod* single_impl : cha_single_implementation_list) {
      if (!single_impl->HasSingleImplementation()) {
        // Simply discard the compiled code. Clear the counter so that it may be recompiled later.
        // Hopefully the class hierarchy will be more stable when compilation is retried.
        single_impl_still_valid = false;
        ClearMethodCounter(method, /*was_warm=*/ false);
        break;
      }
    }

    // Discard the code if any single-implementation assumptions are now invalid.
    if (!single_impl_still_valid) {
      VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
      return nullptr;
    }
    DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
        << "Should not be using cha on debuggable apps/runs!";

    for (ArtMethod* single_impl : cha_single_implementation_list) {
      Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()->AddDependency(
          single_impl, method, method_header);
    }

    if (UNLIKELY(method->IsNative())) {
      auto it = jni_stubs_map_.find(JniStubKey(method));
      DCHECK(it != jni_stubs_map_.end())
          << "Entry inserted in NotifyCompilationOf() should be alive.";
      JniStubData* data = &it->second;
      DCHECK(ContainsElement(data->GetMethods(), method))
          << "Entry inserted in NotifyCompilationOf() should contain this method.";
      data->SetCode(code_ptr);
      instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
      for (ArtMethod* m : data->GetMethods()) {
        instrum->UpdateMethodsCode(m, method_header->GetEntryPoint());
      }
    } else {
      // Fill the root table before updating the entry point.
      DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
      DCHECK_LE(roots_data, stack_map);
      FillRootTable(roots_data, roots);
      {
        // Flush data cache, as compiled code references literals in it.
        FlushDataCache(roots_data, roots_data + data_size);
      }
      method_code_map_.Put(code_ptr, method);
      if (osr) {
        number_of_osr_compilations_++;
        osr_code_map_.Put(method, code_ptr);
      } else {
        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
            method, method_header->GetEntryPoint());
      }
    }
    VLOG(jit)
        << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
        << ArtMethod::PrettyMethod(method) << "@" << method
        << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
        << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
        << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
        << reinterpret_cast<const void*>(method_header->GetEntryPoint() +
                                         method_header->GetCodeSize());
    histogram_code_memory_use_.AddValue(code_size);
    if (code_size > kCodeSizeLogThreshold) {
      LOG(INFO) << "JIT allocated "
                << PrettySize(code_size)
                << " for compiled code of "
                << ArtMethod::PrettyMethod(method);
    }
  }

  return reinterpret_cast<uint8_t*>(method_header);
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return CodeCacheSizeLocked();
}

bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
  // This function is used only for testing and only with non-native methods.
  CHECK(!method->IsNative());

  MutexLock mu(Thread::Current(), lock_);

  bool osr = osr_code_map_.find(method) != osr_code_map_.end();
  bool in_cache = RemoveMethodLocked(method, release_memory);

  if (!in_cache) {
    return false;
  }

  method->ClearCounter();
  Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
      method, GetQuickToInterpreterBridge());
  VLOG(jit)
      << "JIT removed (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
      << ArtMethod::PrettyMethod(method) << "@" << method
      << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
      << " dcache_size=" << PrettySize(DataCacheSizeLocked());
  return true;
}

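// Removes `method` from jni_stubs_map_, or from method_code_map_ and osr_code_map_,
// optionally freeing its code and data. Returns whether the method was found in the cache.
// The caller must hold lock_.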
bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
  if (LIKELY(!method->IsNative())) {
    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
    if (info != nullptr) {
      RemoveElement(profiling_infos_, info);
    }
    method->SetProfilingInfo(nullptr);
  }

  bool in_cache = false;
  ScopedCodeCacheWrite ccw(this);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
      in_cache = true;
      if (it->second.GetMethods().empty()) {
        if (release_memory) {
          FreeCodeAndData(it->second.GetCode());
        }
        jni_stubs_map_.erase(it);
      } else {
        it->first.UpdateShorty(it->second.GetMethods().front());
      }
    }
  } else {
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (it->second == method) {
        in_cache = true;
        if (release_memory) {
          FreeCodeAndData(it->first);
        }
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }

    auto osr_it = osr_code_map_.find(method);
    if (osr_it != osr_code_map_.end()) {
      osr_code_map_.erase(osr_it);
    }
  }

  return in_cache;
}

// This notifies the code cache that the given method has been redefined and that it should remove
// any cached information it has on the method. All threads must be suspended before calling this
// method. The compiled code for the method (if there is any) must not be in any thread's call
// stack.
void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  RemoveMethodLocked(method, /* release_memory= */ true);
}

// This invalidates old_method. Once this function returns, one can no longer use old_method to
// execute code unless it is fixed up. This fixup will happen later in the process of installing a
// class redefinition.
// TODO We should add some info to ArtMethod to note that 'old_method' has been invalidated and
// shouldn't be used since it is no longer logically in the jit code cache.
// TODO We should add DCHECKs that validate that the JIT is paused when this method is entered.
void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
  MutexLock mu(Thread::Current(), lock_);
  if (old_method->IsNative()) {
    // Update methods in jni_stubs_map_.
    for (auto& entry : jni_stubs_map_) {
      JniStubData& data = entry.second;
      data.MoveObsoleteMethod(old_method, new_method);
    }
    return;
  }
  // Update the ProfilingInfo to the new one and remove it from the old_method.
  if (old_method->GetProfilingInfo(kRuntimePointerSize) != nullptr) {
    DCHECK_EQ(old_method->GetProfilingInfo(kRuntimePointerSize)->GetMethod(), old_method);
    ProfilingInfo* info = old_method->GetProfilingInfo(kRuntimePointerSize);
    old_method->SetProfilingInfo(nullptr);
    // Since the JIT should be paused and all threads suspended by the time this is called these
    // checks should always pass.
    DCHECK(!info->IsInUseByCompiler());
    new_method->SetProfilingInfo(info);
    // Get rid of the old saved entrypoint if it is there.
    info->SetSavedEntryPoint(nullptr);
    info->method_ = new_method;
  }
  // Update method_code_map_ to point to the new method.
  for (auto& it : method_code_map_) {
    if (it.second == old_method) {
      it.second = new_method;
    }
  }
  // Update osr_code_map_ to point to the new method.
  auto code_map = osr_code_map_.find(old_method);
  if (code_map != osr_code_map_.end()) {
    osr_code_map_.Put(new_method, code_map->second);
    osr_code_map_.erase(old_method);
  }
}

size_t JitCodeCache::CodeCacheSizeLocked() {
  return used_memory_for_code_;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return DataCacheSizeLocked();
}

size_t JitCodeCache::DataCacheSizeLocked() {
  return used_memory_for_data_;
}

void JitCodeCache::ClearData(Thread* self,
                             uint8_t* stack_map_data,
                             uint8_t* roots_data) {
  DCHECK_EQ(FromStackMapToRoots(stack_map_data), roots_data);
  MutexLock mu(self, lock_);
  FreeData(reinterpret_cast<uint8_t*>(roots_data));
}

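// Reserves one data-region allocation holding the GC-root table followed by the stack maps,
// retrying once after a cache collection if the first attempt fails. On success, returns the
// reserved size and sets *roots_data and *stack_map_data; on failure, returns 0 and sets both
// to nullptr.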
size_t JitCodeCache::ReserveData(Thread* self,
                                 size_t stack_map_size,
                                 size_t number_of_roots,
                                 ArtMethod* method,
                                 uint8_t** stack_map_data,
                                 uint8_t** roots_data) {
  size_t table_size = ComputeRootTableSize(number_of_roots);
  size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = AllocateData(size);
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = AllocateData(size);
  }

  MutexLock mu(self, lock_);
  histogram_stack_map_memory_use_.AddValue(size);
  if (size > kStackMapSizeLogThreshold) {
    LOG(INFO) << "JIT allocated "
              << PrettySize(size)
              << " for stack maps of "
              << ArtMethod::PrettyMethod(method);
  }
  if (result != nullptr) {
    *roots_data = result;
    *stack_map_data = result + table_size;
    FillRootTableLength(*roots_data, number_of_roots);
    return size;
  } else {
    *roots_data = nullptr;
    *stack_map_data = nullptr;
    return 0;
  }
}

class MarkCodeVisitor final : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in, CodeCacheBitmap* bitmap)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(bitmap) {}

  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

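// Checkpoint closure run on each thread: walks the thread's stack with MarkCodeVisitor so
// that any JIT code the thread is currently executing gets marked live in the bitmap.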
class MarkCodeClosure final : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, CodeCacheBitmap* bitmap, Barrier* barrier)
      : code_cache_(code_cache), bitmap_(bitmap), barrier_(barrier) {}

  void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
    ScopedTrace trace(__PRETTY_FUNCTION__);
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_, bitmap_);
    visitor.WalkStack();
    if (kIsDebugBuild) {
      // The stack walking code queries the side instrumentation stack if it
      // sees an instrumentation exit pc, so the JIT code of methods in that stack
      // must have been seen. We sanity check this below.
      for (const instrumentation::InstrumentationStackFrame& frame
          : *thread->GetInstrumentationStack()) {
        // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
        // its stack frame, it is not the method owning return_pc_. We just pass null to
        // LookupMethodHeader: the method is only checked against in debug builds.
        OatQuickMethodHeader* method_header =
            code_cache_->LookupMethodHeader(frame.return_pc_, /* method= */ nullptr);
        if (method_header != nullptr) {
          const void* code = method_header->GetCode();
          CHECK(bitmap_->Test(FromCodeToAllocation(code)));
        }
      }
    }
    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
  Barrier* const barrier_;
};

void JitCodeCache::NotifyCollectionDone(Thread* self) {
  collection_in_progress_ = false;
  lock_cond_.Broadcast(self);
}

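// Splits `new_footprint` between the two mspaces: the data region receives
// new_footprint / kCodeAndDataCapacityDivider bytes and the executable region the remainder.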
void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
  size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
  DCHECK(IsAlignedParam(data_space_footprint, kPageSize));
  DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
  mspace_set_footprint_limit(data_mspace_, data_space_footprint);
  if (HasCodeMapping()) {
    ScopedCodeCacheWrite scc(this);
    mspace_set_footprint_limit(exec_mspace_, new_footprint - data_space_footprint);
  }
}

bool JitCodeCache::IncreaseCodeCacheCapacity() {
  if (current_capacity_ == max_capacity_) {
    return false;
  }

  // Double the capacity if we're below 1MB, or increase it by 1MB otherwise.
  if (current_capacity_ < 1 * MB) {
    current_capacity_ *= 2;
  } else {
    current_capacity_ += 1 * MB;
  }
  if (current_capacity_ > max_capacity_) {
    current_capacity_ = max_capacity_;
  }
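  // For example (illustrative): starting from 512 KB, the capacity grows as
  // 512 KB -> 1 MB -> 2 MB -> 3 MB -> ... until clamped to max_capacity_.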

  VLOG(jit) << "Increasing code cache capacity to " << PrettySize(current_capacity_);

  SetFootprintLimit(current_capacity_);

  return true;
}

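// Runs MarkCodeClosure as a checkpoint on all threads, marking JIT code found on any
// thread's stack as live, and waits on the barrier until every thread has run it.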
void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
  Barrier barrier(0);
  size_t threads_running_checkpoint = 0;
  MarkCodeClosure closure(this, GetLiveBitmap(), &barrier);
  threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
  // Now that we have run our checkpoint, move to a suspended state and wait
  // for other threads to run the checkpoint.
  ScopedThreadSuspension sts(self, kSuspended);
  if (threads_running_checkpoint != 0) {
    barrier.Increment(self, threads_running_checkpoint);
  }
}

bool JitCodeCache::ShouldDoFullCollection() {
  if (current_capacity_ == max_capacity_) {
    // Always do a full collection when the code cache is full.
    return true;
  } else if (current_capacity_ < kReservedCapacity) {
    // Always do a partial collection when the code cache size is below the reserved
    // capacity.
    return false;
  } else if (last_collection_increased_code_cache_) {
    // This time do a full collection.
    return true;
  } else {
    // This time do a partial collection.
    return false;
  }
}

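// Performs one code cache collection: waits for an in-progress collection or starts a new
// one, runs a partial or full collection (see ShouldDoFullCollection()), and grows the
// cache after a partial one.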
void JitCodeCache::GarbageCollectCache(Thread* self) {
  ScopedTrace trace(__FUNCTION__);
  if (!garbage_collect_code_) {
    MutexLock mu(self, lock_);
    IncreaseCodeCacheCapacity();
    return;
  }

  // Wait for an existing collection, or let everyone know we are starting one.
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    } else {
      number_of_collections_++;
      live_bitmap_.reset(CodeCacheBitmap::Create(
          "code-cache-bitmap",
          reinterpret_cast<uintptr_t>(exec_pages_.Begin()),
          reinterpret_cast<uintptr_t>(exec_pages_.Begin() + current_capacity_ / 2)));
      collection_in_progress_ = true;
    }
  }

  TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
  {
    TimingLogger::ScopedTiming st("Code cache collection", &logger);

    bool do_full_collection = false;
    {
      MutexLock mu(self, lock_);
      do_full_collection = ShouldDoFullCollection();
    }

    VLOG(jit) << "Do "
              << (do_full_collection ? "full" : "partial")
              << " code cache collection, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());

    DoCollection(self, /* collect_profiling_info= */ do_full_collection);

    VLOG(jit) << "After code cache collection, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());

    {
      MutexLock mu(self, lock_);

      // Increase the code cache only when we do partial collections.
      // TODO: base this strategy on how full the code cache is?
      if (do_full_collection) {
        last_collection_increased_code_cache_ = false;
      } else {
        last_collection_increased_code_cache_ = true;
        IncreaseCodeCacheCapacity();
      }

      bool next_collection_will_be_full = ShouldDoFullCollection();

      // Start polling the liveness of compiled code to prepare for the next full collection.
      if (next_collection_will_be_full) {
        // Save the entry point of methods we have compiled, and update the entry
        // point of those methods to the interpreter. If the method is invoked, the
        // interpreter will update its entry point to the compiled code and call it.
        for (ProfilingInfo* info : profiling_infos_) {
          const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
          if (ContainsPc(entry_point)) {
            info->SetSavedEntryPoint(entry_point);
            // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
            // class of the method. We may be concurrently running a GC which makes accessing
            // the class unsafe. We know it is OK to bypass the instrumentation as we've just
            // checked that the current entry point is JIT compiled code.
            info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
          }
        }

        DCHECK(CheckLiveCompiledCodeHasProfilingInfo());

        // Change entry points of native methods back to the GenericJNI entrypoint.
        for (const auto& entry : jni_stubs_map_) {
          const JniStubData& data = entry.second;
          if (!data.IsCompiled()) {
            continue;
          }
          // Make sure a single invocation of the GenericJNI trampoline tries to recompile.
          uint16_t new_counter = Runtime::Current()->GetJit()->HotMethodThreshold() - 1u;
          const OatQuickMethodHeader* method_header =
              OatQuickMethodHeader::FromCodePointer(data.GetCode());
          for (ArtMethod* method : data.GetMethods()) {
            if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
              // Don't call Instrumentation::UpdateMethodsCode(), same as for normal methods above.
              method->SetCounter(new_counter);
              method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
            }
          }
        }
      }
      live_bitmap_.reset(nullptr);
      NotifyCollectionDone(self);
    }
  }
  Runtime::Current()->GetJit()->AddTimingLogger(logger);
}

void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
  ScopedTrace trace(__FUNCTION__);
  std::unordered_set<OatQuickMethodHeader*> method_headers;
  {
    MutexLock mu(self, lock_);
    ScopedCodeCacheWrite scc(this);
    // Iterate over all compiled code and remove entries that are not marked.
    for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
      JniStubData* data = &it->second;
      if (!data->IsCompiled() || GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
        ++it;
      } else {
        method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
        it = jni_stubs_map_.erase(it);
      }
    }
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      const void* code_ptr = it->first;
      uintptr_t allocation = FromCodeToAllocation(code_ptr);
      if (GetLiveBitmap()->Test(allocation)) {
        ++it;
      } else {
        OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        method_headers.insert(header);
        it = method_code_map_.erase(it);
      }
    }
  }
  FreeAllMethodHeaders(method_headers);
}

void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
  ScopedTrace trace(__FUNCTION__);
  {
    MutexLock mu(self, lock_);
    if (collect_profiling_info) {
      // Clear the profiling info of methods that do not have compiled code as entrypoint.
      // Also remove the saved entry point from the ProfilingInfo objects.
      for (ProfilingInfo* info : profiling_infos_) {
        const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
        if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) {
          info->GetMethod()->SetProfilingInfo(nullptr);
        }

        if (info->GetSavedEntryPoint() != nullptr) {
          info->SetSavedEntryPoint(nullptr);
          // We are going to move this method back to interpreter. Clear the counter now to
          // give it a chance to be hot again.
          ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true);
        }
      }
    } else if (kIsDebugBuild) {
      // Sanity check that the profiling infos do not have a dangling entry point.
      for (ProfilingInfo* info : profiling_infos_) {
        DCHECK(info->GetSavedEntryPoint() == nullptr);
      }
    }

    // Mark compiled code that is the entry point of an ArtMethod. Compiled code that is not
    // an entry point is either:
    // - an osr compiled code, that will be removed if not in a thread call stack.
    // - discarded compiled code, that will be removed if not in a thread call stack.
    for (const auto& entry : jni_stubs_map_) {
      const JniStubData& data = entry.second;
      const void* code_ptr = data.GetCode();
      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      for (ArtMethod* method : data.GetMethods()) {
        if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
          GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
          break;
        }
      }
    }
    for (const auto& it : method_code_map_) {
      ArtMethod* method = it.second;
      const void* code_ptr = it.first;
      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
        GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
      }
    }

    // Empty the OSR method map, as OSR compiled code will be deleted (except the ones
    // on thread stacks).
    osr_code_map_.clear();
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  MarkCompiledCodeOnThreadStacks(self);

  // At this point, mutator threads are still running, and entrypoints of methods can
  // change. We do know they cannot change to a code cache entry that is not marked,
  // therefore we can safely remove those entries.
  RemoveUnmarkedCode(self);

  if (collect_profiling_info) {
    MutexLock mu(self, lock_);
    // Free all profiling infos of methods that are neither compiled nor being compiled.
    auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
      [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
        const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
        // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope
        // that the compiled code would not get revived. As mutator threads run concurrently,
        // they may have revived the compiled code, and now we are in the situation where
        // a method has compiled code but no ProfilingInfo.
        // We make sure compiled methods have a ProfilingInfo object. It is needed for
        // code cache collection.
        if (ContainsPc(ptr) &&
            info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
          info->GetMethod()->SetProfilingInfo(info);
        } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
          // No need for this ProfilingInfo object anymore.
          FreeData(reinterpret_cast<uint8_t*>(info));
          return true;
        }
        return false;
      });
    profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());
    DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
  }
}

bool JitCodeCache::CheckLiveCompiledCodeHasProfilingInfo() {
  ScopedTrace trace(__FUNCTION__);
  // Check that methods we have compiled do have a ProfilingInfo object. We would
  // have memory leaks of compiled code otherwise.
  for (const auto& it : method_code_map_) {
    ArtMethod* method = it.second;
    if (method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
      const void* code_ptr = it.first;
      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
        // If the code is not dead, then we have a problem. Note that this can even
        // happen just after a collection, as mutator threads are running in parallel
        // and could deoptimize an existing compiled code.
        return false;
      }
    }
  }
  return true;
}

| 1683 | OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) { |
Vladimir Marko | 33bff25 | 2017-11-01 14:35:42 +0000 | [diff] [blame] | 1684 | static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA"); |
| 1685 | if (kRuntimeISA == InstructionSet::kArm) { |
Nicolas Geoffray | 1dad3f6 | 2015-10-23 14:59:54 +0100 | [diff] [blame] | 1686 | // On Thumb-2, the pc is offset by one. |
| 1687 | --pc; |
Mathieu Chartier | e5f13e5 | 2015-02-24 09:37:21 -0800 | [diff] [blame] | 1688 | } |
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  if (!kIsDebugBuild) {
    // Called with null `method` only from MarkCodeClosure::Run() in debug build.
    CHECK(method != nullptr);
  }

  MutexLock mu(Thread::Current(), lock_);
  OatQuickMethodHeader* method_header = nullptr;
  ArtMethod* found_method = nullptr;  // Only for DCHECK(), not for JNI stubs.
  if (method != nullptr && UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it == jni_stubs_map_.end() || !ContainsElement(it->second.GetMethods(), method)) {
      return nullptr;
    }
    const void* code_ptr = it->second.GetCode();
    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
    if (!method_header->Contains(pc)) {
      return nullptr;
    }
  } else {
    auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
    if (it != method_code_map_.begin()) {
      --it;
      const void* code_ptr = it->first;
      if (OatQuickMethodHeader::FromCodePointer(code_ptr)->Contains(pc)) {
        method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        found_method = it->second;
      }
    }
    if (method_header == nullptr && method == nullptr) {
      // Scan all compiled JNI stubs as well. This slow search is used only
      // for checks in debug builds; for release builds the `method` is not null.
      for (auto&& entry : jni_stubs_map_) {
        const JniStubData& data = entry.second;
        if (data.IsCompiled() &&
            OatQuickMethodHeader::FromCodePointer(data.GetCode())->Contains(pc)) {
          method_header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
        }
      }
    }
    if (method_header == nullptr) {
      return nullptr;
    }
  }

  if (kIsDebugBuild && method != nullptr && !method->IsNative()) {
    // When we are walking the stack to redefine classes and creating obsolete methods, it is
    // possible that we have updated the method_code_map by making this method obsolete in a
    // previous frame. Therefore we should just check that the non-obsolete version of this method
    // is the one we expect. We use the non-obsolete versions in the error message, since the
    // obsolete version of the method might not be fully initialized yet. This situation can only
    // occur when we are in the process of allocating and setting up obsolete methods. Otherwise
    // method and it->second should be identical. (See openjdkjvmti/ti_redefine.cc for more
    // information.)
    DCHECK_EQ(found_method->GetNonObsoleteMethod(), method->GetNonObsoleteMethod())
        << ArtMethod::PrettyMethod(method->GetNonObsoleteMethod()) << " "
        << ArtMethod::PrettyMethod(found_method->GetNonObsoleteMethod()) << " "
        << std::hex << pc;
  }
  return method_header;
}

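// Aside on the lookup above: `method_code_map_` is an ordered map keyed by code
// start address, and `lower_bound(pc)` returns the first entry whose key is >= pc,
// so stepping back one entry yields the closest code region starting at or below pc.
// A minimal sketch of the idiom with generic names (not the real ART types):
//
//   std::map<const void*, ArtMethod*> code_map;  // keyed by code start address
//   auto it = code_map.lower_bound(pc);          // first entry with key >= pc
//   if (it != code_map.begin()) {
//     --it;  // now the last entry with key < pc (pc lies strictly inside its code)
//     // A bounds check against the entry's method header then confirms whether
//     // pc really falls within that method's code, as done above.
//   }
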
OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  auto it = osr_code_map_.find(method);
  if (it == osr_code_map_.end()) {
    return nullptr;
  }
  return OatQuickMethodHeader::FromCodePointer(it->second);
}

ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                              ArtMethod* method,
                                              const std::vector<uint32_t>& entries,
                                              bool retry_allocation)
    // No thread safety analysis as we are using TryLock/Unlock explicitly.
    NO_THREAD_SAFETY_ANALYSIS {
  ProfilingInfo* info = nullptr;
  if (!retry_allocation) {
    // If we are allocating for the interpreter, just try to lock, to avoid
    // lock contention with the JIT.
    if (lock_.ExclusiveTryLock(self)) {
      info = AddProfilingInfoInternal(self, method, entries);
      lock_.ExclusiveUnlock(self);
    }
  } else {
    {
      MutexLock mu(self, lock_);
      info = AddProfilingInfoInternal(self, method, entries);
    }

    if (info == nullptr) {
      GarbageCollectCache(self);
      MutexLock mu(self, lock_);
      info = AddProfilingInfoInternal(self, method, entries);
    }
  }
  return info;
}

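// A sketch of how callers might use the two allocation modes above (hedged: the
// real call sites live elsewhere in the runtime and differ in detail):
//
//   // From interpreter profiling hooks: opportunistic, never contends with the JIT.
//   cache->AddProfilingInfo(self, method, entries, /*retry_allocation=*/ false);
//
//   // From the JIT compiler, which needs the info: GC the cache once and retry.
//   ProfilingInfo* info =
//       cache->AddProfilingInfo(self, method, entries, /*retry_allocation=*/ true);
//
// Here `cache` stands for the JitCodeCache instance and `entries` for the dex pcs
// of the method's virtual/interface invokes.
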
ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED,
                                                      ArtMethod* method,
                                                      const std::vector<uint32_t>& entries) {
  size_t profile_info_size = RoundUp(
      sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(),
      sizeof(void*));

  // Check whether some other thread has concurrently created it.
  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
  if (info != nullptr) {
    return info;
  }

  uint8_t* data = AllocateData(profile_info_size);
  if (data == nullptr) {
    return nullptr;
  }
  info = new (data) ProfilingInfo(method, entries);

  // Make sure other threads see the data in the profiling info object before the
  // store in the ArtMethod's ProfilingInfo pointer.
  std::atomic_thread_fence(std::memory_order_release);

  method->SetProfilingInfo(info);
  profiling_infos_.push_back(info);
  histogram_profiling_info_memory_use_.AddValue(profile_info_size);
  return info;
}

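// The construct-fence-publish sequence above is the standard release-publication
// pattern: fully initialize the object, issue a release fence, then store the
// pointer, so a thread that observes the pointer (with a matching acquire or a
// dependency-ordered load) also observes the initialized fields. A generic sketch,
// with hypothetical names:
//
//   Widget* w = new (storage) Widget(args);               // 1. construct fully
//   std::atomic_thread_fence(std::memory_order_release);  // 2. order fields before publish
//   published = w;                                        // 3. now visible to readers
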
// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
// is already held.
void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
  if (mspace == exec_mspace_) {
    DCHECK(exec_mspace_ != nullptr);
    const MemMap* const code_pages = GetUpdatableCodeMapping();
    void* result = code_pages->Begin() + exec_end_;
    exec_end_ += increment;
    return result;
  } else {
    DCHECK_EQ(data_mspace_, mspace);
    void* result = data_pages_.Begin() + data_end_;
    data_end_ += increment;
    return result;
  }
}

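// MoreCore() plays the role sbrk() plays for a regular dlmalloc heap: when an
// mspace exhausts its current footprint, dlmalloc calls back here to extend (or,
// with a negative increment, shrink) the contiguous region backing it. The bump
// pointers work because each mspace was created over an already-reserved mapping,
// so "growing" just hands out more of the reserved range. A hedged sketch of the
// contract (hypothetical setup, not the real initialization code):
//
//   void* base = ReserveMapping(kMaxCapacity);  // reserve the whole range up front
//   mspace ms = create_mspace_with_base(base, kInitialFootprint, /*locked=*/ false);
//   // mspace_malloc(ms, n) may later invoke MoreCore(ms, increment); MoreCore must
//   // return the current end of the region and advance that end by `increment`.
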
void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                                      std::vector<ProfileMethodInfo>& methods) {
  ScopedTrace trace(__FUNCTION__);
  MutexLock mu(Thread::Current(), lock_);
  uint16_t jit_compile_threshold = Runtime::Current()->GetJITOptions()->GetCompileThreshold();
  for (const ProfilingInfo* info : profiling_infos_) {
    ArtMethod* method = info->GetMethod();
    const DexFile* dex_file = method->GetDexFile();
    const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
    if (!ContainsElement(dex_base_locations, base_location)) {
      // Skip dex files which are not profiled.
      continue;
    }
    std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;

    // If the method didn't reach the compilation threshold, don't save the inline caches.
    // They might be incomplete and cause unnecessary deoptimizations.
    // If the inline cache is empty, the compiler will generate a regular invoke virtual/interface.
    if (method->GetCounter() < jit_compile_threshold) {
      methods.emplace_back(/*ProfileMethodInfo*/
          MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
      continue;
    }

    for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
      std::vector<TypeReference> profile_classes;
      const InlineCache& cache = info->cache_[i];
      ArtMethod* caller = info->GetMethod();
      bool is_missing_types = false;
      for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) {
        mirror::Class* cls = cache.classes_[k].Read();
        if (cls == nullptr) {
          break;
        }

        // Check if the receiver is in the boot class path or if it's in the
        // same class loader as the caller. If not, skip it, as there is not
        // much we can do during AOT.
        if (!cls->IsBootStrapClassLoaded() &&
            caller->GetClassLoader() != cls->GetClassLoader()) {
          is_missing_types = true;
          continue;
        }

        const DexFile* class_dex_file = nullptr;
        dex::TypeIndex type_index;

        if (cls->GetDexCache() == nullptr) {
          DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
          // Make a best effort to find the type index in the method's dex file.
          // We could search all open dex files, but that could be expensive
          // and is probably not worth it.
          class_dex_file = dex_file;
          type_index = cls->FindTypeIndexInOtherDexFile(*dex_file);
        } else {
          class_dex_file = &(cls->GetDexFile());
          type_index = cls->GetDexTypeIndex();
        }
        if (!type_index.IsValid()) {
          // Could be a proxy class or an array for which we couldn't find the type index.
          is_missing_types = true;
          continue;
        }
        if (ContainsElement(dex_base_locations,
                            DexFileLoader::GetBaseLocation(class_dex_file->GetLocation()))) {
          // Only consider classes from the same apk (including multidex).
          profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
              class_dex_file, type_index);
        } else {
          is_missing_types = true;
        }
      }
      if (!profile_classes.empty()) {
        inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/
            cache.dex_pc_, is_missing_types, profile_classes);
      }
    }
    methods.emplace_back(/*ProfileMethodInfo*/
        MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
  }
}

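// A sketch of how the profile saver might drive this method (hedged: the real
// caller is the ProfileSaver thread and its bookkeeping is more involved):
//
//   std::set<std::string> locations = {"/data/app/com.example/base.apk"};  // hypothetical
//   std::vector<ProfileMethodInfo> methods;
//   Runtime::Current()->GetJit()->GetCodeCache()->GetProfiledMethods(locations, methods);
//   // `methods` now holds one entry per profiled method from those dex locations,
//   // with inline-cache class records only for methods past the compile threshold.
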
bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  return osr_code_map_.find(method) != osr_code_map_.end();
}

bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr) {
  if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return false;
  }

  MutexLock mu(self, lock_);
  if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) {
    return false;
  }

  if (UNLIKELY(method->IsNative())) {
    JniStubKey key(method);
    auto it = jni_stubs_map_.find(key);
    bool new_compilation = false;
    if (it == jni_stubs_map_.end()) {
      // Create a new entry to mark the stub as being compiled.
      it = jni_stubs_map_.Put(key, JniStubData{});
      new_compilation = true;
    }
    JniStubData* data = &it->second;
    data->AddMethod(method);
    if (data->IsCompiled()) {
      OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(data->GetCode());
      const void* entrypoint = method_header->GetEntryPoint();
      // Also update the entrypoints of the other methods held by the JniStubData.
      // We could simply update the entrypoint of `method`, but if the last JIT GC
      // has changed these entrypoints to GenericJNI in preparation for a full GC,
      // we may as well change them back, as this stub shall not be collected anyway
      // and this can avoid a few expensive GenericJNI calls.
      instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
      for (ArtMethod* m : data->GetMethods()) {
        // Call the dedicated method instead of the more generic UpdateMethodsCode, because
        // `m` might be in the process of being deleted.
        instrumentation->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
      }
      if (collection_in_progress_) {
        GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
      }
    }
    return new_compilation;
  } else {
    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
    if (info == nullptr) {
      VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
      // Because the counter is not atomic, there are some rare cases where we may not hit the
      // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
      ClearMethodCounter(method, /*was_warm=*/ false);
      return false;
    }

    if (info->IsMethodBeingCompiled(osr)) {
      return false;
    }

    info->SetIsMethodBeingCompiled(true, osr);
    return true;
  }
}

ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
  MutexLock mu(self, lock_);
  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
  if (info != nullptr) {
    if (!info->IncrementInlineUse()) {
      // Overflow of inlining uses, just bail.
      return nullptr;
    }
  }
  return info;
}

void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
  MutexLock mu(self, lock_);
  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
  DCHECK(info != nullptr);
  info->DecrementInlineUse();
}

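// NotifyCompilerUse() and DoneCompilerUse() must be paired: the inline-use count
// pins the ProfilingInfo while the compiler reads its inline caches. A scoped
// guard is the natural caller-side shape (hedged sketch; the optimizing compiler
// uses a comparable scoped helper):
//
//   class ScopedCompilerUse {
//    public:
//     ScopedCompilerUse(JitCodeCache* cache, ArtMethod* method, Thread* self)
//         : cache_(cache),
//           method_(method),
//           self_(self),
//           info_(cache->NotifyCompilerUse(method, self)) {}
//     ~ScopedCompilerUse() {
//       if (info_ != nullptr) {  // Null means no info or use-count overflow; nothing to undo.
//         cache_->DoneCompilerUse(method_, self_);
//       }
//     }
//     ProfilingInfo* GetInfo() const { return info_; }
//    private:
//     JitCodeCache* const cache_;
//     ArtMethod* const method_;
//     Thread* const self_;
//     ProfilingInfo* const info_;
//   };
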
void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr) {
  DCHECK_EQ(Thread::Current(), self);
  MutexLock mu(self, lock_);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    DCHECK(it != jni_stubs_map_.end());
    JniStubData* data = &it->second;
    DCHECK(ContainsElement(data->GetMethods(), method));
    if (UNLIKELY(!data->IsCompiled())) {
      // Failed to compile; the JNI compiler never fails, but the cache may be full.
      jni_stubs_map_.erase(it);  // Remove the entry added in NotifyCompilationOf().
    }  // else CommitCodeInternal() updated entrypoints of all methods in the JniStubData.
  } else {
    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
    DCHECK(info->IsMethodBeingCompiled(osr));
    info->SetIsMethodBeingCompiled(false, osr);
  }
}

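// NotifyCompilationOf() and DoneCompiling() bracket every compilation attempt. A
// hedged sketch of the protocol as the JIT task driver might follow it (the real
// driver lives elsewhere in the JIT; `CompileMethodImpl` is a made-up name):
//
//   if (code_cache->NotifyCompilationOf(method, self, osr)) {  // claim the method
//     bool success = CompileMethodImpl(method, osr);           // do the actual work
//     code_cache->DoneCompiling(method, self, osr);            // release the claim
//   }
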
void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
                                             const OatQuickMethodHeader* header) {
  DCHECK(!method->IsNative());
  ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
  const void* method_entrypoint = method->GetEntryPointFromQuickCompiledCode();
  if ((profiling_info != nullptr) &&
      (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
    // When instrumentation is set, the actual entrypoint is the one in the profiling info.
    method_entrypoint = profiling_info->GetSavedEntryPoint();
    // Prevent future uses of the compiled code.
    profiling_info->SetSavedEntryPoint(nullptr);
  }

  // Clear the method counter if we are running the jitted code, since we might want
  // to jit this method again in the future.
  if (method_entrypoint == header->GetEntryPoint()) {
    // The entrypoint is the one to invalidate, so we just update it to the interpreter
    // entry point and clear the counter to get the method jitted again.
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, GetQuickToInterpreterBridge());
    ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr);
  } else {
    MutexLock mu(Thread::Current(), lock_);
    auto it = osr_code_map_.find(method);
    if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
      // Remove the OSR method, to avoid using it again.
      osr_code_map_.erase(it);
    }
  }
}

uint8_t* JitCodeCache::AllocateCode(size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  uint8_t* result = reinterpret_cast<uint8_t*>(
      mspace_memalign(exec_mspace_, alignment, code_size));
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  // Ensure the header ends up at expected instruction alignment.
  DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
  used_memory_for_code_ += mspace_usable_size(result);
  return result;
}

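// Worked example of the alignment bookkeeping above (hedged: the numbers assume an
// ISA with 16-byte instruction alignment and a 24-byte OatQuickMethodHeader; real
// sizes are ISA- and build-dependent):
//
//   alignment   = 16
//   header_size = RoundUp(24, 16) = 32
//   result      = mspace_memalign(...)  // 16-byte aligned, e.g. 0x7f004000
//   // The code entry point, result + header_size = 0x7f004020, is then also
//   // 16-byte aligned, which is exactly what the DCHECK_ALIGNED_PARAM verifies.
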
void JitCodeCache::FreeCode(uint8_t* code) {
  used_memory_for_code_ -= mspace_usable_size(code);
  mspace_free(exec_mspace_, code);
}

uint8_t* JitCodeCache::AllocateData(size_t data_size) {
  void* result = mspace_malloc(data_mspace_, data_size);
  used_memory_for_data_ += mspace_usable_size(result);
  return reinterpret_cast<uint8_t*>(result);
}

void JitCodeCache::FreeData(uint8_t* data) {
  used_memory_for_data_ -= mspace_usable_size(data);
  mspace_free(data_mspace_, data);
}

void JitCodeCache::Dump(std::ostream& os) {
  MutexLock mu(Thread::Current(), lock_);
  MutexLock mu2(Thread::Current(), *Locks::native_debug_interface_lock_);
  os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
     << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
     << "Current JIT mini-debug-info size: " << PrettySize(GetJitNativeDebugInfoMemUsage()) << "\n"
     << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
     << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
     << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
     << "Total number of JIT compilations: " << number_of_compilations_ << "\n"
     << "Total number of JIT compilations for on stack replacement: "
     << number_of_osr_compilations_ << "\n"
     << "Total number of JIT code cache collections: " << number_of_collections_ << std::endl;
  histogram_stack_map_memory_use_.PrintMemoryUse(os);
  histogram_code_memory_use_.PrintMemoryUse(os);
  histogram_profiling_info_memory_use_.PrintMemoryUse(os);
}

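// For reference, the dump has the following shape (illustrative output with
// made-up numbers; the histogram lines come from the PrintMemoryUse() calls):
//
//   Current JIT code cache size: 124KB
//   Current JIT data cache size: 58KB
//   Current JIT mini-debug-info size: 16KB
//   Current JIT capacity: 256KB
//   Current number of JIT JNI stub entries: 3
//   Current number of JIT code cache entries: 412
//   Total number of JIT compilations: 431
//   Total number of JIT compilations for on stack replacement: 2
//   Total number of JIT code cache collections: 5
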
void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
  MutexLock mu(Thread::Current(), lock_);
  // Currently, we don't expect any compilations from zygote.
  CHECK_EQ(number_of_compilations_, 0u);
  CHECK_EQ(number_of_osr_compilations_, 0u);
  CHECK(jni_stubs_map_.empty());
  CHECK(method_code_map_.empty());
  CHECK(osr_code_map_.empty());

  zygote_data_pages_ = std::move(data_pages_);
  zygote_exec_pages_ = std::move(exec_pages_);
  zygote_data_mspace_ = data_mspace_;
  zygote_exec_mspace_ = exec_mspace_;

  size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
  size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();

  InitializeState(initial_capacity, max_capacity);

  std::string error_msg;
  if (!InitializeMappings(/* rwx_memory_allowed= */ !is_system_server, is_zygote, &error_msg)) {
    LOG(WARNING) << "Could not reset JIT state after zygote fork: " << error_msg;
    return;
  }

  InitializeSpaces();
}

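// A hedged sketch of where this hooks into zygote forking (the real call site is
// in the runtime's zygote hooks and passes flags derived from the fork request):
//
//   pid_t pid = fork();
//   if (pid == 0) {
//     // In the child, the zygote's JIT pages arrive via copy-on-write; keeping
//     // them (moved into the zygote_* members above) preserves shared compiled
//     // code, while fresh private mappings serve the child's own compilations.
//     Runtime::Current()->GetJit()->GetCodeCache()->PostForkChildAction(
//         is_system_server, /*is_zygote=*/ false);
//   }
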
}  // namespace jit
}  // namespace art