/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "android-base/unique_fd.h"

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/membarrier.h"
#include "base/memfd.h"
#include "base/mem_map.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "cha.h"
#include "debugger_interface.h"
#include "dex/dex_file_loader.h"
#include "dex/method_reference.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle.h"
#include "instrumentation.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "oat_file-inl.h"
#include "oat_quick_method_header.h"
#include "object_callbacks.h"
#include "profile/profile_compilation_info.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"

using android::base::unique_fd;

namespace art {
namespace jit {

static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;

static constexpr int kProtR = PROT_READ;
static constexpr int kProtRW = PROT_READ | PROT_WRITE;
static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtRX = PROT_READ | PROT_EXEC;

namespace {

// Translate an address belonging to one memory map into an address in a second. This is useful
// when there are two virtual memory ranges for the same physical memory range.
template <typename T>
T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
  CHECK(src.HasAddress(src_ptr));
  uint8_t* const raw_src_ptr = reinterpret_cast<uint8_t*>(src_ptr);
  return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
}
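
// Illustrative sketch only (hypothetical names `exec_pages`/`non_exec_pages`, not code from this
// file): with a dual-view cache, the JIT patches through the writable mapping while execution
// happens through the executable one, e.g.:
//
//   uint8_t* writable_ptr = TranslateAddress(code_ptr, exec_pages, non_exec_pages);
//   std::copy(new_code, new_code + size, writable_ptr);  // Write via the non-executable view.
//
// The real call sites below translate in both directions (see CommitCodeInternal() and
// FreeCodeAndData()).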

}  // namespace

class JitCodeCache::JniStubKey {
 public:
  explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
      : shorty_(method->GetShorty()),
        is_static_(method->IsStatic()),
        is_fast_native_(method->IsFastNative()),
        is_critical_native_(method->IsCriticalNative()),
        is_synchronized_(method->IsSynchronized()) {
    DCHECK(!(is_fast_native_ && is_critical_native_));
  }

  bool operator<(const JniStubKey& rhs) const {
    if (is_static_ != rhs.is_static_) {
      return rhs.is_static_;
    }
    if (is_synchronized_ != rhs.is_synchronized_) {
      return rhs.is_synchronized_;
    }
    if (is_fast_native_ != rhs.is_fast_native_) {
      return rhs.is_fast_native_;
    }
    if (is_critical_native_ != rhs.is_critical_native_) {
      return rhs.is_critical_native_;
    }
    return strcmp(shorty_, rhs.shorty_) < 0;
  }

  // Update the shorty to point to another method's shorty. Call this function when removing
  // the method that references the old shorty from JniStubData without removing the entire
  // JniStubData; the old shorty may become a dangling pointer when that method is unloaded.
  void UpdateShorty(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* shorty = method->GetShorty();
    DCHECK_STREQ(shorty_, shorty);
    shorty_ = shorty;
  }

 private:
  // The shorty points into a DexFile's data and may need to change
  // to point to the same shorty in a different DexFile.
  mutable const char* shorty_;

  const bool is_static_;
  const bool is_fast_native_;
  const bool is_critical_native_;
  const bool is_synchronized_;
};
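
// Usage sketch (illustrative; it mirrors the lookups later in this file, e.g. GetJniStubCode()):
// JniStubKey is the key of jni_stubs_map_, so native methods whose stubs are interchangeable
// (same shorty and the same static/synchronized/fast/critical flags) share one compiled stub:
//
//   auto it = jni_stubs_map_.find(JniStubKey(method));
//   if (it != jni_stubs_map_.end() && it->second.IsCompiled()) {
//     // Reuse it->second.GetCode() for `method`.
//   }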

class JitCodeCache::JniStubData {
 public:
  JniStubData() : code_(nullptr), methods_() {}

  void SetCode(const void* code) {
    DCHECK(code != nullptr);
    code_ = code;
  }

  const void* GetCode() const {
    return code_;
  }

  bool IsCompiled() const {
    return GetCode() != nullptr;
  }

  void AddMethod(ArtMethod* method) {
    if (!ContainsElement(methods_, method)) {
      methods_.push_back(method);
    }
  }

  const std::vector<ArtMethod*>& GetMethods() const {
    return methods_;
  }

  void RemoveMethodsIn(const LinearAlloc& alloc) {
    auto kept_end = std::remove_if(
        methods_.begin(),
        methods_.end(),
        [&alloc](ArtMethod* method) { return alloc.ContainsUnsafe(method); });
    methods_.erase(kept_end, methods_.end());
  }

  bool RemoveMethod(ArtMethod* method) {
    auto it = std::find(methods_.begin(), methods_.end(), method);
    if (it != methods_.end()) {
      methods_.erase(it);
      return true;
    } else {
      return false;
    }
  }

  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
    std::replace(methods_.begin(), methods_.end(), old_method, new_method);
  }

 private:
  const void* code_;
  std::vector<ArtMethod*> methods_;
};

JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
                                   size_t max_capacity,
                                   bool generate_debug_info,
                                   bool used_only_for_profile_data,
                                   std::string* error_msg) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  CHECK_GE(max_capacity, initial_capacity);

  // With 'perf', we want a 1-1 mapping between an address and a method.
  // We aren't able to keep method pointers live during the instrumentation method entry trampoline
  // so we will just disable jit-gc if we are doing that.
  bool garbage_collect_code = !generate_debug_info &&
      !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();

  // We need to have 32 bit offsets from method headers in code cache which point to things
  // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
  // Ensure we're below 1 GB to be safe.
  if (max_capacity > 1 * GB) {
    std::ostringstream oss;
    oss << "Maximum code cache capacity is limited to 1 GB, "
        << PrettySize(max_capacity) << " is too big";
    *error_msg = oss.str();
    return nullptr;
  }

  // Register for membarrier expedited sync core if JIT will be generating code.
  if (!used_only_for_profile_data) {
    if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
      // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
      // flushed and it's used when adding code to the JIT. The memory used by the new code may
      // have just been released and, in theory, the old code could still be in a pipeline.
      VLOG(jit) << "Kernel does not support membarrier sync-core";
    }
  }

  // File descriptor enabling dual-view mapping of code section.
  unique_fd mem_fd;

  // Bionic supports memfd_create, but the call may fail on older kernels.
  mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags */ 0));
  if (mem_fd.get() < 0) {
    VLOG(jit) << "Failed to initialize dual view JIT. memfd_create() error: "
              << strerror(errno);
  }

  if (mem_fd.get() >= 0 && ftruncate(mem_fd, max_capacity) != 0) {
    std::ostringstream oss;
    oss << "Failed to initialize memory file: " << strerror(errno);
    *error_msg = oss.str();
    return nullptr;
  }

  // Data cache will be half of the initial allocation.
  // Code cache will be the other half of the initial allocation.
  // TODO: Make this variable?

  // Align both capacities to page size, as that's the unit mspaces use.
  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
  max_capacity = RoundDown(max_capacity, 2 * kPageSize);
  const size_t data_capacity = max_capacity / 2;
  const size_t exec_capacity = used_only_for_profile_data ? 0 : max_capacity - data_capacity;
  DCHECK_LE(data_capacity + exec_capacity, max_capacity);

  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  // Map in low 4gb to simplify accessing root tables for x86_64.
  // We could do PC-relative addressing to avoid this problem, but that
  // would require reserving code and data area before submitting, which
  // means more windows for the code memory to be RWX.
  int base_flags;
  MemMap data_pages;
  if (mem_fd.get() >= 0) {
    // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
    // for data and non-writable view of JIT code pages. We use the memory file descriptor to
    // enable dual mapping - we'll create a second mapping using the descriptor below. The
    // mappings will look like:
    //
    //  VA                  PA
    //
    //  +---------------+
    //  | non exec code |\
    //  +---------------+ \
    //  :               :\ \
    //  +---------------+.\.+---------------+
    //  |  exec code    |  \|     code      |
    //  +---------------+...+---------------+
    //  |      data     |   |     data      |
    //  +---------------+...+---------------+
    //
    // In this configuration code updates are written to the non-executable view of the code
    // cache, and the executable view of the code cache has fixed RX memory protections.
    //
    // This memory needs to be mapped shared as the code portions will have two mappings.
    base_flags = MAP_SHARED;
    data_pages = MemMap::MapFile(
        data_capacity + exec_capacity,
        kProtRW,
        base_flags,
        mem_fd,
        /* start */ 0,
        /* low_4gb */ true,
        "data-code-cache",
        &error_str);
  } else {
    // Single view of JIT code cache case. Create an initial mapping of data pages large enough
    // for data and JIT code pages. The mappings will look like:
    //
    //  VA                  PA
    //
    //  +---------------+...+---------------+
    //  |  exec code    |   |     code      |
    //  +---------------+...+---------------+
    //  |      data     |   |     data      |
    //  +---------------+...+---------------+
    //
    // In this configuration code updates are written to the executable view of the code cache,
    // and the executable view of the code cache transitions RX to RWX for the update and then
    // back to RX after the update.
    base_flags = MAP_PRIVATE | MAP_ANON;
    data_pages = MemMap::MapAnonymous(
        "data-code-cache",
        /* addr */ nullptr,
        data_capacity + exec_capacity,
        kProtRW,
        /* low_4gb */ true,
        /* reuse */ false,
        /* reservation */ nullptr,
        &error_str);
  }

  if (!data_pages.IsValid()) {
    std::ostringstream oss;
    oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  MemMap exec_pages;
  MemMap non_exec_pages;
  if (exec_capacity > 0) {
    uint8_t* const divider = data_pages.Begin() + data_capacity;
    // Set initial permission for executable view to catch any SELinux permission problems early
    // (for processes that cannot map WX pages). Otherwise, this region does not need to be
    // executable as there is no code in the cache yet.
    exec_pages = data_pages.RemapAtEnd(divider,
                                       "jit-code-cache",
                                       kProtRX,
                                       base_flags | MAP_FIXED,
                                       mem_fd.get(),
                                       (mem_fd.get() >= 0) ? data_capacity : 0,
                                       &error_str);
    if (!exec_pages.IsValid()) {
      std::ostringstream oss;
      oss << "Failed to create read execute code cache: " << error_str << " size=" << max_capacity;
      *error_msg = oss.str();
      return nullptr;
    }

    if (mem_fd.get() >= 0) {
      // For dual view, create the secondary view of code memory used for updating code. This view
      // is never executable.
      non_exec_pages = MemMap::MapFile(exec_capacity,
                                       kProtR,
                                       base_flags,
                                       mem_fd,
                                       /* start */ data_capacity,
                                       /* low_4GB */ false,
                                       "jit-code-cache-rw",
                                       &error_str);
      if (!non_exec_pages.IsValid()) {
        // Log and continue as single view JIT.
        VLOG(jit) << "Failed to map non-executable view of JIT code cache";
      }
    }
  } else {
    // Profiling only. No memory for code required.
    DCHECK(used_only_for_profile_data);
  }

  const size_t initial_data_capacity = initial_capacity / 2;
  const size_t initial_exec_capacity =
      (exec_capacity == 0) ? 0 : (initial_capacity - initial_data_capacity);

  return new JitCodeCache(
      std::move(data_pages),
      std::move(exec_pages),
      std::move(non_exec_pages),
      initial_data_capacity,
      initial_exec_capacity,
      max_capacity,
      garbage_collect_code);
}
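
// Hypothetical usage sketch (not part of this file; the capacities and error handling below are
// assumptions for illustration only):
//
//   std::string error_msg;
//   JitCodeCache* cache = JitCodeCache::Create(/* initial_capacity */ 64 * KB,
//                                              /* max_capacity */ 64 * MB,
//                                              /* generate_debug_info */ false,
//                                              /* used_only_for_profile_data */ false,
//                                              &error_msg);
//   if (cache == nullptr) {
//     LOG(WARNING) << "JIT code cache creation failed: " << error_msg;
//   }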

JitCodeCache::JitCodeCache(MemMap&& data_pages,
                           MemMap&& exec_pages,
                           MemMap&& non_exec_pages,
                           size_t initial_data_capacity,
                           size_t initial_exec_capacity,
                           size_t max_capacity,
                           bool garbage_collect_code)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache condition variable", lock_),
      collection_in_progress_(false),
      data_pages_(std::move(data_pages)),
      exec_pages_(std::move(exec_pages)),
      non_exec_pages_(std::move(non_exec_pages)),
      max_capacity_(max_capacity),
      current_capacity_(initial_exec_capacity + initial_data_capacity),
      data_end_(initial_data_capacity),
      exec_end_(initial_exec_capacity),
      last_collection_increased_code_cache_(false),
      garbage_collect_code_(garbage_collect_code),
      used_memory_for_data_(0),
      used_memory_for_code_(0),
      number_of_compilations_(0),
      number_of_osr_compilations_(0),
      number_of_collections_(0),
      histogram_stack_map_memory_use_("Memory used for stack maps", 16),
      histogram_code_memory_use_("Memory used for compiled code", 16),
      histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
      is_weak_access_enabled_(true),
      inline_cache_cond_("Jit inline cache condition variable", lock_) {

  DCHECK_GE(max_capacity, initial_exec_capacity + initial_data_capacity);

  // Initialize the data heap.
  data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
  CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";

  // Initialize the code heap.
  MemMap* code_heap = nullptr;
  if (non_exec_pages_.IsValid()) {
    code_heap = &non_exec_pages_;
  } else if (exec_pages_.IsValid()) {
    code_heap = &exec_pages_;
  }
  if (code_heap != nullptr) {
    // Make all pages reserved for the code heap writable. The mspace allocator, that manages the
    // heap, will take and initialize pages in create_mspace_with_base().
    CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
    exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
    CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
    SetFootprintLimit(current_capacity_);
    // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
    // perform the update and there are no other times write access is required.
    CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
  } else {
    exec_mspace_ = nullptr;
    SetFootprintLimit(current_capacity_);
  }

  VLOG(jit) << "Created jit code cache: initial data size="
            << PrettySize(initial_data_capacity)
            << ", initial code size="
            << PrettySize(initial_exec_capacity);
}

JitCodeCache::~JitCodeCache() {}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return exec_pages_.Begin() <= ptr && ptr < exec_pages_.End();
}

bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
  ScopedObjectAccess soa(art::Thread::Current());
  ScopedAssertNoThreadSuspension sants(__FUNCTION__);
  if (ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return true;
  } else if (method->GetEntryPointFromQuickCompiledCode() == GetQuickInstrumentationEntryPoint()) {
    return FindCompiledCodeForInstrumentation(method) != nullptr;
  }
  return false;
}

bool JitCodeCache::ContainsMethod(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it != jni_stubs_map_.end() &&
        it->second.IsCompiled() &&
        ContainsElement(it->second.GetMethods(), method)) {
      return true;
    }
  } else {
    for (const auto& it : method_code_map_) {
      if (it.second == method) {
        return true;
      }
    }
  }
  return false;
}

const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
  DCHECK(method->IsNative());
  MutexLock mu(Thread::Current(), lock_);
  auto it = jni_stubs_map_.find(JniStubKey(method));
  if (it != jni_stubs_map_.end()) {
    JniStubData& data = it->second;
    if (data.IsCompiled() && ContainsElement(data.GetMethods(), method)) {
      return data.GetCode();
    }
  }
  return nullptr;
}

const void* JitCodeCache::FindCompiledCodeForInstrumentation(ArtMethod* method) {
  // If jit-gc is still on, the SavedEntryPoint field is used by the collection itself, so we
  // cannot use it to find the instrumentation entrypoint.
  if (LIKELY(GetGarbageCollectCode())) {
    return nullptr;
  }
  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
  if (info == nullptr) {
    return nullptr;
  }
  // When GC is disabled for trampoline tracing we will use SavedEntrypoint to hold the actual
  // jit-compiled version of the method. If jit-gc is disabled for other reasons this will just be
  // nullptr.
  return info->GetSavedEntryPoint();
}

class ScopedCodeCacheWrite : ScopedTrace {
 public:
  explicit ScopedCodeCacheWrite(const JitCodeCache* const code_cache)
      : ScopedTrace("ScopedCodeCacheWrite"),
        code_cache_(code_cache) {
    ScopedTrace trace("mprotect all");
    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
    if (updatable_pages != nullptr) {
      int prot = code_cache_->HasDualCodeMapping() ? kProtRW : kProtRWX;
      CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
    }
  }

  ~ScopedCodeCacheWrite() {
    ScopedTrace trace("mprotect code");
    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
    if (updatable_pages != nullptr) {
      int prot = code_cache_->HasDualCodeMapping() ? kProtR : kProtRX;
      CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
    }
  }

 private:
  const JitCodeCache* const code_cache_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};
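
// Illustrative RAII usage (a sketch of the pattern used by call sites in this file): write access
// to the updatable mapping is scoped to the smallest region of code that needs it, e.g.:
//
//   {
//     ScopedCodeCacheWrite scc(this);  // mprotect to RW (dual view) or RWX (single view).
//     // ... copy or free code here ...
//   }                                  // Destructor restores R / RX.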

uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  uint8_t* stack_map,
                                  uint8_t* roots_data,
                                  const uint8_t* code,
                                  size_t code_size,
                                  size_t data_size,
                                  bool osr,
                                  const std::vector<Handle<mirror::Object>>& roots,
                                  bool has_should_deoptimize_flag,
                                  const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       stack_map,
                                       roots_data,
                                       code,
                                       code_size,
                                       data_size,
                                       osr,
                                       roots,
                                       has_should_deoptimize_flag,
                                       cha_single_implementation_list);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                stack_map,
                                roots_data,
                                code,
                                code_size,
                                data_size,
                                osr,
                                roots,
                                has_should_deoptimize_flag,
                                cha_single_implementation_list);
  }
  return result;
}

bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

static uint32_t ComputeRootTableSize(uint32_t number_of_roots) {
  return sizeof(uint32_t) + number_of_roots * sizeof(GcRoot<mirror::Object>);
}

static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
  // The length of the table is stored just before the stack map (and therefore at the end of
  // the table itself), in order to be able to fetch it from a `stack_map` pointer.
  return reinterpret_cast<const uint32_t*>(stack_map)[-1];
}

static void FillRootTableLength(uint8_t* roots_data, uint32_t length) {
  // Store the length of the table at the end. This will allow fetching it from a `stack_map`
  // pointer.
  reinterpret_cast<uint32_t*>(roots_data)[length] = length;
}

static const uint8_t* FromStackMapToRoots(const uint8_t* stack_map_data) {
  return stack_map_data - ComputeRootTableSize(GetNumberOfRoots(stack_map_data));
}
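
// Sketch of the layout implied by the helpers above (derived from ComputeRootTableSize(),
// FillRootTableLength() and GetNumberOfRoots(); for orientation only):
//
//   roots_data -> [ GcRoot<mirror::Object> x N ][ uint32_t N ][ stack map / code info ... ]
//                                                              ^ `stack_map` points here
//
// GetNumberOfRoots() reads the uint32_t word immediately before `stack_map`, and
// FromStackMapToRoots() subtracts the full table size to recover `roots_data`.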

static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
    REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
  if (!kIsDebugBuild) {
    return;
  }
  // Check the validity of the roots.
  for (Handle<mirror::Object> object : roots) {
    // Ensure the string is strongly interned. b/32995596
    if (object->IsString()) {
      ObjPtr<mirror::String> str = object->AsString();
      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
      CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
    }
  }
}

void JitCodeCache::FillRootTable(uint8_t* roots_data,
                                 const std::vector<Handle<mirror::Object>>& roots) {
  GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
  const uint32_t length = roots.size();
  // Put all roots in `roots_data`.
  for (uint32_t i = 0; i < length; ++i) {
    ObjPtr<mirror::Object> object = roots[i].Get();
    gc_roots[i] = GcRoot<mirror::Object>(object);
  }
}

static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
  uint32_t roots = GetNumberOfRoots(data);
  if (number_of_roots != nullptr) {
    *number_of_roots = roots;
  }
  return data - ComputeRootTableSize(roots);
}

// Use a sentinel for marking entries in the JIT table that have been cleared.
// This helps diagnose cases where the compiled code wrongly tries to access such
// entries.
static mirror::Class* const weak_sentinel =
    reinterpret_cast<mirror::Class*>(Context::kBadGprBase + 0xff);

// Helper for the GC to process a weak class in a JIT root table.
static inline void ProcessWeakClass(GcRoot<mirror::Class>* root_ptr,
                                    IsMarkedVisitor* visitor,
                                    mirror::Class* update)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // This does not need a read barrier because this is called by GC.
  mirror::Class* cls = root_ptr->Read<kWithoutReadBarrier>();
  if (cls != nullptr && cls != weak_sentinel) {
    DCHECK((cls->IsClass<kDefaultVerifyFlags>()));
    // Look at the classloader of the class to know if it has been unloaded.
    // This does not need a read barrier because this is called by GC.
    mirror::Object* class_loader =
        cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
    if (class_loader == nullptr || visitor->IsMarked(class_loader) != nullptr) {
      // The class loader is live, update the entry if the class has moved.
      mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
      // Note that new_cls can be null for CMS and newly allocated objects.
      if (new_cls != nullptr && new_cls != cls) {
        *root_ptr = GcRoot<mirror::Class>(new_cls);
      }
    } else {
      // The class loader is not live, clear the entry.
      *root_ptr = GcRoot<mirror::Class>(update);
    }
  }
}

void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), lock_);
  for (const auto& entry : method_code_map_) {
    uint32_t number_of_roots = 0;
    uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
    GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
    for (uint32_t i = 0; i < number_of_roots; ++i) {
      // This does not need a read barrier because this is called by GC.
      mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
      if (object == nullptr || object == weak_sentinel) {
        // entry got deleted in a previous sweep.
      } else if (object->IsString<kDefaultVerifyFlags, kWithoutReadBarrier>()) {
        mirror::Object* new_object = visitor->IsMarked(object);
        // We know the string is marked because it's a strongly-interned string that
        // is always alive. The IsMarked implementation of the CMS collector returns
        // null for newly allocated objects, but we know those haven't moved. Therefore,
        // only update the entry if we get a different non-null string.
        // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
        // out of the weak access/creation pause. b/32167580
        if (new_object != nullptr && new_object != object) {
          DCHECK(new_object->IsString());
          roots[i] = GcRoot<mirror::Object>(new_object);
        }
      } else {
        ProcessWeakClass(
            reinterpret_cast<GcRoot<mirror::Class>*>(&roots[i]), visitor, weak_sentinel);
      }
    }
  }
  // Walk over inline caches to clear entries containing unloaded classes.
  for (ProfilingInfo* info : profiling_infos_) {
    for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
      InlineCache* cache = &info->cache_[i];
      for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
        ProcessWeakClass(&cache->classes_[j], visitor, nullptr);
      }
    }
  }
}

void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  // Notify native debugger that we are about to remove the code.
  // It does nothing if we are not using a native debugger.
  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
  RemoveNativeDebugInfoForJit(code_ptr);
  if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
    FreeData(GetRootTable(code_ptr));
  }  // else this is a JNI stub without any data.

  uint8_t* code_allocation = reinterpret_cast<uint8_t*>(allocation);
  if (HasDualCodeMapping()) {
    code_allocation = TranslateAddress(code_allocation, exec_pages_, non_exec_pages_);
  }

  FreeCode(code_allocation);
}

void JitCodeCache::FreeAllMethodHeaders(
    const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
  // We need to remove entries in method_headers from CHA dependencies
  // first, since once we do FreeCode() below the memory can be reused,
  // so it's possible for the same method_header to start representing
  // different compiled code.
  MutexLock mu(Thread::Current(), lock_);
  {
    MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
    Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
        ->RemoveDependentsWithMethodHeaders(method_headers);
  }

  ScopedCodeCacheWrite scc(this);
  for (const OatQuickMethodHeader* method_header : method_headers) {
    FreeCodeAndData(method_header->GetCode());
  }
}

void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // We use a set to first collect all method_headers whose code needs to be
  // removed. We need to free the underlying code after we remove CHA dependencies
  // for entries in this set. And it's more efficient to iterate through
  // the CHA dependency map just once with an unordered_set.
  std::unordered_set<OatQuickMethodHeader*> method_headers;
  {
    MutexLock mu(self, lock_);
    // We do not check if a code cache GC is in progress, as this method comes
    // with the classlinker_classes_lock_ held, and suspending ourselves could
    // lead to a deadlock.
    {
      ScopedCodeCacheWrite scc(this);
      for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
        it->second.RemoveMethodsIn(alloc);
        if (it->second.GetMethods().empty()) {
          method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->second.GetCode()));
          it = jni_stubs_map_.erase(it);
        } else {
          it->first.UpdateShorty(it->second.GetMethods().front());
          ++it;
        }
      }
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        if (alloc.ContainsUnsafe(it->second)) {
          method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
          it = method_code_map_.erase(it);
        } else {
          ++it;
        }
      }
    }
    for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->first)) {
        // Note that the code has already been pushed to method_headers in the loop
        // above and is going to be removed in FreeCode() below.
        it = osr_code_map_.erase(it);
      } else {
        ++it;
      }
    }
    for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
      ProfilingInfo* info = *it;
      if (alloc.ContainsUnsafe(info->GetMethod())) {
        info->GetMethod()->SetProfilingInfo(nullptr);
        FreeData(reinterpret_cast<uint8_t*>(info));
        it = profiling_infos_.erase(it);
      } else {
        ++it;
      }
    }
  }
  FreeAllMethodHeaders(method_headers);
}

bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
  return kUseReadBarrier
      ? self->GetWeakRefAccessEnabled()
      : is_weak_access_enabled_.load(std::memory_order_seq_cst);
}

void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) {
  if (IsWeakAccessEnabled(self)) {
    return;
  }
  ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
  MutexLock mu(self, lock_);
  while (!IsWeakAccessEnabled(self)) {
    inline_cache_cond_.Wait(self);
  }
}

void JitCodeCache::BroadcastForInlineCacheAccess() {
  Thread* self = Thread::Current();
  MutexLock mu(self, lock_);
  inline_cache_cond_.Broadcast(self);
}

void JitCodeCache::AllowInlineCacheAccess() {
  DCHECK(!kUseReadBarrier);
  is_weak_access_enabled_.store(true, std::memory_order_seq_cst);
  BroadcastForInlineCacheAccess();
}

void JitCodeCache::DisallowInlineCacheAccess() {
  DCHECK(!kUseReadBarrier);
  is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
}

void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
                                       Handle<mirror::ObjectArray<mirror::Class>> array) {
  WaitUntilInlineCacheAccessible(Thread::Current());
  // Note that we don't need to lock `lock_` here, the compiler calling
  // this method has already ensured the inline cache will not be deleted.
  for (size_t in_cache = 0, in_array = 0;
       in_cache < InlineCache::kIndividualCacheSize;
       ++in_cache) {
    mirror::Class* object = ic.classes_[in_cache].Read();
    if (object != nullptr) {
      array->Set(in_array++, object);
    }
  }
}

static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
  if (was_warm) {
    method->SetPreviouslyWarm();
  }
  // We reset the counter to 1 so that the profile knows that the method was executed at least once.
  // This is required for layout purposes.
  // We also need to make sure we'll pass the warmup threshold again, so we set to 0 if
  // the warmup threshold is 1.
  uint16_t jit_warmup_threshold = Runtime::Current()->GetJITOptions()->GetWarmupThreshold();
  method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
}

void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
  while (collection_in_progress_) {
    lock_.Unlock(self);
    {
      ScopedThreadSuspension sts(self, kSuspended);
      MutexLock mu(self, lock_);
      WaitForPotentialCollectionToComplete(self);
    }
    lock_.Lock(self);
  }
}

const MemMap* JitCodeCache::GetUpdatableCodeMapping() const {
  if (HasDualCodeMapping()) {
    return &non_exec_pages_;
  } else if (HasCodeMapping()) {
    return &exec_pages_;
  } else {
    return nullptr;
  }
}
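
// In summary (derived from the code above; illustrative, not normative):
//   - Dual mapping:   writes go through non_exec_pages_, while exec_pages_ stays RX.
//   - Single mapping: writes go through exec_pages_, toggled RX <-> RWX by ScopedCodeCacheWrite.
//   - Profile-only:   there is no code mapping at all, so there is nothing to update.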

uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          uint8_t* stack_map,
                                          uint8_t* roots_data,
                                          const uint8_t* code,
                                          size_t code_size,
                                          size_t data_size,
                                          bool osr,
                                          const std::vector<Handle<mirror::Object>>& roots,
                                          bool has_should_deoptimize_flag,
                                          const ArenaSet<ArtMethod*>&
                                              cha_single_implementation_list) {
  DCHECK(!method->IsNative() || !osr);

  if (!method->IsNative()) {
    // We need to do this before grabbing the lock_ because it needs to be able to see the string
    // InternTable. Native methods do not have roots.
    DCheckRootsAreValid(roots);
  }

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;

  MutexLock mu(self, lock_);
  // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
  // finish.
  WaitForPotentialCollectionToCompleteRunnable(self);
  {
    ScopedCodeCacheWrite scc(this);

    size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
    // Ensure the header ends up at expected instruction alignment.
    size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
    size_t total_size = header_size + code_size;

    // AllocateCode allocates memory in the non-executable region for the header and code. The
    // header size may include alignment padding.
    uint8_t* nox_memory = AllocateCode(total_size);
    if (nox_memory == nullptr) {
      return nullptr;
    }

    // code_ptr points to non-executable code.
    code_ptr = nox_memory + header_size;
    std::copy(code, code + code_size, code_ptr);
    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);

    // From here code_ptr points to executable code.
    if (HasDualCodeMapping()) {
      code_ptr = TranslateAddress(code_ptr, non_exec_pages_, exec_pages_);
    }

    new (method_header) OatQuickMethodHeader(
        (stack_map != nullptr) ? code_ptr - stack_map : 0u,
        code_size);

    DCHECK(!Runtime::Current()->IsAotCompiler());
    if (has_should_deoptimize_flag) {
      method_header->SetHasShouldDeoptimizeFlag();
    }

    // Update method_header pointer to executable code region.
    if (HasDualCodeMapping()) {
      method_header = TranslateAddress(method_header, non_exec_pages_, exec_pages_);
    }

    // Both instruction and data caches need flushing to the point of unification where both share
    // a common view of memory. Flushing the data cache ensures the dirty cachelines from the
    // newly added code are written out to the point of unification. Flushing the instruction
    // cache ensures the newly written code will be fetched from the point of unification before
    // use. Memory in the code cache is re-cycled as code is added and removed. The flushes
    // prevent stale code from residing in the instruction cache.
    //
    // Caches are flushed before write permission is removed because some ARMv8 Qualcomm kernels
    // may trigger a segfault if a page fault occurs when requesting a cache maintenance
    // operation. This is a kernel bug that we need to work around until affected devices
    // (e.g. Nexus 5X and 6P) stop being supported or their kernels are fixed.
    //
    // For reference, this behavior is caused by this commit:
    // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
    //
    if (HasDualCodeMapping()) {
      // Flush the data cache lines associated with the non-executable copy of the code just added.
      FlushDataCache(nox_memory, nox_memory + total_size);
    }
    // FlushInstructionCache() flushes both data and instruction cache lines. The cacheline range
    // flushed is for the executable mapping of the code just added.
    FlushInstructionCache(code_ptr, code_ptr + code_size);

    // Ensure CPU instruction pipelines are flushed for all cores. This is necessary for
    // correctness as code may still be in instruction pipelines despite the i-cache flush. It is
    // not safe to assume that changing permissions with mprotect (RX->RWX->RX) will cause a TLB
    // shootdown (incidentally invalidating the CPU pipelines by sending an IPI to all cores to
    // notify them of the TLB invalidation). Some architectures, notably ARM and ARM64, have
    // hardware support that broadcasts TLB invalidations and so their kernels have no software
    // based TLB shootdown. The sync-core flavor of membarrier was introduced in Linux 4.16 to
    // address this (see membarrier(2)). The membarrier here will fail on prior kernels and on
    // platforms lacking the appropriate support.
    art::membarrier(art::MembarrierCommand::kPrivateExpeditedSyncCore);

    number_of_compilations_++;
  }

  // We need to update the entry point in the runnable state for the instrumentation.
  {
    // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
    // compiled code is considered invalidated by some class linking, but below we still make the
    // compiled code valid for the method. Need cha_lock_ for checking all single-implementation
    // flags and register dependencies.
    MutexLock cha_mu(self, *Locks::cha_lock_);
    bool single_impl_still_valid = true;
    for (ArtMethod* single_impl : cha_single_implementation_list) {
      if (!single_impl->HasSingleImplementation()) {
        // Simply discard the compiled code. Clear the counter so that it may be recompiled later.
        // Hopefully the class hierarchy will be more stable when compilation is retried.
        single_impl_still_valid = false;
        ClearMethodCounter(method, /*was_warm*/ false);
        break;
      }
    }

    // Discard the code if any single-implementation assumptions are now invalid.
    if (!single_impl_still_valid) {
      VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
      return nullptr;
    }
    DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
        << "Should not be using cha on debuggable apps/runs!";

    for (ArtMethod* single_impl : cha_single_implementation_list) {
      Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()->AddDependency(
          single_impl, method, method_header);
    }

    if (UNLIKELY(method->IsNative())) {
      auto it = jni_stubs_map_.find(JniStubKey(method));
      DCHECK(it != jni_stubs_map_.end())
          << "Entry inserted in NotifyCompilationOf() should be alive.";
      JniStubData* data = &it->second;
      DCHECK(ContainsElement(data->GetMethods(), method))
          << "Entry inserted in NotifyCompilationOf() should contain this method.";
      data->SetCode(code_ptr);
      instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
      for (ArtMethod* m : data->GetMethods()) {
        instrum->UpdateMethodsCode(m, method_header->GetEntryPoint());
      }
    } else {
      // Fill the root table before updating the entry point.
      DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
      DCHECK_LE(roots_data, stack_map);
      FillRootTable(roots_data, roots);
      {
        // Flush data cache, as compiled code references literals in it.
        FlushDataCache(roots_data, roots_data + data_size);
      }
      method_code_map_.Put(code_ptr, method);
      if (osr) {
        number_of_osr_compilations_++;
        osr_code_map_.Put(method, code_ptr);
      } else {
        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
            method, method_header->GetEntryPoint());
      }
    }
    VLOG(jit)
        << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
        << ArtMethod::PrettyMethod(method) << "@" << method
        << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
        << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
        << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
        << reinterpret_cast<const void*>(method_header->GetEntryPoint() +
                                         method_header->GetCodeSize());
    histogram_code_memory_use_.AddValue(code_size);
    if (code_size > kCodeSizeLogThreshold) {
      LOG(INFO) << "JIT allocated "
                << PrettySize(code_size)
                << " for compiled code of "
                << ArtMethod::PrettyMethod(method);
    }
  }

  return reinterpret_cast<uint8_t*>(method_header);
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return CodeCacheSizeLocked();
}

bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
  // This function is used only for testing and only with non-native methods.
  CHECK(!method->IsNative());

  MutexLock mu(Thread::Current(), lock_);

  bool osr = osr_code_map_.find(method) != osr_code_map_.end();
  bool in_cache = RemoveMethodLocked(method, release_memory);

  if (!in_cache) {
    return false;
  }

  method->ClearCounter();
  Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
      method, GetQuickToInterpreterBridge());
  VLOG(jit)
      << "JIT removed (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
      << ArtMethod::PrettyMethod(method) << "@" << method
      << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
      << " dcache_size=" << PrettySize(DataCacheSizeLocked());
  return true;
}

bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
  if (LIKELY(!method->IsNative())) {
    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
    if (info != nullptr) {
      RemoveElement(profiling_infos_, info);
    }
    method->SetProfilingInfo(nullptr);
  }

  bool in_cache = false;
  ScopedCodeCacheWrite ccw(this);
  if (UNLIKELY(method->IsNative())) {
    auto it = jni_stubs_map_.find(JniStubKey(method));
    if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
      in_cache = true;
      if (it->second.GetMethods().empty()) {
        if (release_memory) {
          FreeCodeAndData(it->second.GetCode());
        }
        jni_stubs_map_.erase(it);
      } else {
        it->first.UpdateShorty(it->second.GetMethods().front());
      }
    }
  } else {
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (it->second == method) {
        in_cache = true;
        if (release_memory) {
          FreeCodeAndData(it->first);
        }
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }

    auto osr_it = osr_code_map_.find(method);
    if (osr_it != osr_code_map_.end()) {
      osr_code_map_.erase(osr_it);
    }
  }

  return in_cache;
}

// This notifies the code cache that the given method has been redefined and that it should remove
// any cached information it has on the method. All threads must be suspended before calling this
1156// method. The compiled code for the method (if there is any) must not be in any thread's call stack.
1157void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
1158 MutexLock mu(Thread::Current(), lock_);
Vladimir Marko2196c652017-11-30 16:16:07 +00001159 RemoveMethodLocked(method, /* release_memory */ true);
Alex Lightdba61482016-12-21 08:20:29 -08001160}
1161
1162// This invalidates old_method. Once this function returns, old_method can no longer be used to
1163// execute code unless it is fixed up. This fixup will happen later in the process of installing a
1164// class redefinition.
1165// TODO We should add some info to ArtMethod to note that 'old_method' has been invalidated and
1166// shouldn't be used since it is no longer logically in the jit code cache.
1167// TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
1168void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
Vladimir Marko2196c652017-11-30 16:16:07 +00001169 MutexLock mu(Thread::Current(), lock_);
Alex Lighteee0bd42017-02-14 15:31:45 +00001170 if (old_method->IsNative()) {
Vladimir Marko2196c652017-11-30 16:16:07 +00001171 // Update methods in jni_stubs_map_.
1172 for (auto& entry : jni_stubs_map_) {
1173 JniStubData& data = entry.second;
1174 data.MoveObsoleteMethod(old_method, new_method);
1175 }
Alex Lighteee0bd42017-02-14 15:31:45 +00001176 return;
1177 }
Alex Lightdba61482016-12-21 08:20:29 -08001178 // Update ProfilingInfo to the new one and remove it from the old_method.
1179 if (old_method->GetProfilingInfo(kRuntimePointerSize) != nullptr) {
1180 DCHECK_EQ(old_method->GetProfilingInfo(kRuntimePointerSize)->GetMethod(), old_method);
1181 ProfilingInfo* info = old_method->GetProfilingInfo(kRuntimePointerSize);
1182 old_method->SetProfilingInfo(nullptr);
1183 // Since the JIT should be paused and all threads suspended by the time this is called these
1184 // checks should always pass.
1185 DCHECK(!info->IsInUseByCompiler());
1186 new_method->SetProfilingInfo(info);
Alex Light2d441b12018-06-08 15:33:21 -07001187 // Get rid of the old saved entrypoint if it is there.
1188 info->SetSavedEntryPoint(nullptr);
Alex Lightdba61482016-12-21 08:20:29 -08001189 info->method_ = new_method;
1190 }
1191 // Update method_code_map_ to point to the new method.
1192 for (auto& it : method_code_map_) {
1193 if (it.second == old_method) {
1194 it.second = new_method;
1195 }
1196 }
1197 // Update osr_code_map_ to point to the new method.
1198 auto code_map = osr_code_map_.find(old_method);
1199 if (code_map != osr_code_map_.end()) {
1200 osr_code_map_.Put(new_method, code_map->second);
1201 osr_code_map_.erase(old_method);
1202 }
1203}
1204
Nicolas Geoffraya5891e82015-11-06 14:18:27 +00001205size_t JitCodeCache::CodeCacheSizeLocked() {
Nicolas Geoffray38ea9bd2016-02-19 16:25:57 +00001206 return used_memory_for_code_;
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +01001207}
1208
1209size_t JitCodeCache::DataCacheSize() {
1210 MutexLock mu(Thread::Current(), lock_);
Nicolas Geoffraya5891e82015-11-06 14:18:27 +00001211 return DataCacheSizeLocked();
1212}
1213
1214size_t JitCodeCache::DataCacheSizeLocked() {
Nicolas Geoffray38ea9bd2016-02-19 16:25:57 +00001215 return used_memory_for_data_;
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08001216}
1217
Nicolas Geoffrayf46501c2016-11-22 13:45:36 +00001218void JitCodeCache::ClearData(Thread* self,
1219 uint8_t* stack_map_data,
1220 uint8_t* roots_data) {
1221 DCHECK_EQ(FromStackMapToRoots(stack_map_data), roots_data);
Nicolas Geoffrayd28b9692015-11-04 14:36:55 +00001222 MutexLock mu(self, lock_);
Nicolas Geoffrayf46501c2016-11-22 13:45:36 +00001223 FreeData(reinterpret_cast<uint8_t*>(roots_data));
Nicolas Geoffrayd28b9692015-11-04 14:36:55 +00001224}
1225
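// Data for a compiled method is laid out as a single allocation holding the GC root table
// followed by the stack map, rounded up to pointer size:
//   [ root table (length + roots) | stack map ]
// ReserveData() hands back the two interior pointers via roots_data and stack_map_data;
// ClearData() frees the whole allocation through the roots_data pointer.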
Nicolas Geoffrayed015ac2016-12-15 17:58:48 +00001226size_t JitCodeCache::ReserveData(Thread* self,
1227 size_t stack_map_size,
1228 size_t number_of_roots,
1229 ArtMethod* method,
1230 uint8_t** stack_map_data,
1231 uint8_t** roots_data) {
Nicolas Geoffray132d8362016-11-16 09:19:42 +00001232 size_t table_size = ComputeRootTableSize(number_of_roots);
David Srbecky8cd54542018-07-15 23:58:44 +01001233 size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001234 uint8_t* result = nullptr;
1235
1236 {
1237 ScopedThreadSuspension sts(self, kSuspended);
1238 MutexLock mu(self, lock_);
1239 WaitForPotentialCollectionToComplete(self);
Nicolas Geoffray38ea9bd2016-02-19 16:25:57 +00001240 result = AllocateData(size);
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001241 }
1242
1243 if (result == nullptr) {
1244 // Retry.
1245 GarbageCollectCache(self);
1246 ScopedThreadSuspension sts(self, kSuspended);
1247 MutexLock mu(self, lock_);
1248 WaitForPotentialCollectionToComplete(self);
Nicolas Geoffray38ea9bd2016-02-19 16:25:57 +00001249 result = AllocateData(size);
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001250 }
1251
Nicolas Geoffray933330a2016-03-16 14:20:06 +00001252 MutexLock mu(self, lock_);
1253 histogram_stack_map_memory_use_.AddValue(size);
1254 if (size > kStackMapSizeLogThreshold) {
1255 LOG(INFO) << "JIT allocated "
1256 << PrettySize(size)
1257 << " for stack maps of "
David Sehr709b0702016-10-13 09:12:37 -07001258 << ArtMethod::PrettyMethod(method);
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08001259 }
Nicolas Geoffrayf4b94422016-12-05 00:10:09 +00001260 if (result != nullptr) {
1261 *roots_data = result;
1262 *stack_map_data = result + table_size;
1263 FillRootTableLength(*roots_data, number_of_roots);
Nicolas Geoffrayed015ac2016-12-15 17:58:48 +00001264 return size;
Nicolas Geoffrayf4b94422016-12-05 00:10:09 +00001265 } else {
1266 *roots_data = nullptr;
1267 *stack_map_data = nullptr;
Nicolas Geoffrayed015ac2016-12-15 17:58:48 +00001268 return 0;
Nicolas Geoffrayf4b94422016-12-05 00:10:09 +00001269 }
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08001270}
1271
Roland Levillainbbc6e7e2018-08-24 16:58:47 +01001272class MarkCodeVisitor final : public StackVisitor {
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001273 public:
1274 MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
1275 : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
1276 code_cache_(code_cache_in),
1277 bitmap_(code_cache_->GetLiveBitmap()) {}
1278
Roland Levillainbbc6e7e2018-08-24 16:58:47 +01001279 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001280 const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
1281 if (method_header == nullptr) {
1282 return true;
1283 }
1284 const void* code = method_header->GetCode();
1285 if (code_cache_->ContainsPc(code)) {
1286 // Use the atomic set version, as multiple threads are executing this code.
1287 bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
1288 }
1289 return true;
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08001290 }
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001291
1292 private:
1293 JitCodeCache* const code_cache_;
1294 CodeCacheBitmap* const bitmap_;
1295};
1296
Roland Levillainbbc6e7e2018-08-24 16:58:47 +01001297class MarkCodeClosure final : public Closure {
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001298 public:
1299 MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
1300 : code_cache_(code_cache), barrier_(barrier) {}
1301
Roland Levillainbbc6e7e2018-08-24 16:58:47 +01001302 void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001303 ScopedTrace trace(__PRETTY_FUNCTION__);
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001304 DCHECK(thread == Thread::Current() || thread->IsSuspended());
1305 MarkCodeVisitor visitor(thread, code_cache_);
1306 visitor.WalkStack();
Nicolas Geoffray5a23d2e2015-11-03 18:58:57 +00001307 if (kIsDebugBuild) {
1308 // The stack walking code queries the side instrumentation stack if it
1309 // sees an instrumentation exit pc, so the JIT code of methods in that stack
1310 // must have been seen. We sanity check this below.
1311 for (const instrumentation::InstrumentationStackFrame& frame
1312 : *thread->GetInstrumentationStack()) {
1313 // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
1314        // its stack frame; it is not the method owning return_pc_. We just pass null to
1315 // LookupMethodHeader: the method is only checked against in debug builds.
1316 OatQuickMethodHeader* method_header =
Vladimir Marko2196c652017-11-30 16:16:07 +00001317 code_cache_->LookupMethodHeader(frame.return_pc_, /* method */ nullptr);
Nicolas Geoffray5a23d2e2015-11-03 18:58:57 +00001318 if (method_header != nullptr) {
1319 const void* code = method_header->GetCode();
1320 CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
1321 }
1322 }
1323 }
Mathieu Chartier10d25082015-10-28 18:36:09 -07001324 barrier_->Pass(Thread::Current());
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08001325 }
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001326
1327 private:
1328 JitCodeCache* const code_cache_;
1329 Barrier* const barrier_;
1330};
1331
Nicolas Geoffray0a3be162015-11-18 11:15:22 +00001332void JitCodeCache::NotifyCollectionDone(Thread* self) {
1333 collection_in_progress_ = false;
1334 lock_cond_.Broadcast(self);
1335}
1336
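// The footprint limit is split evenly between the data mspace and, when a separate
// executable mapping exists, the executable mspace.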
1337void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
1338 size_t per_space_footprint = new_footprint / 2;
Orion Hodsondbd05fe2017-08-10 11:41:35 +01001339 DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
Nicolas Geoffray0a3be162015-11-18 11:15:22 +00001340 DCHECK_EQ(per_space_footprint * 2, new_footprint);
1341 mspace_set_footprint_limit(data_mspace_, per_space_footprint);
Orion Hodson1d3fd082018-09-28 09:38:35 +01001342 if (HasCodeMapping()) {
Calin Juravle016fcbe22018-05-03 19:47:35 -07001343 ScopedCodeCacheWrite scc(this);
Orion Hodson1d3fd082018-09-28 09:38:35 +01001344 mspace_set_footprint_limit(exec_mspace_, per_space_footprint);
Nicolas Geoffray0a3be162015-11-18 11:15:22 +00001345 }
1346}
1347
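// Grows the current capacity towards max_capacity_: the capacity doubles while below 1MB
// and then grows in 1MB steps, e.g. 64KB -> 128KB -> ... -> 1MB -> 2MB -> 3MB (the exact
// sequence depends on the configured initial capacity). Returns false once max_capacity_
// has been reached.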
1348bool JitCodeCache::IncreaseCodeCacheCapacity() {
1349 if (current_capacity_ == max_capacity_) {
1350 return false;
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001351 }
1352
Nicolas Geoffray0a3be162015-11-18 11:15:22 +00001353 // Double the capacity if we're below 1MB, or increase it by 1MB if
1354 // we're above.
1355 if (current_capacity_ < 1 * MB) {
1356 current_capacity_ *= 2;
1357 } else {
1358 current_capacity_ += 1 * MB;
1359 }
1360 if (current_capacity_ > max_capacity_) {
1361 current_capacity_ = max_capacity_;
1362 }
1363
Nicolas Geoffray646d6382017-08-09 10:50:00 +01001364 VLOG(jit) << "Increasing code cache capacity to " << PrettySize(current_capacity_);
Nicolas Geoffray0a3be162015-11-18 11:15:22 +00001365
1366 SetFootprintLimit(current_capacity_);
1367
1368 return true;
1369}
1370
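// Runs a checkpoint on all threads so that each thread marks, in the live bitmap, the JIT
// compiled code currently on its own stack, then waits on a barrier until every thread has
// run the closure.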
Nicolas Geoffray8d372502016-02-23 13:56:43 +00001371void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
1372 Barrier barrier(0);
1373 size_t threads_running_checkpoint = 0;
1374 MarkCodeClosure closure(this, &barrier);
1375 threads_running_checkpoint = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1376 // Now that we have run our checkpoint, move to a suspended state and wait
1377 // for other threads to run the checkpoint.
1378 ScopedThreadSuspension sts(self, kSuspended);
1379 if (threads_running_checkpoint != 0) {
1380 barrier.Increment(self, threads_running_checkpoint);
1381 }
1382}
1383
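// Collection policy: always full once the cache has reached its maximum capacity, always
// partial while below the reserved capacity, and otherwise alternating, with a full
// collection following a collection that grew the code cache.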
Nicolas Geoffray35122442016-03-02 12:05:30 +00001384bool JitCodeCache::ShouldDoFullCollection() {
1385 if (current_capacity_ == max_capacity_) {
1386 // Always do a full collection when the code cache is full.
1387 return true;
1388 } else if (current_capacity_ < kReservedCapacity) {
1389 // Always do partial collection when the code cache size is below the reserved
1390 // capacity.
1391 return false;
1392 } else if (last_collection_increased_code_cache_) {
1393 // This time do a full collection.
1394 return true;
1395 } else {
1396 // This time do a partial collection.
1397 return false;
Nicolas Geoffray8d372502016-02-23 13:56:43 +00001398 }
1399}
1400
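// Collection entry point. If code collection is disabled this only grows the cache.
// Otherwise it waits for (or starts) a collection, allocates the live bitmap over the
// executable region, runs DoCollection(), and, when the next collection is expected to be
// a full one, flips entrypoints back to the interpreter / GenericJNI so that only methods
// that are still invoked get their compiled code revived.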
Nicolas Geoffray0a3be162015-11-18 11:15:22 +00001401void JitCodeCache::GarbageCollectCache(Thread* self) {
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001402 ScopedTrace trace(__FUNCTION__);
Nicolas Geoffray8d372502016-02-23 13:56:43 +00001403 if (!garbage_collect_code_) {
1404 MutexLock mu(self, lock_);
1405 IncreaseCodeCacheCapacity();
1406 return;
1407 }
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001408
Nicolas Geoffraya5891e82015-11-06 14:18:27 +00001409 // Wait for an existing collection, or let everyone know we are starting one.
1410 {
1411 ScopedThreadSuspension sts(self, kSuspended);
1412 MutexLock mu(self, lock_);
1413 if (WaitForPotentialCollectionToComplete(self)) {
1414 return;
1415 } else {
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00001416 number_of_collections_++;
Nicolas Geoffray0a3be162015-11-18 11:15:22 +00001417 live_bitmap_.reset(CodeCacheBitmap::Create(
1418 "code-cache-bitmap",
Orion Hodson1d3fd082018-09-28 09:38:35 +01001419 reinterpret_cast<uintptr_t>(exec_pages_.Begin()),
1420 reinterpret_cast<uintptr_t>(exec_pages_.Begin() + current_capacity_ / 2)));
Nicolas Geoffray8d372502016-02-23 13:56:43 +00001421 collection_in_progress_ = true;
1422 }
1423 }
1424
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00001425 TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
Nicolas Geoffray8d372502016-02-23 13:56:43 +00001426 {
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00001427 TimingLogger::ScopedTiming st("Code cache collection", &logger);
Nicolas Geoffray0a3be162015-11-18 11:15:22 +00001428
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00001429 bool do_full_collection = false;
1430 {
1431 MutexLock mu(self, lock_);
1432 do_full_collection = ShouldDoFullCollection();
Nicolas Geoffraya96917a2016-03-01 22:18:02 +00001433 }
1434
Nicolas Geoffray646d6382017-08-09 10:50:00 +01001435 VLOG(jit) << "Do "
1436 << (do_full_collection ? "full" : "partial")
1437 << " code cache collection, code="
1438 << PrettySize(CodeCacheSize())
1439 << ", data=" << PrettySize(DataCacheSize());
Nicolas Geoffray35122442016-03-02 12:05:30 +00001440
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00001441 DoCollection(self, /* collect_profiling_info */ do_full_collection);
1442
Nicolas Geoffray646d6382017-08-09 10:50:00 +01001443 VLOG(jit) << "After code cache collection, code="
1444 << PrettySize(CodeCacheSize())
1445 << ", data=" << PrettySize(DataCacheSize());
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00001446
1447 {
1448 MutexLock mu(self, lock_);
1449
1450 // Increase the code cache only when we do partial collections.
1451 // TODO: base this strategy on how full the code cache is?
1452 if (do_full_collection) {
1453 last_collection_increased_code_cache_ = false;
1454 } else {
1455 last_collection_increased_code_cache_ = true;
1456 IncreaseCodeCacheCapacity();
Nicolas Geoffray35122442016-03-02 12:05:30 +00001457 }
1458
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00001459 bool next_collection_will_be_full = ShouldDoFullCollection();
1460
1461 // Start polling the liveness of compiled code to prepare for the next full collection.
Nicolas Geoffray480d5102016-04-18 12:09:30 +01001462 if (next_collection_will_be_full) {
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00001463 // Save the entry point of methods we have compiled, and update the entry
1464 // point of those methods to the interpreter. If the method is invoked, the
1465 // interpreter will update its entry point to the compiled code and call it.
1466 for (ProfilingInfo* info : profiling_infos_) {
1467 const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
1468 if (ContainsPc(entry_point)) {
1469 info->SetSavedEntryPoint(entry_point);
Vladimir Marko2196c652017-11-30 16:16:07 +00001470 // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
Nicolas Geoffray3b1a7f42017-02-22 10:21:00 +00001471 // class of the method. We may be concurrently running a GC which makes accessing
1472 // the class unsafe. We know it is OK to bypass the instrumentation as we've just
1473 // checked that the current entry point is JIT compiled code.
1474 info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00001475 }
1476 }
1477
1478 DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
Vladimir Marko2196c652017-11-30 16:16:07 +00001479
1480 // Change entry points of native methods back to the GenericJNI entrypoint.
1481 for (const auto& entry : jni_stubs_map_) {
1482 const JniStubData& data = entry.second;
1483 if (!data.IsCompiled()) {
1484 continue;
1485 }
1486 // Make sure a single invocation of the GenericJNI trampoline tries to recompile.
1487 uint16_t new_counter = Runtime::Current()->GetJit()->HotMethodThreshold() - 1u;
1488 const OatQuickMethodHeader* method_header =
1489 OatQuickMethodHeader::FromCodePointer(data.GetCode());
1490 for (ArtMethod* method : data.GetMethods()) {
1491 if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
1492 // Don't call Instrumentation::UpdateMethodsCode(), same as for normal methods above.
1493 method->SetCounter(new_counter);
1494 method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
1495 }
1496 }
1497 }
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00001498 }
1499 live_bitmap_.reset(nullptr);
1500 NotifyCollectionDone(self);
Nicolas Geoffray35122442016-03-02 12:05:30 +00001501 }
Nicolas Geoffray35122442016-03-02 12:05:30 +00001502 }
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00001503 Runtime::Current()->GetJit()->AddTimingLogger(logger);
Nicolas Geoffray35122442016-03-02 12:05:30 +00001504}
1505
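// Sweeping step of a collection: under the lock, erase from jni_stubs_map_ and
// method_code_map_ every compiled entry whose allocation is not marked in the live bitmap,
// then free the collected method headers outside of the map iteration.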
Nicolas Geoffray9abb2972016-03-04 14:32:59 +00001506void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001507 ScopedTrace trace(__FUNCTION__);
Mingyao Yang063fc772016-08-02 11:02:54 -07001508 std::unordered_set<OatQuickMethodHeader*> method_headers;
1509 {
1510 MutexLock mu(self, lock_);
Calin Juravle016fcbe22018-05-03 19:47:35 -07001511 ScopedCodeCacheWrite scc(this);
Mingyao Yang063fc772016-08-02 11:02:54 -07001512 // Iterate over all compiled code and remove entries that are not marked.
Vladimir Marko2196c652017-11-30 16:16:07 +00001513 for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
1514 JniStubData* data = &it->second;
1515 if (!data->IsCompiled() || GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
1516 ++it;
1517 } else {
1518 method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
1519 it = jni_stubs_map_.erase(it);
1520 }
1521 }
Mingyao Yang063fc772016-08-02 11:02:54 -07001522 for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
1523 const void* code_ptr = it->first;
1524 uintptr_t allocation = FromCodeToAllocation(code_ptr);
1525 if (GetLiveBitmap()->Test(allocation)) {
1526 ++it;
1527 } else {
Alex Light2d441b12018-06-08 15:33:21 -07001528 OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1529 method_headers.insert(header);
Mingyao Yang063fc772016-08-02 11:02:54 -07001530 it = method_code_map_.erase(it);
1531 }
Nicolas Geoffray35122442016-03-02 12:05:30 +00001532 }
1533 }
Mingyao Yang063fc772016-08-02 11:02:54 -07001534 FreeAllMethodHeaders(method_headers);
Nicolas Geoffray35122442016-03-02 12:05:30 +00001535}
1536
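// Marking step of a collection. Roughly: for a full collection (collect_profiling_info),
// clear the ProfilingInfo of methods whose entrypoint is no longer in the cache and drop
// saved entrypoints; then mark code that is still an entrypoint, empty the OSR map, run
// the thread-stack checkpoint, remove unmarked code, and finally free profiling infos
// that are no longer needed.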
1537void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001538 ScopedTrace trace(__FUNCTION__);
Nicolas Geoffray35122442016-03-02 12:05:30 +00001539 {
1540 MutexLock mu(self, lock_);
1541 if (collect_profiling_info) {
1542      // Clear the profiling info of methods that do not have compiled code as their entrypoint.
1543 // Also remove the saved entry point from the ProfilingInfo objects.
1544 for (ProfilingInfo* info : profiling_infos_) {
1545 const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
Nicolas Geoffrayb6e20ae2016-03-07 14:29:04 +00001546 if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) {
Nicolas Geoffray35122442016-03-02 12:05:30 +00001547 info->GetMethod()->SetProfilingInfo(nullptr);
1548 }
Nicolas Geoffrayb9a639d2016-03-22 11:25:20 +00001549
1550 if (info->GetSavedEntryPoint() != nullptr) {
1551 info->SetSavedEntryPoint(nullptr);
1552        // We are going to move this method back to the interpreter. Clear the counter now to
Mathieu Chartierf044c222017-05-31 15:27:54 -07001553 // give it a chance to be hot again.
1554 ClearMethodCounter(info->GetMethod(), /*was_warm*/ true);
Nicolas Geoffrayb9a639d2016-03-22 11:25:20 +00001555 }
Nicolas Geoffray35122442016-03-02 12:05:30 +00001556 }
1557 } else if (kIsDebugBuild) {
1558 // Sanity check that the profiling infos do not have a dangling entry point.
1559 for (ProfilingInfo* info : profiling_infos_) {
1560 DCHECK(info->GetSavedEntryPoint() == nullptr);
Nicolas Geoffray73be1e82015-09-17 15:22:56 +01001561 }
Nicolas Geoffray26705e22015-10-28 12:50:11 +00001562 }
Nicolas Geoffrayb331feb2016-02-05 16:51:53 +00001563
Nicolas Geoffray9abb2972016-03-04 14:32:59 +00001564    // Mark compiled code that is the entrypoint of an ArtMethod. Compiled code that is not
1565    // an entry point is either:
1566    // - OSR compiled code, which will be removed if not in a thread's call stack.
1567    // - discarded compiled code, which will be removed if not in a thread's call stack.
Vladimir Marko2196c652017-11-30 16:16:07 +00001568 for (const auto& entry : jni_stubs_map_) {
1569 const JniStubData& data = entry.second;
1570 const void* code_ptr = data.GetCode();
1571 const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1572 for (ArtMethod* method : data.GetMethods()) {
1573 if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
1574 GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
1575 break;
1576 }
1577 }
1578 }
Nicolas Geoffray9abb2972016-03-04 14:32:59 +00001579 for (const auto& it : method_code_map_) {
1580 ArtMethod* method = it.second;
1581 const void* code_ptr = it.first;
1582 const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1583 if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
1584 GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
1585 }
1586 }
1587
Nicolas Geoffrayd9994f02016-02-11 17:35:55 +00001588    // Empty the OSR method map, as OSR compiled code will be deleted (except the code
Nicolas Geoffrayb331feb2016-02-05 16:51:53 +00001589    // on thread stacks).
1590 osr_code_map_.clear();
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001591 }
1592
1593 // Run a checkpoint on all threads to mark the JIT compiled code they are running.
Nicolas Geoffray8d372502016-02-23 13:56:43 +00001594 MarkCompiledCodeOnThreadStacks(self);
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001595
Nicolas Geoffray9abb2972016-03-04 14:32:59 +00001596 // At this point, mutator threads are still running, and entrypoints of methods can
1597 // change. We do know they cannot change to a code cache entry that is not marked,
1598 // therefore we can safely remove those entries.
1599 RemoveUnmarkedCode(self);
Nicolas Geoffraya96917a2016-03-01 22:18:02 +00001600
Nicolas Geoffray35122442016-03-02 12:05:30 +00001601 if (collect_profiling_info) {
1602 MutexLock mu(self, lock_);
1603    // Free all profiling infos of methods that are neither compiled nor being compiled.
Nicolas Geoffray73be1e82015-09-17 15:22:56 +01001604 auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
Nicolas Geoffray38ea9bd2016-02-19 16:25:57 +00001605 [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
Nicolas Geoffray35122442016-03-02 12:05:30 +00001606 const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
Nicolas Geoffray511e41b2016-03-02 17:09:35 +00001607 // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope
1608 // that the compiled code would not get revived. As mutator threads run concurrently,
1609 // they may have revived the compiled code, and now we are in the situation where
1610 // a method has compiled code but no ProfilingInfo.
1611 // We make sure compiled methods have a ProfilingInfo object. It is needed for
1612 // code cache collection.
Andreas Gampe542451c2016-07-26 09:02:02 -07001613 if (ContainsPc(ptr) &&
1614 info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
Nicolas Geoffray35122442016-03-02 12:05:30 +00001615 info->GetMethod()->SetProfilingInfo(info);
Andreas Gampe542451c2016-07-26 09:02:02 -07001616 } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
Nicolas Geoffray35122442016-03-02 12:05:30 +00001617 // No need for this ProfilingInfo object anymore.
Nicolas Geoffray38ea9bd2016-02-19 16:25:57 +00001618 FreeData(reinterpret_cast<uint8_t*>(info));
Nicolas Geoffray73be1e82015-09-17 15:22:56 +01001619 return true;
1620 }
1621 return false;
1622 });
1623 profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());
Nicolas Geoffray35122442016-03-02 12:05:30 +00001624 DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001625 }
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08001626}
1627
Nicolas Geoffray35122442016-03-02 12:05:30 +00001628bool JitCodeCache::CheckLiveCompiledCodeHasProfilingInfo() {
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001629 ScopedTrace trace(__FUNCTION__);
Nicolas Geoffray35122442016-03-02 12:05:30 +00001630 // Check that methods we have compiled do have a ProfilingInfo object. We would
1631 // have memory leaks of compiled code otherwise.
1632 for (const auto& it : method_code_map_) {
1633 ArtMethod* method = it.second;
Andreas Gampe542451c2016-07-26 09:02:02 -07001634 if (method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
Nicolas Geoffray35122442016-03-02 12:05:30 +00001635 const void* code_ptr = it.first;
1636 const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1637 if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
1638 // If the code is not dead, then we have a problem. Note that this can even
1639 // happen just after a collection, as mutator threads are running in parallel
1640        // and could deoptimize existing compiled code.
1641 return false;
1642 }
1643 }
1644 }
1645 return true;
1646}
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001647
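// Maps a pc inside JIT code back to its OatQuickMethodHeader. For regular methods this is
// a lower_bound search in method_code_map_ (keyed by code pointer); for native methods the
// stub is looked up in jni_stubs_map_ via JniStubKey. On Thumb-2 the pc is adjusted by one
// before the search. Returns null if the pc is not in the cache.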
1648OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
Vladimir Marko33bff252017-11-01 14:35:42 +00001649 static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
1650 if (kRuntimeISA == InstructionSet::kArm) {
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001651 // On Thumb-2, the pc is offset by one.
1652 --pc;
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08001653 }
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001654 if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
1655 return nullptr;
1656 }
1657
Vladimir Marko2196c652017-11-30 16:16:07 +00001658 if (!kIsDebugBuild) {
1659 // Called with null `method` only from MarkCodeClosure::Run() in debug build.
1660 CHECK(method != nullptr);
Vladimir Marko47d31852017-11-28 18:36:12 +00001661 }
Vladimir Markoe7441632017-11-29 13:00:56 +00001662
Vladimir Marko2196c652017-11-30 16:16:07 +00001663 MutexLock mu(Thread::Current(), lock_);
1664 OatQuickMethodHeader* method_header = nullptr;
1665 ArtMethod* found_method = nullptr; // Only for DCHECK(), not for JNI stubs.
1666 if (method != nullptr && UNLIKELY(method->IsNative())) {
1667 auto it = jni_stubs_map_.find(JniStubKey(method));
1668 if (it == jni_stubs_map_.end() || !ContainsElement(it->second.GetMethods(), method)) {
1669 return nullptr;
1670 }
1671 const void* code_ptr = it->second.GetCode();
1672 method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1673 if (!method_header->Contains(pc)) {
1674 return nullptr;
1675 }
1676 } else {
1677 auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
1678 if (it != method_code_map_.begin()) {
1679 --it;
1680 const void* code_ptr = it->first;
1681 if (OatQuickMethodHeader::FromCodePointer(code_ptr)->Contains(pc)) {
1682 method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
1683 found_method = it->second;
1684 }
1685 }
1686 if (method_header == nullptr && method == nullptr) {
1687 // Scan all compiled JNI stubs as well. This slow search is used only
1688      // for checks in debug builds; for release builds, `method` is not null.
1689 for (auto&& entry : jni_stubs_map_) {
1690 const JniStubData& data = entry.second;
1691 if (data.IsCompiled() &&
1692 OatQuickMethodHeader::FromCodePointer(data.GetCode())->Contains(pc)) {
1693 method_header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
1694 }
1695 }
1696 }
1697 if (method_header == nullptr) {
1698 return nullptr;
1699 }
Nicolas Geoffray056d7752017-11-30 09:12:13 +00001700 }
Vladimir Marko2196c652017-11-30 16:16:07 +00001701
1702 if (kIsDebugBuild && method != nullptr && !method->IsNative()) {
Alex Light1ebe4fe2017-01-30 14:57:11 -08001703    // When we are walking the stack to redefine classes and creating obsolete methods, it is
1704 // possible that we might have updated the method_code_map by making this method obsolete in a
1705 // previous frame. Therefore we should just check that the non-obsolete version of this method
1706 // is the one we expect. We change to the non-obsolete versions in the error message since the
1707 // obsolete version of the method might not be fully initialized yet. This situation can only
1708 // occur when we are in the process of allocating and setting up obsolete methods. Otherwise
Andreas Gampe06c42a52017-07-26 14:17:14 -07001709 // method and it->second should be identical. (See openjdkjvmti/ti_redefine.cc for more
Alex Light1ebe4fe2017-01-30 14:57:11 -08001710 // information.)
Vladimir Marko2196c652017-11-30 16:16:07 +00001711 DCHECK_EQ(found_method->GetNonObsoleteMethod(), method->GetNonObsoleteMethod())
Alex Light1ebe4fe2017-01-30 14:57:11 -08001712 << ArtMethod::PrettyMethod(method->GetNonObsoleteMethod()) << " "
Vladimir Marko2196c652017-11-30 16:16:07 +00001713 << ArtMethod::PrettyMethod(found_method->GetNonObsoleteMethod()) << " "
David Sehr709b0702016-10-13 09:12:37 -07001714 << std::hex << pc;
Nicolas Geoffray5a23d2e2015-11-03 18:58:57 +00001715 }
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +01001716 return method_header;
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08001717}
1718
Nicolas Geoffrayb331feb2016-02-05 16:51:53 +00001719OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
1720 MutexLock mu(Thread::Current(), lock_);
1721 auto it = osr_code_map_.find(method);
1722 if (it == osr_code_map_.end()) {
1723 return nullptr;
1724 }
1725 return OatQuickMethodHeader::FromCodePointer(it->second);
1726}
1727
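// Allocates (or returns the existing) ProfilingInfo for `method` in the data space. When
// called for the interpreter (retry_allocation == false) this only TryLock()s the cache
// lock to avoid contending with the JIT; with retry_allocation it triggers a collection
// and retries once if the first allocation fails.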
Nicolas Geoffray26705e22015-10-28 12:50:11 +00001728ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
1729 ArtMethod* method,
1730 const std::vector<uint32_t>& entries,
Nicolas Geoffray1e7da9b2016-03-01 14:11:40 +00001731 bool retry_allocation)
1732 // No thread safety analysis as we are using TryLock/Unlock explicitly.
1733 NO_THREAD_SAFETY_ANALYSIS {
1734 ProfilingInfo* info = nullptr;
1735 if (!retry_allocation) {
1736 // If we are allocating for the interpreter, just try to lock, to avoid
1737 // lock contention with the JIT.
1738 if (lock_.ExclusiveTryLock(self)) {
1739 info = AddProfilingInfoInternal(self, method, entries);
1740 lock_.ExclusiveUnlock(self);
1741 }
1742 } else {
1743 {
1744 MutexLock mu(self, lock_);
1745 info = AddProfilingInfoInternal(self, method, entries);
1746 }
Nicolas Geoffray26705e22015-10-28 12:50:11 +00001747
Nicolas Geoffray1e7da9b2016-03-01 14:11:40 +00001748 if (info == nullptr) {
1749 GarbageCollectCache(self);
1750 MutexLock mu(self, lock_);
1751 info = AddProfilingInfoInternal(self, method, entries);
1752 }
Nicolas Geoffray26705e22015-10-28 12:50:11 +00001753 }
1754 return info;
1755}
1756
Nicolas Geoffray1e7da9b2016-03-01 14:11:40 +00001757ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED,
Nicolas Geoffray26705e22015-10-28 12:50:11 +00001758 ArtMethod* method,
1759 const std::vector<uint32_t>& entries) {
1760 size_t profile_info_size = RoundUp(
Nicolas Geoffray73be1e82015-09-17 15:22:56 +01001761 sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(),
Nicolas Geoffray26705e22015-10-28 12:50:11 +00001762 sizeof(void*));
Nicolas Geoffray26705e22015-10-28 12:50:11 +00001763
1764 // Check whether some other thread has concurrently created it.
Andreas Gampe542451c2016-07-26 09:02:02 -07001765 ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
Nicolas Geoffray26705e22015-10-28 12:50:11 +00001766 if (info != nullptr) {
1767 return info;
1768 }
1769
Nicolas Geoffray38ea9bd2016-02-19 16:25:57 +00001770 uint8_t* data = AllocateData(profile_info_size);
Nicolas Geoffray26705e22015-10-28 12:50:11 +00001771 if (data == nullptr) {
1772 return nullptr;
1773 }
1774 info = new (data) ProfilingInfo(method, entries);
Nicolas Geoffray07f35642016-01-04 16:06:51 +00001775
1776 // Make sure other threads see the data in the profiling info object before the
1777 // store in the ArtMethod's ProfilingInfo pointer.
Orion Hodson27b96762018-03-13 16:06:57 +00001778 std::atomic_thread_fence(std::memory_order_release);
Nicolas Geoffray07f35642016-01-04 16:06:51 +00001779
Nicolas Geoffray26705e22015-10-28 12:50:11 +00001780 method->SetProfilingInfo(info);
1781 profiling_infos_.push_back(info);
Nicolas Geoffray933330a2016-03-16 14:20:06 +00001782 histogram_profiling_info_memory_use_.AddValue(profile_info_size);
Nicolas Geoffray26705e22015-10-28 12:50:11 +00001783 return info;
1784}
1785
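// mspace (dlmalloc) MoreCore callback: grows the requested mspace by handing out the next
// `increment` bytes of the corresponding pre-reserved mapping (the updatable code mapping
// for exec_mspace_, data_pages_ for data_mspace_).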
Nicolas Geoffray0a3be162015-11-18 11:15:22 +00001786// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
1787// is already held.
1788void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
Orion Hodson1d3fd082018-09-28 09:38:35 +01001789 if (mspace == exec_mspace_) {
1790 DCHECK(exec_mspace_ != nullptr);
1791 const MemMap* const code_pages = GetUpdatableCodeMapping();
1792 void* result = code_pages->Begin() + exec_end_;
1793 exec_end_ += increment;
1794 return result;
Nicolas Geoffray0a3be162015-11-18 11:15:22 +00001795 } else {
1796 DCHECK_EQ(data_mspace_, mspace);
Orion Hodson1d3fd082018-09-28 09:38:35 +01001797 void* result = data_pages_.Begin() + data_end_;
Nicolas Geoffray0a3be162015-11-18 11:15:22 +00001798 data_end_ += increment;
Orion Hodson1d3fd082018-09-28 09:38:35 +01001799 return result;
Nicolas Geoffray0a3be162015-11-18 11:15:22 +00001800 }
1801}
1802
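// Extracts profile data for the given dex locations: one ProfileMethodInfo per profiled
// method, with inline cache classes resolved to (dex file, type index) pairs. Inline
// caches are only recorded for methods that reached the JIT compile threshold, and
// receivers outside the boot class path or the caller's class loader are flagged as
// missing types.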
Calin Juravle99629622016-04-19 16:33:46 +01001803void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
Calin Juravle940eb0c2017-01-30 19:30:44 -08001804 std::vector<ProfileMethodInfo>& methods) {
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001805 ScopedTrace trace(__FUNCTION__);
Calin Juravle31f2c152015-10-23 17:56:15 +01001806 MutexLock mu(Thread::Current(), lock_);
Calin Juravlea39fd982017-05-18 10:15:52 -07001807 uint16_t jit_compile_threshold = Runtime::Current()->GetJITOptions()->GetCompileThreshold();
Calin Juravle99629622016-04-19 16:33:46 +01001808 for (const ProfilingInfo* info : profiling_infos_) {
1809 ArtMethod* method = info->GetMethod();
1810 const DexFile* dex_file = method->GetDexFile();
Mathieu Chartier79c87da2017-10-10 11:54:29 -07001811 const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
1812 if (!ContainsElement(dex_base_locations, base_location)) {
Calin Juravle940eb0c2017-01-30 19:30:44 -08001813 // Skip dex files which are not profiled.
1814 continue;
Calin Juravle31f2c152015-10-23 17:56:15 +01001815 }
Calin Juravle940eb0c2017-01-30 19:30:44 -08001816 std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;
Calin Juravlea39fd982017-05-18 10:15:52 -07001817
1818    // If the method didn't reach the compilation threshold, don't save the inline caches.
1819    // They might be incomplete and cause unnecessary deoptimizations.
1820    // If the inline cache is empty, the compiler will generate a regular invoke virtual/interface.
1821 if (method->GetCounter() < jit_compile_threshold) {
1822 methods.emplace_back(/*ProfileMethodInfo*/
Mathieu Chartierbbe3a5e2017-06-13 16:36:17 -07001823 MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
Calin Juravlea39fd982017-05-18 10:15:52 -07001824 continue;
1825 }
1826
Calin Juravle940eb0c2017-01-30 19:30:44 -08001827 for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
Mathieu Chartierdbddc222017-05-24 12:04:13 -07001828 std::vector<TypeReference> profile_classes;
Calin Juravle940eb0c2017-01-30 19:30:44 -08001829 const InlineCache& cache = info->cache_[i];
Calin Juravle13439f02017-02-21 01:17:21 -08001830 ArtMethod* caller = info->GetMethod();
Calin Juravle589e71e2017-03-03 16:05:05 -08001831 bool is_missing_types = false;
Calin Juravle940eb0c2017-01-30 19:30:44 -08001832 for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) {
1833 mirror::Class* cls = cache.classes_[k].Read();
1834 if (cls == nullptr) {
1835 break;
1836 }
Calin Juravle4ca70a32017-02-21 16:22:24 -08001837
Calin Juravle13439f02017-02-21 01:17:21 -08001838 // Check if the receiver is in the boot class path or if it's in the
1839 // same class loader as the caller. If not, skip it, as there is not
1840 // much we can do during AOT.
1841 if (!cls->IsBootStrapClassLoaded() &&
1842 caller->GetClassLoader() != cls->GetClassLoader()) {
1843 is_missing_types = true;
1844 continue;
1845 }
1846
Calin Juravle4ca70a32017-02-21 16:22:24 -08001847 const DexFile* class_dex_file = nullptr;
1848 dex::TypeIndex type_index;
1849
1850 if (cls->GetDexCache() == nullptr) {
1851 DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
Calin Juravlee21806f2017-02-22 11:49:43 -08001852 // Make a best effort to find the type index in the method's dex file.
1853          // We could search all open dex files, but that might turn out to be expensive
1854          // and is probably not worth it.
Calin Juravle4ca70a32017-02-21 16:22:24 -08001855 class_dex_file = dex_file;
1856 type_index = cls->FindTypeIndexInOtherDexFile(*dex_file);
1857 } else {
1858 class_dex_file = &(cls->GetDexFile());
1859 type_index = cls->GetDexTypeIndex();
1860 }
1861 if (!type_index.IsValid()) {
1862 // Could be a proxy class or an array for which we couldn't find the type index.
Calin Juravle589e71e2017-03-03 16:05:05 -08001863 is_missing_types = true;
Calin Juravle4ca70a32017-02-21 16:22:24 -08001864 continue;
1865 }
Mathieu Chartier79c87da2017-10-10 11:54:29 -07001866 if (ContainsElement(dex_base_locations,
1867 DexFileLoader::GetBaseLocation(class_dex_file->GetLocation()))) {
Calin Juravle940eb0c2017-01-30 19:30:44 -08001868 // Only consider classes from the same apk (including multidex).
1869 profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
Calin Juravle4ca70a32017-02-21 16:22:24 -08001870 class_dex_file, type_index);
Calin Juravle589e71e2017-03-03 16:05:05 -08001871 } else {
1872 is_missing_types = true;
Calin Juravle940eb0c2017-01-30 19:30:44 -08001873 }
1874 }
1875 if (!profile_classes.empty()) {
1876 inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/
Calin Juravle589e71e2017-03-03 16:05:05 -08001877 cache.dex_pc_, is_missing_types, profile_classes);
Calin Juravle940eb0c2017-01-30 19:30:44 -08001878 }
1879 }
1880 methods.emplace_back(/*ProfileMethodInfo*/
Mathieu Chartierbbe3a5e2017-06-13 16:36:17 -07001881 MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
Calin Juravle31f2c152015-10-23 17:56:15 +01001882 }
1883}
1884
Nicolas Geoffray71cd50f2016-04-14 15:00:33 +01001885bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
1886 MutexLock mu(Thread::Current(), lock_);
1887 return osr_code_map_.find(method) != osr_code_map_.end();
1888}
1889
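// Called before the JIT compiles `method` (osr or not); returns whether a new compilation
// should proceed. For native methods this registers the method in jni_stubs_map_ and may
// simply reuse an already compiled stub; for regular methods it requires a ProfilingInfo
// and uses its "being compiled" flag to avoid duplicate compilations.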
Nicolas Geoffrayb331feb2016-02-05 16:51:53 +00001890bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr) {
1891 if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
Nicolas Geoffray73be1e82015-09-17 15:22:56 +01001892 return false;
1893 }
Nicolas Geoffraya42363f2015-12-17 14:57:09 +00001894
Nicolas Geoffraya42363f2015-12-17 14:57:09 +00001895 MutexLock mu(self, lock_);
Nicolas Geoffrayb331feb2016-02-05 16:51:53 +00001896 if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) {
1897 return false;
1898 }
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00001899
Vladimir Marko2196c652017-11-30 16:16:07 +00001900 if (UNLIKELY(method->IsNative())) {
1901 JniStubKey key(method);
1902 auto it = jni_stubs_map_.find(key);
1903 bool new_compilation = false;
1904 if (it == jni_stubs_map_.end()) {
1905 // Create a new entry to mark the stub as being compiled.
1906 it = jni_stubs_map_.Put(key, JniStubData{});
1907 new_compilation = true;
1908 }
1909 JniStubData* data = &it->second;
1910 data->AddMethod(method);
1911 if (data->IsCompiled()) {
1912 OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(data->GetCode());
1913 const void* entrypoint = method_header->GetEntryPoint();
1914      // Also update the entrypoints of other methods held by the JniStubData.
1915 // We could simply update the entrypoint of `method` but if the last JIT GC has
1916 // changed these entrypoints to GenericJNI in preparation for a full GC, we may
1917 // as well change them back as this stub shall not be collected anyway and this
1918 // can avoid a few expensive GenericJNI calls.
1919 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
1920 for (ArtMethod* m : data->GetMethods()) {
Nicolas Geoffraya6e0e7d2018-01-26 13:16:50 +00001921 // Call the dedicated method instead of the more generic UpdateMethodsCode, because
1922 // `m` might be in the process of being deleted.
1923 instrumentation->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
Vladimir Marko2196c652017-11-30 16:16:07 +00001924 }
1925 if (collection_in_progress_) {
1926 GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
1927 }
1928 }
1929 return new_compilation;
1930 } else {
1931 ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
1932 if (info == nullptr) {
1933 VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
1934 // Because the counter is not atomic, there are some rare cases where we may not hit the
1935 // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
1936 ClearMethodCounter(method, /*was_warm*/ false);
1937 return false;
1938 }
Nicolas Geoffray056d7752017-11-30 09:12:13 +00001939
Vladimir Marko2196c652017-11-30 16:16:07 +00001940 if (info->IsMethodBeingCompiled(osr)) {
1941 return false;
1942 }
Nicolas Geoffray056d7752017-11-30 09:12:13 +00001943
Vladimir Marko2196c652017-11-30 16:16:07 +00001944 info->SetIsMethodBeingCompiled(true, osr);
1945 return true;
1946 }
Nicolas Geoffray73be1e82015-09-17 15:22:56 +01001947}
1948
Nicolas Geoffray07e3ca92016-03-11 09:57:57 +00001949ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
Nicolas Geoffrayb6e20ae2016-03-07 14:29:04 +00001950 MutexLock mu(self, lock_);
Andreas Gampe542451c2016-07-26 09:02:02 -07001951 ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
Nicolas Geoffrayb6e20ae2016-03-07 14:29:04 +00001952 if (info != nullptr) {
Nicolas Geoffrayf6d46682017-02-28 17:41:45 +00001953 if (!info->IncrementInlineUse()) {
1954 // Overflow of inlining uses, just bail.
1955 return nullptr;
1956 }
Nicolas Geoffrayb6e20ae2016-03-07 14:29:04 +00001957 }
Nicolas Geoffray07e3ca92016-03-11 09:57:57 +00001958 return info;
Nicolas Geoffrayb6e20ae2016-03-07 14:29:04 +00001959}
1960
Nicolas Geoffray07e3ca92016-03-11 09:57:57 +00001961void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
Nicolas Geoffrayb6e20ae2016-03-07 14:29:04 +00001962 MutexLock mu(self, lock_);
Andreas Gampe542451c2016-07-26 09:02:02 -07001963 ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
Nicolas Geoffray07e3ca92016-03-11 09:57:57 +00001964 DCHECK(info != nullptr);
1965 info->DecrementInlineUse();
Nicolas Geoffrayb6e20ae2016-03-07 14:29:04 +00001966}
1967
Vladimir Marko2196c652017-11-30 16:16:07 +00001968void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr) {
1969 DCHECK_EQ(Thread::Current(), self);
1970 MutexLock mu(self, lock_);
1971 if (UNLIKELY(method->IsNative())) {
1972 auto it = jni_stubs_map_.find(JniStubKey(method));
1973 DCHECK(it != jni_stubs_map_.end());
1974 JniStubData* data = &it->second;
1975 DCHECK(ContainsElement(data->GetMethods(), method));
1976 if (UNLIKELY(!data->IsCompiled())) {
1977 // Failed to compile; the JNI compiler never fails, but the cache may be full.
1978 jni_stubs_map_.erase(it); // Remove the entry added in NotifyCompilationOf().
1979 } // else CommitCodeInternal() updated entrypoints of all methods in the JniStubData.
1980 } else {
1981 ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
1982 DCHECK(info->IsMethodBeingCompiled(osr));
1983 info->SetIsMethodBeingCompiled(false, osr);
1984 }
Nicolas Geoffray73be1e82015-09-17 15:22:56 +01001985}
1986
Nicolas Geoffraya25dce92016-01-12 16:41:10 +00001987size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
1988 MutexLock mu(Thread::Current(), lock_);
1989 return mspace_usable_size(reinterpret_cast<const void*>(FromCodeToAllocation(ptr)));
1990}
1991
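// Called when the compiled code identified by `header` must no longer be used for
// `method`, typically because it has been invalidated (e.g. by deoptimization or class
// hierarchy changes). If that code is the current entrypoint (or the instrumentation-saved
// one), the method is reset to the interpreter bridge and its counter cleared; if it was
// an OSR compilation, the entry is dropped from osr_code_map_.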
Nicolas Geoffrayb88d59e2016-02-17 11:31:49 +00001992void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
1993 const OatQuickMethodHeader* header) {
Vladimir Marko2196c652017-11-30 16:16:07 +00001994 DCHECK(!method->IsNative());
Andreas Gampe542451c2016-07-26 09:02:02 -07001995 ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
Alex Light2d441b12018-06-08 15:33:21 -07001996 const void* method_entrypoint = method->GetEntryPointFromQuickCompiledCode();
Nicolas Geoffray35122442016-03-02 12:05:30 +00001997 if ((profiling_info != nullptr) &&
1998 (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
Alex Light2d441b12018-06-08 15:33:21 -07001999 // When instrumentation is set, the actual entrypoint is the one in the profiling info.
2000 method_entrypoint = profiling_info->GetSavedEntryPoint();
Nicolas Geoffray35122442016-03-02 12:05:30 +00002001 // Prevent future uses of the compiled code.
2002 profiling_info->SetSavedEntryPoint(nullptr);
2003 }
2004
Alex Light2d441b12018-06-08 15:33:21 -07002005 // Clear the method counter if we are running jitted code since we might want to jit this again in
2006 // the future.
2007 if (method_entrypoint == header->GetEntryPoint()) {
Jeff Hao00286db2017-05-30 16:53:07 -07002008 // The entrypoint is the one to invalidate, so we just update it to the interpreter entry point
Mathieu Chartierf044c222017-05-31 15:27:54 -07002009    // and clear the counter to get the method JIT-compiled again.
Nicolas Geoffrayb88d59e2016-02-17 11:31:49 +00002010 Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
2011 method, GetQuickToInterpreterBridge());
Mathieu Chartierf044c222017-05-31 15:27:54 -07002012 ClearMethodCounter(method, /*was_warm*/ profiling_info != nullptr);
Nicolas Geoffrayb88d59e2016-02-17 11:31:49 +00002013 } else {
2014 MutexLock mu(Thread::Current(), lock_);
2015 auto it = osr_code_map_.find(method);
2016 if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
2017 // Remove the OSR method, to avoid using it again.
2018 osr_code_map_.erase(it);
2019 }
2020 }
2021}
2022
Nicolas Geoffray38ea9bd2016-02-19 16:25:57 +00002023uint8_t* JitCodeCache::AllocateCode(size_t code_size) {
2024 size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
2025 uint8_t* result = reinterpret_cast<uint8_t*>(
Orion Hodson1d3fd082018-09-28 09:38:35 +01002026 mspace_memalign(exec_mspace_, alignment, code_size));
Nicolas Geoffray38ea9bd2016-02-19 16:25:57 +00002027 size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
2028 // Ensure the header ends up at expected instruction alignment.
2029 DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
2030 used_memory_for_code_ += mspace_usable_size(result);
2031 return result;
2032}
2033
Orion Hodsondbd05fe2017-08-10 11:41:35 +01002034void JitCodeCache::FreeCode(uint8_t* code) {
2035 used_memory_for_code_ -= mspace_usable_size(code);
Orion Hodson1d3fd082018-09-28 09:38:35 +01002036 mspace_free(exec_mspace_, code);
Nicolas Geoffray38ea9bd2016-02-19 16:25:57 +00002037}
2038
2039uint8_t* JitCodeCache::AllocateData(size_t data_size) {
2040 void* result = mspace_malloc(data_mspace_, data_size);
2041 used_memory_for_data_ += mspace_usable_size(result);
2042 return reinterpret_cast<uint8_t*>(result);
2043}
2044
2045void JitCodeCache::FreeData(uint8_t* data) {
2046 used_memory_for_data_ -= mspace_usable_size(data);
2047 mspace_free(data_mspace_, data);
2048}
2049
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00002050void JitCodeCache::Dump(std::ostream& os) {
2051 MutexLock mu(Thread::Current(), lock_);
David Srbeckyfb3de3d2018-01-29 16:11:49 +00002052 MutexLock mu2(Thread::Current(), *Locks::native_debug_interface_lock_);
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00002053 os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
2054 << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
David Srbecky440a9b32018-02-15 17:47:29 +00002055 << "Current JIT mini-debug-info size: " << PrettySize(GetJitNativeDebugInfoMemUsage()) << "\n"
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00002056 << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
Vladimir Marko2196c652017-11-30 16:16:07 +00002057 << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00002058 << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
2059 << "Total number of JIT compilations: " << number_of_compilations_ << "\n"
2060 << "Total number of JIT compilations for on stack replacement: "
2061 << number_of_osr_compilations_ << "\n"
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00002062 << "Total number of JIT code cache collections: " << number_of_collections_ << std::endl;
Nicolas Geoffray933330a2016-03-16 14:20:06 +00002063 histogram_stack_map_memory_use_.PrintMemoryUse(os);
2064 histogram_code_memory_use_.PrintMemoryUse(os);
2065 histogram_profiling_info_memory_use_.PrintMemoryUse(os);
Nicolas Geoffraybcd94c82016-03-03 13:23:33 +00002066}
2067
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08002068} // namespace jit
2069} // namespace art