/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include <iosfwd>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "base/arena_containers.h"
#include "base/atomic.h"
#include "base/histogram.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "base/mutex.h"
#include "base/safe_map.h"

namespace art {

class ArtMethod;
template<class T> class Handle;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
class Thread;

namespace gc {
namespace accounting {
template<size_t kAlignment> class MemoryRangeBitmap;
}  // namespace accounting
}  // namespace gc

namespace mirror {
class Class;
class Object;
template<class T> class ObjectArray;
}  // namespace mirror

namespace jit {

class MarkCodeClosure;
class ScopedCodeCacheWrite;

// Alignment in bytes that will suit all architectures for JIT code cache allocations. Each
// allocated block holds the method header followed by the generated code, and allocations are
// aligned so that separate allocations do not share a cache line. Ideally the alignment would be
// derived from the hardware cache-line size, but that is not readily exposed in userland and
// some hardware misreports it.
static constexpr int kJitCodeAlignment = 64;

using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
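
// A quick illustration of the granularity implied above (hypothetical sizes; RoundUp stands for
// the usual round-up-to-a-multiple helper from ART's bit utilities):
//
//   size_t total = method_header_size + code_size;             // e.g. 36 + 64 = 100 bytes
//   size_t aligned_total = RoundUp(total, kJitCodeAlignment);  // 100 -> 128
//
// CodeCacheBitmap accordingly keeps one bit per kJitCodeAlignment-sized range, which is enough
// to mark any allocation start during collection.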

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Keep the default very low for debug builds to stress the code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB.
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache. On failure, returns null and stores an error message in the out arg
  // `error_msg`.
  static JitCodeCache* Create(bool used_only_for_profile_data,
                              bool rwx_memory_allowed,
                              bool is_zygote,
                              std::string* error_msg);
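
  // A minimal usage sketch (hypothetical caller; the null-check-plus-log error handling is an
  // assumption based on the out-arg contract above):
  //
  //   std::string error_msg;
  //   JitCodeCache* cache = JitCodeCache::Create(/*used_only_for_profile_data=*/ false,
  //                                              /*rwx_memory_allowed=*/ true,
  //                                              /*is_zygote=*/ false,
  //                                              &error_msg);
  //   if (cache == nullptr) {
  //     LOG(ERROR) << "JIT code cache creation failed: " << error_msg;
  //   }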

  ~JitCodeCache();

  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Notify the code cache that the compiler wants to use the profiling info of `method` to
  // drive optimizations, and therefore ensure the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
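
  // A sketch of the intended pairing (hypothetical compiler-side caller; the null check is an
  // assumption, since profiling info may not have been allocated for `method`):
  //
  //   ProfilingInfo* info = code_cache->NotifyCompilerUse(method, self);
  //   if (info != nullptr) {
  //     // ... read profiling data while it is guaranteed not to be collected ...
  //     code_cache->DoneCompilerUse(method, self);
  //   }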

  // Allocate and write code and its metadata to the code cache.
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  uint8_t* CommitCode(Thread* self,
                      ArtMethod* method,
                      uint8_t* stack_map,
                      uint8_t* roots_data,
                      const uint8_t* code,
                      size_t code_size,
                      size_t data_size,
                      bool osr,
                      const std::vector<Handle<mirror::Object>>& roots,
                      bool has_should_deoptimize_flag,
                      const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Return true if either the method's entrypoint is JIT-compiled code, or it is the
  // instrumentation entrypoint and we can jump to JIT code for this method. For testing use only.
  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!lock_);

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);

  // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!lock_);

  // Allocate a region of data that will contain `stack_map_size` bytes, plus potentially space
  // for storing `number_of_roots` roots. The out args `stack_map_data` and `roots_data` are set
  // to null if there is no more room. Returns the number of bytes allocated.
  size_t ReserveData(Thread* self,
                     size_t stack_map_size,
                     size_t number_of_roots,
                     ArtMethod* method,
                     uint8_t** stack_map_data,
                     uint8_t** roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
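
  // A condensed sketch of the reserve-then-commit flow (hypothetical compiler-side caller;
  // variable names and sizes are assumptions, and error handling is elided):
  //
  //   uint8_t* stack_map_data = nullptr;
  //   uint8_t* roots_data = nullptr;
  //   size_t data_size = code_cache->ReserveData(
  //       self, stack_map_size, roots.size(), method, &stack_map_data, &roots_data);
  //   // ... write the stack map into stack_map_data ...
  //   uint8_t* code_ptr = code_cache->CommitCode(
  //       self, method, stack_map_data, roots_data, code, code_size, data_size,
  //       /*osr=*/ false, roots, has_should_deoptimize_flag, cha_single_implementation_list);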

  // Clear data from the data portion of the code cache.
  void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT compiled code associated with it.
  // Return null if 'pc' is not in the code cache. 'method' is passed as a
  // sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
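
  // A minimal sketch of a lookup during stack walking (hypothetical caller; obtaining `pc` and
  // `method` from a stack visitor is assumed context, not part of this API):
  //
  //   OatQuickMethodHeader* header = code_cache->LookupMethodHeader(pc, method);
  //   if (header != nullptr) {
  //     // `pc` falls inside JIT-compiled code described by `header`.
  //   }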

  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Remove a method from the cache for testing purposes. The caller
  // must ensure that all threads are suspended and the method is not
  // on any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true, this
  // will trigger a collection and retry if the first allocation is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
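
  // A minimal sketch (hypothetical caller; `dex_pc_entries` stands for the dex pcs of the
  // instructions to profile, which is an assumption about the meaning of `entries`):
  //
  //   ProfilingInfo* info = code_cache->AddProfilingInfo(
  //       self, method, dex_pc_entries, /*retry_allocation=*/ true);
  //   // With retry_allocation == true, a failed first allocation triggers a code cache
  //   // collection followed by one retry, so `info` should be null only if that also fails.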

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return mspace == data_mspace_ || mspace == exec_mspace_;
  }

  void* MoreCore(const void* mspace, intptr_t increment);
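
  // MoreCore is the sbrk-style grow callback for the mspaces checked in OwnsSpace: when the
  // underlying allocator (dlmalloc in practice) needs more memory for an mspace, it calls back
  // into the cache that owns it. A hedged sketch of the dispatch this enables (illustrative
  // only, not the actual implementation):
  //
  //   void* result = code_cache->OwnsSpace(mspace)
  //       ? code_cache->MoreCore(mspace, increment)  // Grow the data or exec region.
  //       : nullptr;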

  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow the reading of inline caches when it processes them,
  // to avoid having a class be used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!lock_);
  void DisallowInlineCacheAccess() REQUIRES(!lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);

  // Notify the code cache that the method at the pointer 'old_method' is being moved to the
  // pointer 'new_method' since it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code.
  void SetGarbageCollectCode(bool value) REQUIRES(!lock_);

  bool GetGarbageCollectCode() REQUIRES(!lock_);

  // Unsafe variant for debug checks.
  bool GetGarbageCollectCodeUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
    return garbage_collect_code_;
  }

  // If JIT-GC has been disabled (and instrumentation has been enabled) this will return the
  // JIT-compiled entrypoint for this method. Otherwise it will return null.
  const void* FindCompiledCodeForInstrumentation(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Fetch the entrypoint that the zygote may have saved for a method. The zygote saves an
  // entrypoint only for the case when the method's declaring class is not initialized.
  const void* GetZygoteSavedEntryPoint(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void PostForkChildAction(bool is_system_server, bool is_zygote);

  // Clear the entrypoints of JIT-compiled methods that belong to the zygote exec space.
  // This is used to remove non-debuggable JIT code at the point we realize the runtime
  // is debuggable.
  void ClearEntryPointsInZygoteExecSpace() REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

 private:
  JitCodeCache();

  void InitializeState(size_t initial_capacity, size_t max_capacity) REQUIRES(lock_);

  bool InitializeMappings(bool rwx_memory_allowed, bool is_zygote, std::string* error_msg)
      REQUIRES(lock_);

  void InitializeSpaces() REQUIRES(lock_);

  // Internal version of 'CommitCode' that will not retry if the
  // allocation fails. Returns null if the allocation fails.
  uint8_t* CommitCodeInternal(Thread* self,
                              ArtMethod* method,
                              uint8_t* stack_map,
                              uint8_t* roots_data,
                              const uint8_t* code,
                              size_t code_size,
                              size_t data_size,
                              bool osr,
                              const std::vector<Handle<mirror::Object>>& roots,
                              bool has_should_deoptimize_flag,
                              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Adds the given roots to the roots_data. Only a member for annotalysis.
  void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
  // The non-mutator-lock version should be used if possible. This method will release and then
  // re-acquire the mutator lock.
  void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
      REQUIRES(lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(!lock_)
      REQUIRES(!Locks::cha_lock_);

  // Removes a method from the cache. The caller must ensure that all threads
  // are suspended and the method is not on any thread's stack.
  bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
      REQUIRES(lock_)
      REQUIRES(Locks::mutator_lock_);

  // Free code and data allocations for `code_ptr`.
  void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);

  // Try to increase the current capacity of the code cache. Return whether we
  // succeeded at doing so.
  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);

  // Set the footprint limit of the code cache.
  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
  void FreeCode(uint8_t* code) REQUIRES(lock_);
  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
  void FreeData(uint8_t* data) REQUIRES(lock_);

  bool HasDualCodeMapping() const {
    return non_exec_pages_.IsValid();
  }

  bool HasCodeMapping() const {
    return exec_pages_.IsValid();
  }

  const MemMap* GetUpdatableCodeMapping() const;

  bool IsInZygoteDataSpace(const void* ptr) const {
    return zygote_data_pages_.HasAddress(ptr);
  }

  bool IsInZygoteExecSpace(const void* ptr) const {
    return zygote_exec_pages_.HasAddress(ptr);
  }

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  class JniStubKey;
  class JniStubData;

  // Lock for guarding allocations, collections, and the method_code_map_.
  Mutex lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(lock_);
  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(lock_);
  // Mem map which holds data (stack maps and profiling info).
  MemMap data_pages_;
  // Mem map which holds code and has executable permission.
  MemMap exec_pages_;
  // Mem map which holds code with non-executable permission. Only valid for dual-view JIT, where
  // it is the non-executable view used to write code updates.
  MemMap non_exec_pages_;
  // The opaque mspace for allocating data.
  void* data_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating code.
  void* exec_mspace_ GUARDED_BY(lock_);
  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
  // Holds compiled code associated with the shorty for a JNI stub.
  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(lock_);
  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
  // Holds OSR compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);

  // The initial capacity in bytes this code cache starts with.
  size_t initial_capacity_ GUARDED_BY(lock_);

  // The maximum capacity in bytes this code cache can go to.
  size_t max_capacity_ GUARDED_BY(lock_);

  // The current capacity in bytes of the code cache.
  size_t current_capacity_ GUARDED_BY(lock_);

  // The current footprint in bytes of the data portion of the code cache.
  size_t data_end_ GUARDED_BY(lock_);

  // The current footprint in bytes of the code portion of the code cache.
  size_t exec_end_ GUARDED_BY(lock_);

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the data portion of the code cache.
  size_t used_memory_for_data_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the code portion of the code cache.
  size_t used_memory_for_code_ GUARDED_BY(lock_);

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(lock_);

  // Histogram for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);

  // Mem map which holds zygote data (stack maps and profiling info).
  MemMap zygote_data_pages_;
  // Mem map which holds zygote code and has executable permission.
  MemMap zygote_exec_pages_;
  // The opaque mspace for allocating zygote data.
  void* zygote_data_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating zygote code.
  void* zygote_exec_mspace_ GUARDED_BY(lock_);

  friend class art::JitJniStubTestHelper;
  friend class ScopedCodeCacheWrite;
  friend class MarkCodeClosure;

  DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_