/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include <iosfwd>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "base/arena_containers.h"
#include "base/atomic.h"
#include "base/histogram.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "base/mutex.h"
#include "base/safe_map.h"

namespace art {

class ArtMethod;
template<class T> class Handle;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
class Thread;

namespace gc {
namespace accounting {
template<size_t kAlignment> class MemoryRangeBitmap;
}  // namespace accounting
}  // namespace gc

namespace mirror {
class Class;
class Object;
template<class T> class ObjectArray;
}  // namespace mirror

namespace jit {

class MarkCodeClosure;
class ScopedCodeCacheWrite;

// Alignment in bytes that will suit all architectures.
static constexpr int kJitCodeAlignment = 16;
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
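
// Granularity sketch: MemoryRangeBitmap presumably keeps one bit per
// kJitCodeAlignment-sized slot, so covering a cache at the 64 * MB maximum
// capacity below takes 64MB / 16 = 4M bits, i.e. 512KB of bitmap.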

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Keep the initial capacity very low for debug builds to stress the code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB (4 * kInitialCapacity on release builds).
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache. On failure, a reason is passed in the out arg error_msg.
  static JitCodeCache* Create(bool used_only_for_profile_data,
                              bool rwx_memory_allowed,
                              bool is_zygote,
                              std::string* error_msg);
  ~JitCodeCache();

  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Notify the code cache that the compiler wants to use the
  // profiling info of `method` to drive optimizations,
  // and therefore ensure the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Allocate and write code and its metadata to the code cache.
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  uint8_t* CommitCode(Thread* self,
                      ArtMethod* method,
                      uint8_t* stack_map,
                      uint8_t* roots_data,
                      const uint8_t* code,
                      size_t code_size,
                      size_t data_size,
                      bool osr,
                      const std::vector<Handle<mirror::Object>>& roots,
                      bool has_should_deoptimize_flag,
                      const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
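
  // A rough usage sketch (illustrative only, names below are placeholders):
  // the compiler reserves space via ReserveData() below, fills in the stack
  // maps and root table, then commits the generated code:
  //
  //   uint8_t* stack_map = nullptr;
  //   uint8_t* roots_data = nullptr;
  //   code_cache->ReserveData(
  //       self, stack_map_size, roots.size(), method, &stack_map, &roots_data);
  //   uint8_t* code = code_cache->CommitCode(
  //       self, method, stack_map, roots_data, compiled_code, code_size,
  //       data_size, /*osr=*/ false, roots, has_should_deoptimize_flag,
  //       cha_single_implementation_list);
  //   // A null result means the commit failed, e.g. even after a collection.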

  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Returns true if either the method's entrypoint is JIT compiled code or it is the
  // instrumentation entrypoint and we can jump to jit code for this method. For testing use only.
  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!lock_);

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);

  // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!lock_);

  // Allocate a region of data that will contain `stack_map_size` bytes, and
  // potentially space for storing `number_of_roots` roots. Returns the number
  // of bytes allocated; sets the out args to null if there is no more room.
  size_t ReserveData(Thread* self,
                     size_t stack_map_size,
                     size_t number_of_roots,
                     ArtMethod* method,
                     uint8_t** stack_map_data,
                     uint8_t** roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Clear data from the data portion of the code cache.
  void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT compiled code associated with it.
  // Return null if 'pc' is not in the code cache. 'method' is passed as a
  // sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
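
  // For example (an illustrative sketch), a stack walker holding a return `pc`
  // can map it back to JIT-compiled code:
  //
  //   OatQuickMethodHeader* header = code_cache->LookupMethodHeader(pc, method);
  //   if (header != nullptr) { /* `pc` lies within `method`'s JIT code. */ }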

  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes the method from the cache for testing purposes. The caller
  // must ensure that all threads are suspended and that the method is
  // not on any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
  // this will collect and retry if the first allocation is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return mspace == data_mspace_ || mspace == exec_mspace_;
  }

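  // A hedged note: MoreCore is presumably the morecore callback wired into the
  // two dlmalloc mspaces. When an mspace runs out of memory it calls back here
  // with the requested `increment` in bytes, and the implementation grows the
  // matching data or exec region (OwnsSpace() above tells which regions we own).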
  void* MoreCore(const void* mspace, intptr_t increment);

  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow the reading of inline caches when it processes them,
  // to avoid a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!lock_);
  void DisallowInlineCacheAccess() REQUIRES(!lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);

  // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
  // 'new_method' since it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code.
  void SetGarbageCollectCode(bool value) REQUIRES(!lock_);

  bool GetGarbageCollectCode() REQUIRES(!lock_);

  // Unsafe variant for debug checks.
  bool GetGarbageCollectCodeUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
    return garbage_collect_code_;
  }

  // If JIT GC has been disabled (and instrumentation has been enabled), this will return the
  // JIT-compiled entrypoint for this method. Otherwise it will return null.
  const void* FindCompiledCodeForInstrumentation(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void PostForkChildAction(bool is_system_server, bool is_zygote);

  // Clear the entrypoints of JIT compiled methods that live in the zygote exec space.
  // This is used for removing non-debuggable JIT code at the point we realize the runtime
  // is debuggable.
  void ClearEntryPointsInZygoteExecSpace() REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

 private:
  JitCodeCache();

  void InitializeState(size_t initial_capacity, size_t max_capacity) REQUIRES(lock_);

  bool InitializeMappings(bool rwx_memory_allowed, bool is_zygote, std::string* error_msg)
      REQUIRES(lock_);

  void InitializeSpaces() REQUIRES(lock_);

  // Internal version of 'CommitCode' that will not retry if the
  // allocation fails. Returns null in that case.
  uint8_t* CommitCodeInternal(Thread* self,
                              ArtMethod* method,
                              uint8_t* stack_map,
                              uint8_t* roots_data,
                              const uint8_t* code,
                              size_t code_size,
                              size_t data_size,
                              bool osr,
                              const std::vector<Handle<mirror::Object>>& roots,
                              bool has_should_deoptimize_flag,
                              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Adds the given roots to the roots_data. Only a member for annotalysis.
  void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
  // The non-mutator-lock version should be used if possible. This method will release and then
  // re-acquire the mutator lock.
  void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
      REQUIRES(lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(!lock_)
      REQUIRES(!Locks::cha_lock_);

  // Removes the method from the cache. The caller must ensure that all threads
  // are suspended and that the method is not on any thread's stack.
  bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
      REQUIRES(lock_)
      REQUIRES(Locks::mutator_lock_);

  // Free code and data allocations for `code_ptr`.
  void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);

  // Try to increase the current capacity of the code cache. Return whether we
  // succeeded at doing so.
  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);

  // Set the footprint limit of the code cache.
  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool CheckLiveCompiledCodeHasProfilingInfo()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
  void FreeCode(uint8_t* code) REQUIRES(lock_);
  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
  void FreeData(uint8_t* data) REQUIRES(lock_);

  bool HasDualCodeMapping() const {
    return non_exec_pages_.IsValid();
  }

  bool HasCodeMapping() const {
    return exec_pages_.IsValid();
  }

  const MemMap* GetUpdatableCodeMapping() const;
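
  // Dual-view note (a sketch of the assumed flow): when non_exec_pages_ is
  // valid, GetUpdatableCodeMapping() is expected to return that writable view,
  // so code updates are written through it while execution always goes through
  // exec_pages_; no page then needs to be writable and executable at once:
  //
  //   const MemMap* updatable = GetUpdatableCodeMapping();
  //   // Write compiled code into `updatable`, then execute it via the
  //   // corresponding address in exec_pages_.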

  bool IsInZygoteDataSpace(const void* ptr) const {
    return zygote_data_pages_.HasAddress(ptr);
  }

  bool IsInZygoteExecSpace(const void* ptr) const {
    return zygote_exec_pages_.HasAddress(ptr);
  }

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  class JniStubKey;
  class JniStubData;

  // Lock for guarding allocations, collections, and the method_code_map_.
  Mutex lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(lock_);
  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(lock_);
  // Mem map which holds data (stack maps and profiling info).
  MemMap data_pages_;
  // Mem map which holds code and has executable permission.
  MemMap exec_pages_;
  // Mem map which holds code with non-executable permission. Only valid for dual view JIT, where
  // this is the non-executable view of code used to write updates.
  MemMap non_exec_pages_;
  // The opaque mspace for allocating data.
  void* data_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating code.
  void* exec_mspace_ GUARDED_BY(lock_);
  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
  // Holds compiled code associated with the shorty for a JNI stub.
  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(lock_);
  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
  // Holds OSR compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);

  // The initial capacity in bytes this code cache starts with.
  size_t initial_capacity_ GUARDED_BY(lock_);

  // The maximum capacity in bytes this code cache can go to.
  size_t max_capacity_ GUARDED_BY(lock_);

  // The current capacity in bytes of the code cache.
  size_t current_capacity_ GUARDED_BY(lock_);

  // The current footprint in bytes of the data portion of the code cache.
  size_t data_end_ GUARDED_BY(lock_);

  // The current footprint in bytes of the code portion of the code cache.
  size_t exec_end_ GUARDED_BY(lock_);

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the data portion of the code cache.
  size_t used_memory_for_data_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the code portion of the code cache.
  size_t used_memory_for_code_ GUARDED_BY(lock_);

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(lock_);

  // Histogram for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);

  // Mem map which holds zygote data (stack maps and profiling info).
  MemMap zygote_data_pages_;
  // Mem map which holds zygote code and has executable permission.
  MemMap zygote_exec_pages_;
  // The opaque mspace for allocating zygote data.
  void* zygote_data_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating zygote code.
  void* zygote_exec_mspace_ GUARDED_BY(lock_);

  friend class art::JitJniStubTestHelper;
  friend class ScopedCodeCacheWrite;
  friend class MarkCodeClosure;

  DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_