/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include <iosfwd>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "base/arena_containers.h"
#include "base/atomic.h"
#include "base/histogram.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "base/mutex.h"
#include "base/safe_map.h"
#include "jit_memory_region.h"

namespace art {

class ArtMethod;
template<class T> class Handle;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
class Thread;

namespace gc {
namespace accounting {
template<size_t kAlignment> class MemoryRangeBitmap;
}  // namespace accounting
}  // namespace gc

namespace mirror {
class Class;
class Object;
template<class T> class ObjectArray;
}  // namespace mirror

namespace jit {

class MarkCodeClosure;

// Type of bitmap used for tracking live functions in the JIT code cache for the purposes
// of garbage collecting code.
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAccountingBytes>;

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Set the default to a very low amount for debug builds to stress the code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB.
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache. On failure, returns null and passes an error message in the out
  // arg error_msg.
  static JitCodeCache* Create(bool used_only_for_profile_data,
                              bool rwx_memory_allowed,
                              bool is_zygote,
                              std::string* error_msg);
  ~JitCodeCache();

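  // Notify the code cache that the JIT is about to compile `method` (as an on-stack-replacement
  // compilation if `osr` is true). Returns whether the compilation should proceed.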
  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

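  // Notify the code cache that `method` has been redefined (for instance through JVMTI class
  // redefinition), so that any state the cache holds for it can be invalidated.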
  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Notify the code cache that the compiler wants to use the profiling info of `method` to
  // drive optimizations, and therefore ensure that the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

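  // Notify the code cache that the compilation of `method` (possibly for on-stack replacement)
  // has finished.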
  void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

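  // Notify the code cache that the compiler no longer needs the profiling info of `method`;
  // the counterpart of NotifyCompilerUse.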
  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Allocate and write code and its metadata to the code cache.
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  uint8_t* CommitCode(Thread* self,
                      JitMemoryRegion* region,
                      ArtMethod* method,
                      const uint8_t* code,
                      size_t code_size,
                      const uint8_t* stack_map,
                      size_t stack_map_size,
                      uint8_t* roots_data,
                      const std::vector<Handle<mirror::Object>>& roots,
                      bool osr,
                      bool has_should_deoptimize_flag,
                      const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Returns true if either the method's entrypoint is JIT-compiled code, or it is the
  // instrumentation entrypoint and we can jump to JIT code for this method. For testing use
  // only.
  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  // Allocate a region of data that will contain a stack map of size `stack_map_size` and
  // `number_of_roots` roots accessed by the JIT code.
  // Return a pointer to where roots will be stored.
  uint8_t* ReserveData(Thread* self,
                       JitMemoryRegion* region,
                       size_t stack_map_size,
                       size_t number_of_roots,
                       ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

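  // A rough sketch of how a compiler thread is expected to use ReserveData and CommitCode,
  // based only on the declarations in this file; the variable names below are illustrative
  // rather than part of this API:
  //
  //   uint8_t* roots_data = code_cache->ReserveData(
  //       self, region, stack_map_size, roots.size(), method);
  //   // ... emit the stack map and the machine code ...
  //   uint8_t* code_ptr = code_cache->CommitCode(
  //       self, region, method, code, code_size, stack_map, stack_map_size, roots_data,
  //       roots, /*osr=*/ false, has_should_deoptimize_flag, cha_single_implementation_list);
  //   // A null result means the code could not be committed.
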
  // Clear data from the data portion of the code cache.
  void ClearData(Thread* self, JitMemoryRegion* region, uint8_t* roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT compiled code associated with it.
  // Return null if 'pc' is not in the code cache. 'method' is passed for a
  // sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

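  // Return the method header of the on-stack-replacement code compiled for `method`, or null if
  // the cache holds no OSR code for it.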
  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes method from the cache for testing purposes. The caller
  // must ensure that all threads are suspended and the method should
  // not be in any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true, this
  // will collect and retry if the first allocation is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

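  // Return whether `mspace` is one of the dlmalloc spaces managed by this code cache's private
  // region.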
  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return private_region_.OwnsSpace(mspace);
  }

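  // Callback used by the underlying dlmalloc allocator to grow `mspace` by `increment` bytes.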
  void* MoreCore(const void* mspace, intptr_t increment);

  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

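  // Invalidate the compiled code `code` of `method`, for instance because an assumption it was
  // compiled under (such as a CHA single-implementation assumption) no longer holds.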
  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

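  // Dump statistics about the code cache (sizes, number of compilations and collections,
  // memory-use histograms) to `os`.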
  void Dump(std::ostream& os) REQUIRES(!Locks::jit_lock_);

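  // Return whether the code cache holds on-stack-replacement code for `method`.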
  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

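  // Called by the GC to sweep the root tables stored alongside JIT-compiled code, updating
  // moved references and clearing dead ones through `visitor`.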
  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow the reading of inline caches when it processes them,
  // to avoid a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
  void DisallowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!Locks::jit_lock_);

  // Notify the code cache that the method at the pointer 'old_method' is being moved to the
  // pointer 'new_method' since it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code.
  void SetGarbageCollectCode(bool value) REQUIRES(!Locks::jit_lock_);

  bool GetGarbageCollectCode() REQUIRES(!Locks::jit_lock_);

  // Unsafe variant for debug checks.
  bool GetGarbageCollectCodeUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
    return garbage_collect_code_;
  }

  // If JIT GC has been disabled (and instrumentation has been enabled), this will return the
  // JIT-compiled entrypoint for this method. Otherwise it will return null.
  const void* FindCompiledCodeForInstrumentation(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Fetch the entrypoint that the zygote may have saved for a method. The zygote saves an
  // entrypoint only for the case when the method's declaring class is not initialized.
  const void* GetZygoteSavedEntryPoint(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

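  // Called in a child process right after it forks from the zygote, so the code cache can adapt
  // to the new process (for instance whether its code should be garbage collected).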
  void PostForkChildAction(bool is_system_server, bool is_zygote);

  // Clear the entrypoints of JIT compiled methods that belong in the zygote space.
  // This is used for removing non-debuggable JIT code at the point we realize the runtime
  // is debuggable.
  void ClearEntryPointsInZygoteExecSpace()
      REQUIRES(!Locks::jit_lock_)
      REQUIRES(Locks::mutator_lock_);

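  // Return the memory region owned by this process, as opposed to the region shared with and
  // inherited from the zygote.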
  JitMemoryRegion* GetPrivateRegion() { return &private_region_; }

 private:
  JitCodeCache();

  // Internal version of 'CommitCode' that will not retry if the
  // allocation fails. Return null if the allocation fails.
  uint8_t* CommitCodeInternal(Thread* self,
                              JitMemoryRegion* region,
                              ArtMethod* method,
                              const uint8_t* code,
                              size_t code_size,
                              const uint8_t* stack_map,
                              size_t stack_map_size,
                              uint8_t* roots_data,
                              const std::vector<Handle<mirror::Object>>& roots,
                              bool osr,
                              bool has_should_deoptimize_flag,
                              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

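  // Internal version of 'AddProfilingInfo' that makes a single allocation attempt, with the JIT
  // lock already held.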
  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
  // The non-mutator lock version should be used if possible. This method will release then
  // re-acquire the mutator lock.
  void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
      REQUIRES(Locks::jit_lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(Locks::jit_lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES(!Locks::cha_lock_);

  // Removes method from the cache. The caller must ensure that all threads
  // are suspended and the method should not be in any thread's stack.
  bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
      REQUIRES(Locks::jit_lock_)
      REQUIRES(Locks::mutator_lock_);

  // Free code and data allocations for `code_ptr`.
  void FreeCodeAndData(const void* code_ptr) REQUIRES(Locks::jit_lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!Locks::jit_lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!Locks::jit_lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(Locks::jit_lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(Locks::jit_lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

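  // Free compiled code that was not marked as live during the current collection.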
  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

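  // Mark, in the live bitmap, the compiled code that is currently referenced from any thread's
  // stack.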
  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

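  // Return the bitmap used to track live compiled code during a collection.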
  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

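  // Return whether `ptr` lies in the data (respectively executable) space inherited from the
  // zygote.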
  bool IsInZygoteDataSpace(const void* ptr) const {
    return shared_region_.IsInDataSpace(ptr);
  }

  bool IsInZygoteExecSpace(const void* ptr) const {
    return shared_region_.IsInExecSpace(ptr);
  }

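  // Helpers for coordinating with the GC: whether weak references held in inline caches may be
  // read right now, and a way to block until they may be.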
  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  class JniStubKey;
  class JniStubData;

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(Locks::jit_lock_);

  // -------------- JIT memory regions ------------------------------------- //

  // Shared region, inherited from the zygote.
  JitMemoryRegion shared_region_;

  // Process's own region.
  JitMemoryRegion private_region_;

  // -------------- Global JIT maps --------------------------------------- //

  // Holds compiled code associated with the shorty for a JNI stub.
  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(Locks::jit_lock_);

  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(Locks::jit_lock_);

  // Holds OSR compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(Locks::jit_lock_);

  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);

  // -------------- JIT GC related data structures ----------------------- //

  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(Locks::jit_lock_);

  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);

  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(Locks::jit_lock_);

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_ GUARDED_BY(Locks::jit_lock_);

  // ---------------- JIT statistics -------------------------------------- //

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(Locks::jit_lock_);

  // Histogram for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(Locks::jit_lock_);

  // Histogram for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(Locks::jit_lock_);

  // Histogram for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(Locks::jit_lock_);

  friend class art::JitJniStubTestHelper;
  friend class ScopedCodeCacheWrite;
  friend class MarkCodeClosure;

  DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_