/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include "instrumentation.h"

#include "atomic.h"
#include "base/arena_containers.h"
#include "base/histogram-inl.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "method_reference.h"
#include "safe_map.h"

namespace art {

class ArtMethod;
template<class T> class Handle;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;

namespace gc {
namespace accounting {
template<size_t kAlignment> class MemoryRangeBitmap;
}  // namespace accounting
}  // namespace gc

namespace mirror {
class Class;
class Object;
template<class T> class ObjectArray;
}  // namespace mirror

namespace jit {

class JitInstrumentationCache;

// Alignment in bytes that will suit all architectures.
static constexpr int kJitCodeAlignment = 16;
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
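// Illustrative note (not in the original header): the bitmap maps each
// kJitCodeAlignment-byte (16-byte) slot of the code region to one bit, so
// marking JIT code live during a collection only touches the bits covering
// that method's code allocation.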

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Set the default to a very low value for debug builds to stress the code
  // cache collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB (4 * kInitialCapacity on release builds).
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache with an initial code + data capacity of `initial_capacity`,
  // growable up to `max_capacity`. On failure, an error message is returned in the
  // out arg `error_msg`.
  static JitCodeCache* Create(size_t initial_capacity,
                              size_t max_capacity,
                              bool generate_debug_info,
                              std::string* error_msg);
  ~JitCodeCache();

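  // Illustrative usage sketch (an example added for clarity, not part of the
  // original header); LOG is ART's logging macro:
  //
  //   std::string error_msg;
  //   JitCodeCache* cache = JitCodeCache::Create(JitCodeCache::kInitialCapacity,
  //                                              JitCodeCache::kMaxCapacity,
  //                                              /* generate_debug_info */ false,
  //                                              &error_msg);
  //   if (cache == nullptr) {
  //     LOG(ERROR) << "Failed to create JIT code cache: " << error_msg;
  //   }
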
  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!lock_);

  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Notify the code cache that the compiler wants to use the
  // profiling info of `method` to drive optimizations,
  // and therefore ensure the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Allocate and write code and its metadata to the code cache.
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  uint8_t* CommitCode(Thread* self,
                      ArtMethod* method,
                      uint8_t* stack_map,
                      uint8_t* method_info,
                      uint8_t* roots_data,
                      size_t frame_size_in_bytes,
                      size_t core_spill_mask,
                      size_t fp_spill_mask,
                      const uint8_t* code,
                      size_t code_size,
                      size_t data_size,
                      bool osr,
                      Handle<mirror::ObjectArray<mirror::Object>> roots,
                      bool has_should_deoptimize_flag,
                      const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

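  // A plausible compiler-side flow, inferred from the declarations in this
  // header (a sketch, not a documented contract):
  //
  //   if (cache->NotifyCompilationOf(method, self, /* osr */ false)) {
  //     // ... compile, using ReserveData() for stack map/roots storage ...
  //     cache->CommitCode(self, method, /* compiled artifacts elided */ ...);
  //     cache->DoneCompiling(method, self, /* osr */ false);
  //   }
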
  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);

  // Allocate a region of data that contains `stack_map_size` + `method_info_size`
  // bytes, and potentially space for storing `number_of_roots` roots. The out
  // pointers are set to null if there is no more room.
  // Return the number of bytes allocated.
  size_t ReserveData(Thread* self,
                     size_t stack_map_size,
                     size_t method_info_size,
                     size_t number_of_roots,
                     ArtMethod* method,
                     uint8_t** stack_map_data,
                     uint8_t** method_info_data,
                     uint8_t** roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Clear data from the data portion of the code cache.
  void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given 'pc', try to find the JIT compiled code associated with it.
  // Return null if 'pc' is not in the code cache. 'method' is passed for
  // a sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes `method` from the cache for testing purposes. The caller
  // must ensure that all threads are suspended and that the method is
  // not on any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
  // will collect and retry if the first allocation is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return mspace == code_mspace_ || mspace == data_mspace_;
  }

  void* MoreCore(const void* mspace, intptr_t increment);

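  // Note (an inference, not stated in this header): OwnsSpace() and MoreCore()
  // act as callbacks for the dlmalloc mspaces backing this cache; when an
  // mspace needs to grow, the allocator asks the cache to extend its footprint
  // within the reserved mem maps.
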
  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  uint64_t GetLastUpdateTimeNs() const;

  size_t GetMemorySizeOfCodePointer(const void* ptr) REQUIRES(!lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow the reading of inline caches when it processes them,
  // to avoid having a class used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!lock_);
  void DisallowInlineCacheAccess() REQUIRES(!lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);

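  // How these pair up (an inference from the declarations in this header, not
  // a stated contract): the GC brackets its processing of inline caches with
  // DisallowInlineCacheAccess() / AllowInlineCacheAccess(), and
  // BroadcastForInlineCacheAccess() wakes threads blocked in the private
  // WaitUntilInlineCacheAccessible() once reading is safe again.
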
  // Notify the code cache that the method at 'old_method' is being moved to
  // 'new_method' because it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code. Should only be used
  // by tests.
  void SetGarbageCollectCode(bool value) {
    garbage_collect_code_ = value;
  }

 private:
  // Take ownership of maps.
  JitCodeCache(MemMap* code_map,
               MemMap* data_map,
               size_t initial_code_capacity,
               size_t initial_data_capacity,
               size_t max_capacity,
               bool garbage_collect_code);

  // Internal version of 'CommitCode' that will not retry if the
  // allocation fails. Return null if the allocation fails.
  uint8_t* CommitCodeInternal(Thread* self,
                              ArtMethod* method,
                              uint8_t* stack_map,
                              uint8_t* method_info,
                              uint8_t* roots_data,
                              size_t frame_size_in_bytes,
                              size_t core_spill_mask,
                              size_t fp_spill_mask,
                              const uint8_t* code,
                              size_t code_size,
                              size_t data_size,
                              bool osr,
                              Handle<mirror::ObjectArray<mirror::Object>> roots,
                              bool has_should_deoptimize_flag,
                              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(!lock_)
      REQUIRES(!Locks::cha_lock_);

  // Free the mspace allocations backing `code_ptr`.
  void FreeCode(const void* code_ptr) REQUIRES(lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);

  // Try to increase the current capacity of the code cache. Return whether we
  // succeeded at doing so.
  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);

  // Set the footprint limit of the code cache.
  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool CheckLiveCompiledCodeHasProfilingInfo()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void FreeCode(uint8_t* code) REQUIRES(lock_);
  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
  void FreeData(uint8_t* data) REQUIRES(lock_);
  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Lock for guarding allocations, collections, and the method_code_map_.
  Mutex lock_;
  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(lock_);
  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(lock_);
  // Mem map which holds code.
  std::unique_ptr<MemMap> code_map_;
  // Mem map which holds data (stack maps and profiling info).
  std::unique_ptr<MemMap> data_map_;
  // The opaque mspace for allocating code.
  void* code_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating data.
  void* data_mspace_ GUARDED_BY(lock_);
  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
  // Holds OSR compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);

  // The maximum capacity in bytes this code cache can go to.
  size_t max_capacity_ GUARDED_BY(lock_);

  // The current capacity in bytes of the code cache.
  size_t current_capacity_ GUARDED_BY(lock_);

  // The current footprint in bytes of the code portion of the code cache.
  size_t code_end_ GUARDED_BY(lock_);

  // The current footprint in bytes of the data portion of the code cache.
  size_t data_end_ GUARDED_BY(lock_);

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);

  // Last time the code cache was updated.
  // It is atomic to avoid locking when reading it.
  Atomic<uint64_t> last_update_time_ns_;

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_;

  // The size in bytes of used memory for the data portion of the code cache.
  size_t used_memory_for_data_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the code portion of the code cache.
  size_t used_memory_for_code_ GUARDED_BY(lock_);

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(lock_);

  // Histogram for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_