/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include "instrumentation.h"

#include "atomic.h"
#include "base/arena_containers.h"
#include "base/histogram-inl.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/bitmap.h"
#include "gc_root.h"
#include "jni.h"
#include "method_reference.h"
#include "oat_file.h"
#include "profile_compilation_info.h"
#include "safe_map.h"
#include "thread_pool.h"

namespace art {

class ArtMethod;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class OatQuickMethodHeader;
class ProfilingInfo;

namespace jit {

class JitInstrumentationCache;

// Alignment in bytes that will suit all architectures.
static constexpr int kJitCodeAlignment = 16;
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
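
// Each bit of the live bitmap accounts for one kJitCodeAlignment-sized chunk of
// code. A minimal sketch of the address-to-bit arithmetic this implies (the
// helper below is hypothetical, for illustration only; the real mapping lives
// in gc::accounting::MemoryRangeBitmap):
//
//   size_t BitIndexForCodeAddress(uintptr_t code_begin, uintptr_t address) {
//     return (address - code_begin) / kJitCodeAlignment;
//   }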

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Use a very low initial capacity for debug builds to stress the code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB.
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache with an initial code + data capacity of `initial_capacity`,
  // growable up to `max_capacity`. On failure, an error message is stored in the out
  // arg `error_msg`. See the usage sketch below.
  static JitCodeCache* Create(size_t initial_capacity,
                              size_t max_capacity,
                              bool generate_debug_info,
                              std::string* error_msg);
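
  // A minimal creation sketch, assuming the caller decides debug-info generation
  // from its own options (the surrounding code is illustrative, not part of this
  // API):
  //
  //   std::string error_msg;
  //   JitCodeCache* cache = JitCodeCache::Create(JitCodeCache::kInitialCapacity,
  //                                              JitCodeCache::kMaxCapacity,
  //                                              /* generate_debug_info */ false,
  //                                              &error_msg);
  //   if (cache == nullptr) {
  //     LOG(ERROR) << "JIT code cache creation failed: " << error_msg;
  //   }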

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!lock_);

  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Notify the code cache that the compiler wants to use the profiling info of
  // `method` to drive optimizations, and therefore ensure the returned profiling
  // info object is not collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
91
buzbee454b3b62016-04-07 14:42:47 -070092 void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -070093 REQUIRES_SHARED(Locks::mutator_lock_)
Nicolas Geoffray73be1e82015-09-17 15:22:56 +010094 REQUIRES(!lock_);
95
Nicolas Geoffray07e3ca92016-03-11 09:57:57 +000096 void DoneCompilerUse(ArtMethod* method, Thread* self)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -070097 REQUIRES_SHARED(Locks::mutator_lock_)
Nicolas Geoffrayb6e20ae2016-03-07 14:29:04 +000098 REQUIRES(!lock_);
99
  // Allocate and write code and its metadata to the code cache.
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  uint8_t* CommitCode(Thread* self,
                      ArtMethod* method,
                      uint8_t* stack_map,
                      uint8_t* method_info,
                      uint8_t* roots_data,
                      size_t frame_size_in_bytes,
                      size_t core_spill_mask,
                      size_t fp_spill_mask,
                      const uint8_t* code,
                      size_t code_size,
                      size_t data_size,
                      bool osr,
                      Handle<mirror::ObjectArray<mirror::Object>> roots,
                      bool has_should_deoptimize_flag,
                      const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
123
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +0100124 // Return true if the code cache contains this pc.
125 bool ContainsPc(const void* pc) const;
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800126
Nicolas Geoffraya5891e82015-11-06 14:18:27 +0000127 // Return true if the code cache contains this method.
128 bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);
129
  // Allocate a region of data that will contain the stack map and method info,
  // and potentially space for storing `number_of_roots` roots, setting the out
  // arguments to the reserved regions. Return the number of bytes allocated. If
  // there is no more room, the out arguments are set to null.
  size_t ReserveData(Thread* self,
                     size_t stack_map_size,
                     size_t method_info_size,
                     size_t number_of_roots,
                     ArtMethod* method,
                     uint8_t** stack_map_data,
                     uint8_t** method_info_data,
                     uint8_t** roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
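
  // A sketch of the intended reserve-then-commit flow for a JIT compiler
  // backend (the local variable values are placeholders; the real caller is
  // the optimizing compiler's code generator):
  //
  //   uint8_t* stack_map_data = nullptr;
  //   uint8_t* method_info_data = nullptr;
  //   uint8_t* roots_data = nullptr;
  //   code_cache->ReserveData(self, stack_map_size, method_info_size,
  //                           number_of_roots, method,
  //                           &stack_map_data, &method_info_data, &roots_data);
  //   ... write stack maps, method info, and roots into the reserved areas ...
  //   uint8_t* entry = code_cache->CommitCode(
  //       self, method, stack_map_data, method_info_data, roots_data,
  //       frame_size_in_bytes, core_spill_mask, fp_spill_mask,
  //       code, code_size, data_size, osr, roots,
  //       has_should_deoptimize_flag, cha_single_implementation_list);
  //   if (entry == nullptr) { /* commit failed, e.g. the cache is full */ }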

  // Clear data from the data portion of the code cache.
  void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given 'pc', try to find the JIT compiled code associated with it.
  // Return null if 'pc' is not in the code cache. 'method' is passed as a
  // sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
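
  // A sketch of how a stack walk might use this lookup (for illustration only;
  // the actual call sites are in the stack-walking machinery):
  //
  //   uintptr_t pc = ...;  // return address observed during a stack walk
  //   OatQuickMethodHeader* header = code_cache->LookupMethodHeader(pc, method);
  //   if (header != nullptr) {
  //     // `pc` lies within JIT-compiled code for `method`.
  //   }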
Nicolas Geoffray1dad3f62015-10-23 14:59:54 +0100169
Nicolas Geoffrayb331feb2016-02-05 16:51:53 +0000170 OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
171 REQUIRES(!lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700172 REQUIRES_SHARED(Locks::mutator_lock_);
Nicolas Geoffrayb331feb2016-02-05 16:51:53 +0000173
  // Remove the method from the cache, for testing purposes. The caller
  // must ensure that all threads are suspended and that the method is
  // not on any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
  // this will run a code cache collection and retry if the first allocation
  // is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return mspace == code_mspace_ || mspace == data_mspace_;
  }

  void* MoreCore(const void* mspace, intptr_t increment);

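  // `MoreCore` is the dlmalloc-style morecore hook for the two mspaces. A
  // sketch of the dispatch a global morecore callback might perform (the
  // helper name is hypothetical; only OwnsSpace/MoreCore are declared here):
  //
  //   void* JitMoreCore(void* mspace, intptr_t increment) {
  //     JitCodeCache* cache = Runtime::Current()->GetJit()->GetCodeCache();
  //     DCHECK(cache->OwnsSpace(mspace));
  //     return cache->MoreCore(mspace, increment);
  //   }
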
204
Calin Juravle99629622016-04-19 16:33:46 +0100205 // Adds to `methods` all profiled methods which are part of any of the given dex locations.
206 void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
Calin Juravle940eb0c2017-01-30 19:30:44 -0800207 std::vector<ProfileMethodInfo>& methods)
Calin Juravle31f2c152015-10-23 17:56:15 +0100208 REQUIRES(!lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700209 REQUIRES_SHARED(Locks::mutator_lock_);
Calin Juravle31f2c152015-10-23 17:56:15 +0100210
  uint64_t GetLastUpdateTimeNs() const;

  size_t GetCurrentCapacity() REQUIRES(!lock_) {
    MutexLock lock(Thread::Current(), lock_);
    return current_capacity_;
  }

  size_t GetMemorySizeOfCodePointer(const void* ptr) REQUIRES(!lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow the reading of inline caches when it processes them,
  // to avoid a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!lock_);
  void DisallowInlineCacheAccess() REQUIRES(!lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
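
  // A sketch of the pause-then-release protocol a GC might follow (assumed
  // ordering for illustration; the real call sites are in the collector):
  //
  //   code_cache->DisallowInlineCacheAccess();      // block readers
  //   ... visit and update inline cache weak references ...
  //   code_cache->AllowInlineCacheAccess();         // re-enable access
  //   code_cache->BroadcastForInlineCacheAccess();  // wake waiting threads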

  // Notify the code cache that the method pointed to by 'old_method' is being
  // moved to 'new_method' because it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code. Should only be used
  // by tests.
  void SetGarbageCollectCode(bool value) {
    garbage_collect_code_ = value;
  }

 private:
  // Take ownership of maps.
  JitCodeCache(MemMap* code_map,
               MemMap* data_map,
               size_t initial_code_capacity,
               size_t initial_data_capacity,
               size_t max_capacity,
               bool garbage_collect_code);

  // Internal version of 'CommitCode' that will not retry if the
  // allocation fails. Returns null in that case.
  uint8_t* CommitCodeInternal(Thread* self,
                              ArtMethod* method,
                              uint8_t* stack_map,
                              uint8_t* method_info,
                              uint8_t* roots_data,
                              size_t frame_size_in_bytes,
                              size_t core_spill_mask,
                              size_t fp_spill_mask,
                              const uint8_t* code,
                              size_t code_size,
                              size_t data_size,
                              bool osr,
                              Handle<mirror::ObjectArray<mirror::Object>> roots,
                              bool has_should_deoptimize_flag,
                              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(!lock_)
      REQUIRES(!Locks::cha_lock_);

  // Free the mspace allocations for `code_ptr`.
  void FreeCode(const void* code_ptr) REQUIRES(lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);

  // Try to increase the current capacity of the code cache. Return whether we
  // succeeded at doing so.
  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);

  // Set the footprint limit of the code cache.
  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool CheckLiveCompiledCodeHasProfilingInfo()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void FreeCode(uint8_t* code) REQUIRES(lock_);
  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
  void FreeData(uint8_t* data) REQUIRES(lock_);
  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Lock for guarding allocations, collections, and the method_code_map_.
  Mutex lock_;
  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(lock_);
  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(lock_);
  // Mem map which holds code.
  std::unique_ptr<MemMap> code_map_;
  // Mem map which holds data (stack maps and profiling info).
  std::unique_ptr<MemMap> data_map_;
  // The opaque mspace for allocating code.
  void* code_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating data.
  void* data_mspace_ GUARDED_BY(lock_);
  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
  // Holds compiled code associated with its ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
  // Holds OSR compiled code associated with its ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);

  // The maximum capacity in bytes this code cache can go to.
  size_t max_capacity_ GUARDED_BY(lock_);

  // The current capacity in bytes of the code cache.
  size_t current_capacity_ GUARDED_BY(lock_);

  // The current footprint in bytes of the code portion of the code cache.
  size_t code_end_ GUARDED_BY(lock_);

  // The current footprint in bytes of the data portion of the code cache.
  size_t data_end_ GUARDED_BY(lock_);

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);

  // Last time the code cache was updated.
  // It is atomic to avoid locking when reading it.
  Atomic<uint64_t> last_update_time_ns_;

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_;

  // The size in bytes of used memory for the data portion of the code cache.
  size_t used_memory_for_data_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the code portion of the code cache.
  size_t used_memory_for_code_ GUARDED_BY(lock_);

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(lock_);

  // Histogram for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_