/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "art_method-inl.h"
#include "base/stl_util.h"
#include "base/time_utils.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
#include "scoped_thread_state_change.h"
#include "thread_list.h"

namespace art {
namespace jit {

static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;

#define CHECKED_MPROTECT(memory, size, prot)                \
  do {                                                      \
    int rc = mprotect(memory, size, prot);                  \
    if (UNLIKELY(rc != 0)) {                                \
      /* mprotect returns -1 and sets errno itself, so  */  \
      /* let PLOG report errno rather than clobbering   */  \
      /* it with the -1 return value.                   */  \
      PLOG(FATAL) << "Failed to mprotect jit code cache";   \
    }                                                       \
  } while (false)

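// A sketch of typical creation (the sizes below are hypothetical, nothing
// here mandates them):
//
//   std::string error_msg;
//   JitCodeCache* cache = JitCodeCache::Create(64 * KB, 16 * MB, &error_msg);
//   if (cache == nullptr) {
//     LOG(WARNING) << "JIT disabled: " << error_msg;
//   }
//
// One anonymous map of max_capacity is reserved up front and split in half:
// the low half becomes the data cache and the high half is remapped as the
// code cache, so code-to-data offsets always fit comfortably in 32 bits.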
JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
                                   size_t max_capacity,
                                   std::string* error_msg) {
  CHECK_GE(max_capacity, initial_capacity);
  // We need 32-bit offsets from method headers in the code cache to point to
  // things in the data cache. If the two maps were more than 4GB apart, such
  // offsets would overflow. Stay well below that by capping capacity at 1GB.
  if (max_capacity > 1 * GB) {
    std::ostringstream oss;
    oss << "Maximum code cache capacity is limited to 1 GB, "
        << PrettySize(max_capacity) << " is too big";
    *error_msg = oss.str();
    return nullptr;
  }

  std::string error_str;
  // This map name is recognized by android_os_Debug.cpp for memory accounting.
  MemMap* data_map = MemMap::MapAnonymous(
      "data-code-cache", nullptr, max_capacity, kProtAll, false, false, &error_str);
  if (data_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  // Align both capacities to 2 * page size: each is split in half below, and
  // the halves must stay page-aligned, as that's the unit mspaces use.
  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
  max_capacity = RoundDown(max_capacity, 2 * kPageSize);

  // The data cache is 1 / 2 of the map.
  // TODO: Make this variable?
  size_t data_size = max_capacity / 2;
  size_t code_size = max_capacity - data_size;
  DCHECK_EQ(code_size + data_size, max_capacity);
  uint8_t* divider = data_map->Begin() + data_size;

  MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
  if (code_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
    *error_msg = oss.str();
    return nullptr;
  }
  DCHECK_EQ(code_map->Begin(), divider);
  data_size = initial_capacity / 2;
  code_size = initial_capacity - data_size;
  DCHECK_EQ(code_size + data_size, initial_capacity);
  return new JitCodeCache(code_map, data_map, code_size, data_size, max_capacity);
}

JitCodeCache::JitCodeCache(MemMap* code_map,
                           MemMap* data_map,
                           size_t initial_code_capacity,
                           size_t initial_data_capacity,
                           size_t max_capacity)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache variable", lock_),
      collection_in_progress_(false),
      code_map_(code_map),
      data_map_(data_map),
      max_capacity_(max_capacity),
      current_capacity_(initial_code_capacity + initial_data_capacity),
      code_end_(initial_code_capacity),
      data_end_(initial_data_capacity),
      has_done_one_collection_(false),
      last_update_time_ns_(0) {

  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);

  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
    PLOG(FATAL) << "create_mspace_with_base failed";
  }

  SetFootprintLimit(current_capacity_);

  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);

  VLOG(jit) << "Created jit code cache: initial data size="
            << PrettySize(initial_data_capacity)
            << ", initial code size="
            << PrettySize(initial_code_capacity);
}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return code_map_->Begin() <= ptr && ptr < code_map_->End();
}

bool JitCodeCache::ContainsMethod(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& it : method_code_map_) {
    if (it.second == method) {
      return true;
    }
  }
  return false;
}

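// RAII helper that makes the whole code map writable (RWX) for its scope and
// restores RX on exit. Callers are expected to hold lock_, so at most one
// thread has the map writable at a time, keeping the window where code pages
// are writable as short as possible.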
class ScopedCodeCacheWrite {
 public:
  explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
  }
  ~ScopedCodeCacheWrite() {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  }

 private:
  MemMap* const code_map_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};

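// CommitCode tries once; on failure it runs a code cache collection and
// retries a single time. A second failure is reported to the caller as
// nullptr rather than looping.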
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  const uint8_t* mapping_table,
                                  const uint8_t* vmap_table,
                                  const uint8_t* gc_map,
                                  size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  const uint8_t* code,
                                  size_t code_size) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       mapping_table,
                                       vmap_table,
                                       gc_map,
                                       frame_size_in_bytes,
                                       core_spill_mask,
                                       fp_spill_mask,
                                       code,
                                       code_size);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                mapping_table,
                                vmap_table,
                                gc_map,
                                frame_size_in_bytes,
                                core_spill_mask,
                                fp_spill_mask,
                                code,
                                code_size);
  }
  return result;
}

bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

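// Each JIT allocation in the code mspace has the layout
//   [alignment padding][OatQuickMethodHeader][code...]
// with the code pointer aligned for kRuntimeISA, so the start of the
// allocation is one aligned header-size below the code pointer.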
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  const uint8_t* data = method_header->GetNativeGcMap();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  data = method_header->GetMappingTable();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  // Use the offset directly, to avoid the sanity check that the method was
  // compiled with the optimizing compiler.
  // TODO(ngeoffray): Clean up.
  if (method_header->vmap_table_offset_ != 0) {
    data = method_header->code_ - method_header->vmap_table_offset_;
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
}

void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  MutexLock mu(self, lock_);
  // We do not check if a code cache GC is in progress, as this method is
  // called with the classlinker_classes_lock_ held, and suspending ourselves
  // could lead to a deadlock.
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->second)) {
        FreeCode(it->first, it->second);
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }
  }
  for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
    ProfilingInfo* info = *it;
    if (alloc.ContainsUnsafe(info->GetMethod())) {
      info->GetMethod()->SetProfilingInfo(nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
      it = profiling_infos_.erase(it);
    } else {
      ++it;
    }
  }
}

uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          const uint8_t* mapping_table,
                                          const uint8_t* vmap_table,
                                          const uint8_t* gc_map,
                                          size_t frame_size_in_bytes,
                                          size_t core_spill_mask,
                                          size_t fp_spill_mask,
                                          const uint8_t* code,
                                          size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  // Ensure the header ends up at expected instruction alignment.
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  size_t total_size = header_size + code_size;

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      uint8_t* result = reinterpret_cast<uint8_t*>(
          mspace_memalign(code_mspace_, alignment, total_size));
      if (result == nullptr) {
        return nullptr;
      }
      code_ptr = result + header_size;
      DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);

      std::copy(code, code + code_size, code_ptr);
      method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      new (method_header) OatQuickMethodHeader(
          (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
          (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
          (gc_map == nullptr) ? 0 : code_ptr - gc_map,
          frame_size_in_bytes,
          core_spill_mask,
          fp_spill_mask,
          code_size);
    }

    __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                            reinterpret_cast<char*>(code_ptr + code_size));
  }
  // We need to be in the runnable state to update the entry point for the
  // instrumentation.
  {
    MutexLock mu(self, lock_);
    method_code_map_.Put(code_ptr, method);
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, method_header->GetEntryPoint());
    if (collection_in_progress_) {
      // We need to update the live bitmap if there is a GC to ensure it sees this new
      // code.
      GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
    }
    last_update_time_ns_.StoreRelease(NanoTime());
    VLOG(jit)
        << "JIT added "
        << PrettyMethod(method) << "@" << method
        << " ccache_size=" << PrettySize(CodeCacheSizeLocked())
        << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
        << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
        << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
  }

  return reinterpret_cast<uint8_t*>(method_header);
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return CodeCacheSizeLocked();
}

size_t JitCodeCache::CodeCacheSizeLocked() {
  size_t bytes_allocated = 0;
  mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return DataCacheSizeLocked();
}

size_t JitCodeCache::DataCacheSizeLocked() {
  size_t bytes_allocated = 0;
  mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::NumberOfCompiledCode() {
  MutexLock mu(Thread::Current(), lock_);
  return method_code_map_.size();
}

void JitCodeCache::ClearData(Thread* self, void* data) {
  MutexLock mu(self, lock_);
  mspace_free(data_mspace_, data);
}

uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
  size = RoundUp(size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  return result;
}

uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
  uint8_t* result = ReserveData(self, end - begin);
  if (result == nullptr) {
    return nullptr;  // Out of space in the data cache.
  }
  std::copy(begin, end, result);
  return result;
}

class MarkCodeVisitor FINAL : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(code_cache_->GetLiveBitmap()) {}

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

class MarkCodeClosure FINAL : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
      : code_cache_(code_cache), barrier_(barrier) {}

  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_);
    visitor.WalkStack();
    if (kIsDebugBuild) {
      // The stack walking code queries the side instrumentation stack if it
      // sees an instrumentation exit pc, so the JIT code of methods in that stack
      // must have been seen. We sanity check this below.
      for (const instrumentation::InstrumentationStackFrame& frame
              : *thread->GetInstrumentationStack()) {
        // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
        // its stack frame, it is not the method owning return_pc_. We just pass null to
        // LookupMethodHeader: the method is only checked against in debug builds.
        OatQuickMethodHeader* method_header =
            code_cache_->LookupMethodHeader(frame.return_pc_, nullptr);
        if (method_header != nullptr) {
          const void* code = method_header->GetCode();
          CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
        }
      }
    }
    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  Barrier* const barrier_;
};

void JitCodeCache::NotifyCollectionDone(Thread* self) {
  collection_in_progress_ = false;
  lock_cond_.Broadcast(self);
}

void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
  size_t per_space_footprint = new_footprint / 2;
  DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
  DCHECK_EQ(per_space_footprint * 2, new_footprint);
  mspace_set_footprint_limit(data_mspace_, per_space_footprint);
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    mspace_set_footprint_limit(code_mspace_, per_space_footprint);
  }
}

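// Growth schedule sketch: with a 64 KB initial capacity (hypothetical), the
// capacity doubles up to 1 MB and then grows linearly by 1 MB per step,
// i.e. 64K, 128K, 256K, 512K, 1M, 2M, 3M, ... until max_capacity_ is hit.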
bool JitCodeCache::IncreaseCodeCacheCapacity() {
  if (current_capacity_ == max_capacity_) {
    return false;
  }

  // Double the capacity if we're below 1MB, otherwise increase it by 1MB.
  if (current_capacity_ < 1 * MB) {
    current_capacity_ *= 2;
  } else {
    current_capacity_ += 1 * MB;
  }
  if (current_capacity_ > max_capacity_) {
    current_capacity_ = max_capacity_;
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Increasing code cache capacity to " << PrettySize(current_capacity_);
  }

  SetFootprintLimit(current_capacity_);

  return true;
}

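// Collection algorithm, in broad strokes:
//   1. Under lock_, point every compiled method back at the interpreter
//      bridge and detach profiling infos (except for methods currently
//      being compiled).
//   2. Run a checkpoint on all threads; each thread walks its own stack and
//      marks, in the live bitmap, any JIT code it is still executing.
//   3. Under lock_ again, restore entry points for marked (live) code and
//      free everything unmarked.
// Methods whose code is freed fall back to the interpreter and may be
// re-JITed later if they become hot again.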
void JitCodeCache::GarbageCollectCache(Thread* self) {
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();

  // Wait for an existing collection, or let everyone know we are starting one.
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    } else {
      collection_in_progress_ = true;
    }
  }

  // Check if we just need to grow the capacity: if so, there is no need to
  // collect. Otherwise, allocate the live bitmap while we hold the lock.
  {
    MutexLock mu(self, lock_);
    if (has_done_one_collection_ && IncreaseCodeCacheCapacity()) {
      has_done_one_collection_ = false;
      NotifyCollectionDone(self);
      return;
    } else {
      live_bitmap_.reset(CodeCacheBitmap::Create(
          "code-cache-bitmap",
          reinterpret_cast<uintptr_t>(code_map_->Begin()),
          reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
    }
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }
  // Walk over all compiled methods and set their entry points to the
  // interpreter bridge.
  {
    MutexLock mu(self, lock_);
    for (auto& it : method_code_map_) {
      instrumentation->UpdateMethodsCode(it.second, GetQuickToInterpreterBridge());
    }
    for (ProfilingInfo* info : profiling_infos_) {
      if (!info->IsMethodBeingCompiled()) {
        info->GetMethod()->SetProfilingInfo(nullptr);
      }
    }
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  {
    Barrier barrier(0);
    size_t threads_running_checkpoint = 0;
    MarkCodeClosure closure(this, &barrier);
    threads_running_checkpoint =
        Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
    // Now that we have run our checkpoint, move to a suspended state and wait
    // for other threads to run the checkpoint.
    ScopedThreadSuspension sts(self, kSuspended);
    if (threads_running_checkpoint != 0) {
      barrier.Increment(self, threads_running_checkpoint);
    }
  }

  {
    MutexLock mu(self, lock_);
    // Free unused compiled code, and restore the entry point of used compiled code.
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        const void* code_ptr = it->first;
        ArtMethod* method = it->second;
        uintptr_t allocation = FromCodeToAllocation(code_ptr);
        const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        if (GetLiveBitmap()->Test(allocation)) {
          instrumentation->UpdateMethodsCode(method, method_header->GetEntryPoint());
          ++it;
        } else {
          method->ClearCounter();
          DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
          FreeCode(code_ptr, method);
          it = method_code_map_.erase(it);
        }
      }
    }

    void* data_mspace = data_mspace_;
    // Free all profiling infos of methods that were not being compiled.
    auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
        [data_mspace] (ProfilingInfo* info) {
          if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
            mspace_free(data_mspace, reinterpret_cast<uint8_t*>(info));
            return true;
          }
          return false;
        });
    profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());

    live_bitmap_.reset(nullptr);
    has_done_one_collection_ = true;
    NotifyCollectionDone(self);
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "After clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }
}

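// method_code_map_ is keyed by code pointer, so to find the header for an
// arbitrary pc we take the first entry at or above pc with lower_bound and
// step back one: the candidate is the method whose code begins closest below
// pc. Contains(pc) then rejects pcs that fall past the end of that method's
// code.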
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
  if (kRuntimeISA == kArm) {
    // On Thumb-2, the pc is offset by one.
    --pc;
  }
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  MutexLock mu(Thread::Current(), lock_);
  if (method_code_map_.empty()) {
    return nullptr;
  }
  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
  --it;

  const void* code_ptr = it->first;
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  if (!method_header->Contains(pc)) {
    return nullptr;
  }
  if (kIsDebugBuild && method != nullptr) {
    DCHECK_EQ(it->second, method)
        << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
  }
  return method_header;
}

ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                              ArtMethod* method,
                                              const std::vector<uint32_t>& entries,
                                              bool retry_allocation) {
  ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);

  if (info == nullptr && retry_allocation) {
    GarbageCollectCache(self);
    info = AddProfilingInfoInternal(self, method, entries);
  }
  return info;
}

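// A ProfilingInfo is allocated in the data mspace as one variable-length
// block: the ProfilingInfo object itself followed by one InlineCache per
// instrumented dex pc, rounded up to pointer size.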
ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
                                                      ArtMethod* method,
                                                      const std::vector<uint32_t>& entries) {
  size_t profile_info_size = RoundUp(
      sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(),
      sizeof(void*));
  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);

  // Check whether some other thread has concurrently created it.
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info != nullptr) {
    return info;
  }

  uint8_t* data = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, profile_info_size));
  if (data == nullptr) {
    return nullptr;
  }
  info = new (data) ProfilingInfo(method, entries);
  method->SetProfilingInfo(info);
  profiling_infos_.push_back(info);
  return info;
}

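// MoreCore is the dlmalloc "morecore" callback for our two mspaces: each was
// created with create_mspace_with_base over the start of its reserved map,
// and growth requests are satisfied sbrk-style by bumping code_end_ /
// data_end_ within the already-reserved MemMap, so no new mapping is needed.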
// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
// is already held.
void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
  if (code_mspace_ == mspace) {
    size_t result = code_end_;
    code_end_ += increment;
    return reinterpret_cast<void*>(result + code_map_->Begin());
  } else {
    DCHECK_EQ(data_mspace_, mspace);
    size_t result = data_end_;
    data_end_ += increment;
    return reinterpret_cast<void*>(result + data_map_->Begin());
  }
}

void JitCodeCache::GetCompiledArtMethods(const std::set<const std::string>& dex_base_locations,
                                         std::vector<ArtMethod*>& methods) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto it : method_code_map_) {
    if (ContainsElement(dex_base_locations, it.second->GetDexFile()->GetBaseLocation())) {
      methods.push_back(it.second);
    }
  }
}

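// Pairs with the StoreRelease in CommitCodeInternal: a reader that observes
// a given timestamp also observes the code committed before it was stored.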
uint64_t JitCodeCache::GetLastUpdateTimeNs() const {
  return last_update_time_ns_.LoadAcquire();
}

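// NotifyCompilationOf/DoneCompiling bracket a JIT compilation: the flag on
// ProfilingInfo both prevents two threads from compiling the same method at
// once and keeps the info alive across a cache collection (see
// GarbageCollectCache, which skips infos of methods being compiled).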
bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self) {
  if (ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return false;
  }
  MutexLock mu(self, lock_);
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info == nullptr || info->IsMethodBeingCompiled()) {
    return false;
  }
  info->SetIsMethodBeingCompiled(true);
  return true;
}

void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED) {
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  DCHECK(info->IsMethodBeingCompiled());
  info->SetIsMethodBeingCompiled(false);
}

}  // namespace jit
}  // namespace art