/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "art_method-inl.h"
#include "base/time_utils.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
#include "scoped_thread_state_change.h"
#include "thread_list.h"

namespace art {
namespace jit {

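// Protection flags for the two halves of the cache: the data half stays
// readable/writable, while the code half is normally read/execute and is
// only flipped to kProtAll while new code is being written
// (see ScopedCodeCacheWrite below).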
static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;

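// mprotect(2)s the given range with the given protection, aborting with the
// errno value logged if the kernel refuses the change.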
#define CHECKED_MPROTECT(memory, size, prot)                  \
  do {                                                        \
    int rc = mprotect(memory, size, prot);                    \
    if (UNLIKELY(rc != 0)) {                                  \
      /* mprotect sets errno itself; let PLOG report it. */   \
      PLOG(FATAL) << "Failed to mprotect jit code cache";     \
    }                                                         \
  } while (false)

JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
                                   size_t max_capacity,
                                   std::string* error_msg) {
  CHECK_GE(max_capacity, initial_capacity);
  // Offsets stored in method headers are 32-bit and point from the code cache
  // into the data cache, so the two maps must stay within 4GB of each other.
  // Capping the total capacity at 1 GB keeps us safely below that.
  if (max_capacity > 1 * GB) {
    std::ostringstream oss;
    oss << "Maximum code cache capacity is limited to 1 GB, "
        << PrettySize(max_capacity) << " is too big";
    *error_msg = oss.str();
    return nullptr;
  }

  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  MemMap* data_map = MemMap::MapAnonymous(
      "data-code-cache", nullptr, max_capacity, kProtAll, false, false, &error_str);
  if (data_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  // Round both capacities down to a multiple of two pages: the map is split
  // into two halves below, and this keeps each half page-aligned, which is
  // the unit mspaces work in.
  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
  max_capacity = RoundDown(max_capacity, 2 * kPageSize);

  // Data cache is 1 / 2 of the map.
  // TODO: Make this variable?
  size_t data_size = max_capacity / 2;
  size_t code_size = max_capacity - data_size;
  DCHECK_EQ(code_size + data_size, max_capacity);
  uint8_t* divider = data_map->Begin() + data_size;

  MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
  if (code_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
    *error_msg = oss.str();
    return nullptr;
  }
  DCHECK_EQ(code_map->Begin(), divider);
  data_size = initial_capacity / 2;
  code_size = initial_capacity - data_size;
  DCHECK_EQ(code_size + data_size, initial_capacity);
  return new JitCodeCache(code_map, data_map, code_size, data_size, max_capacity);
}

JitCodeCache::JitCodeCache(MemMap* code_map,
                           MemMap* data_map,
                           size_t initial_code_capacity,
                           size_t initial_data_capacity,
                           size_t max_capacity)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache variable", lock_),
      collection_in_progress_(false),
      code_map_(code_map),
      data_map_(data_map),
      max_capacity_(max_capacity),
      current_capacity_(initial_code_capacity + initial_data_capacity),
      code_end_(initial_code_capacity),
      data_end_(initial_data_capacity),
      has_done_one_collection_(false),
      last_update_time_ns_(0) {
  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);

  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
    PLOG(FATAL) << "create_mspace_with_base failed";
  }

  SetFootprintLimit(current_capacity_);

  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);

  VLOG(jit) << "Created jit code cache: initial data size="
            << PrettySize(initial_data_capacity)
            << ", initial code size="
            << PrettySize(initial_code_capacity);
}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return code_map_->Begin() <= ptr && ptr < code_map_->End();
}

bool JitCodeCache::ContainsMethod(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& it : method_code_map_) {
    if (it.second == method) {
      return true;
    }
  }
  return false;
}

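// RAII helper making the whole code map writable for the duration of a
// scope, and restoring read/execute on exit. Every write into the code half
// must happen under one of these.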
class ScopedCodeCacheWrite {
 public:
  explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
  }
  ~ScopedCodeCacheWrite() {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  }

 private:
  MemMap* const code_map_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};

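// Commits compiled code and its metadata for `method`. If the first attempt
// fails, the cache is garbage collected once and the commit is retried, so a
// null result means the cache is full even after a collection.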
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  const uint8_t* mapping_table,
                                  const uint8_t* vmap_table,
                                  const uint8_t* gc_map,
                                  size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  const uint8_t* code,
                                  size_t code_size) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       mapping_table,
                                       vmap_table,
                                       gc_map,
                                       frame_size_in_bytes,
                                       core_spill_mask,
                                       fp_spill_mask,
                                       code,
                                       code_size);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                mapping_table,
                                vmap_table,
                                gc_map,
                                frame_size_in_bytes,
                                core_spill_mask,
                                fp_spill_mask,
                                code,
                                code_size);
  }
  return result;
}

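// Blocks until any in-progress collection has finished. Returns true if this
// thread actually had to wait, i.e. a collection was running. Caller must
// hold lock_.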
bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

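// Each code allocation is laid out as [OatQuickMethodHeader][code], with the
// header padded so the code starts at the instruction set alignment.
// Stepping back over the rounded-up header size recovers the mspace
// allocation from a code pointer.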
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  const uint8_t* data = method_header->GetNativeGcMap();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  data = method_header->GetMappingTable();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  // Use the offset directly rather than the accessor, to sidestep the sanity
  // check that the method was compiled with the optimizing compiler.
  // TODO(ngeoffray): Clean up.
  if (method_header->vmap_table_offset_ != 0) {
    data = method_header->code_ - method_header->vmap_table_offset_;
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
}

void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  MutexLock mu(self, lock_);
  // We do not check whether a code cache GC is in progress: this method is
  // called with the classlinker_classes_lock_ held, and suspending ourselves
  // here could lead to a deadlock.
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->second)) {
        FreeCode(it->first, it->second);
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }
  }
  for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
    ProfilingInfo* info = *it;
    if (alloc.ContainsUnsafe(info->GetMethod())) {
      info->GetMethod()->SetProfilingInfo(nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
      it = profiling_infos_.erase(it);
    } else {
      ++it;
    }
  }
}

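// Single attempt at committing code: reserves header + code in the code
// mspace, copies the code in, and records the method <-> code mapping. The
// tables (mapping, vmap, GC map) are referenced by their offsets from the
// code pointer, which is why they must live in the nearby data half.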
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          const uint8_t* mapping_table,
                                          const uint8_t* vmap_table,
                                          const uint8_t* gc_map,
                                          size_t frame_size_in_bytes,
                                          size_t core_spill_mask,
                                          size_t fp_spill_mask,
                                          const uint8_t* code,
                                          size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  // Ensure the header ends up at the expected instruction alignment.
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  size_t total_size = header_size + code_size;

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      uint8_t* result = reinterpret_cast<uint8_t*>(
          mspace_memalign(code_mspace_, alignment, total_size));
      if (result == nullptr) {
        return nullptr;
      }
      code_ptr = result + header_size;
      DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);

      std::copy(code, code + code_size, code_ptr);
      method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      new (method_header) OatQuickMethodHeader(
          (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
          (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
          (gc_map == nullptr) ? 0 : code_ptr - gc_map,
          frame_size_in_bytes,
          core_spill_mask,
          fp_spill_mask,
          code_size);
    }

    __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                            reinterpret_cast<char*>(code_ptr + code_size));
  }
  // We need to update the entry point in the runnable state for the instrumentation.
  {
    MutexLock mu(self, lock_);
    method_code_map_.Put(code_ptr, method);
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, method_header->GetEntryPoint());
    if (collection_in_progress_) {
      // We need to update the live bitmap if there is a GC to ensure it sees this new
      // code.
      GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
    }
    last_update_time_ns_ = NanoTime();
    VLOG(jit)
        << "JIT added "
        << PrettyMethod(method) << "@" << method
        << " ccache_size=" << PrettySize(CodeCacheSizeLocked())
        << ", dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
        << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
        << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
  }

  return reinterpret_cast<uint8_t*>(method_header);
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return CodeCacheSizeLocked();
}

size_t JitCodeCache::CodeCacheSizeLocked() {
  size_t bytes_allocated = 0;
  mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return DataCacheSizeLocked();
}

size_t JitCodeCache::DataCacheSizeLocked() {
  size_t bytes_allocated = 0;
  mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::NumberOfCompiledCode() {
  MutexLock mu(Thread::Current(), lock_);
  return method_code_map_.size();
}

void JitCodeCache::ClearData(Thread* self, void* data) {
  MutexLock mu(self, lock_);
  mspace_free(data_mspace_, data);
}

uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
  size = RoundUp(size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  return result;
}

uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
  uint8_t* result = ReserveData(self, end - begin);
  if (result == nullptr) {
    return nullptr;  // Out of space in the data cache.
  }
  std::copy(begin, end, result);
  return result;
}

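// Stack visitor used during collection: marks, in the live bitmap, the
// allocation of every JIT-compiled frame found on a thread's stack.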
class MarkCodeVisitor FINAL : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(code_cache_->GetLiveBitmap()) {}

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

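// Checkpoint closure run on every thread: walks the thread's stack with
// MarkCodeVisitor, then passes the barrier so the collecting thread can
// proceed.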
class MarkCodeClosure FINAL : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
      : code_cache_(code_cache), barrier_(barrier) {}

  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_);
    visitor.WalkStack();
    if (kIsDebugBuild) {
      // The stack walking code queries the side instrumentation stack if it
      // sees an instrumentation exit pc, so the JIT code of methods in that stack
      // must have been seen. We sanity check this below.
      for (const instrumentation::InstrumentationStackFrame& frame
              : *thread->GetInstrumentationStack()) {
        // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
        // its stack frame, it is not the method owning return_pc_. We just pass null to
        // LookupMethodHeader: the method is only checked against in debug builds.
        OatQuickMethodHeader* method_header =
            code_cache_->LookupMethodHeader(frame.return_pc_, nullptr);
        if (method_header != nullptr) {
          const void* code = method_header->GetCode();
          CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
        }
      }
    }
    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  Barrier* const barrier_;
};

void JitCodeCache::NotifyCollectionDone(Thread* self) {
  collection_in_progress_ = false;
  lock_cond_.Broadcast(self);
}

void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
  size_t per_space_footprint = new_footprint / 2;
  DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
  DCHECK_EQ(per_space_footprint * 2, new_footprint);
  mspace_set_footprint_limit(data_mspace_, per_space_footprint);
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    mspace_set_footprint_limit(code_mspace_, per_space_footprint);
  }
}

bool JitCodeCache::IncreaseCodeCacheCapacity() {
  if (current_capacity_ == max_capacity_) {
    return false;
  }

  // Double the capacity if we're below 1MB, otherwise increase it by 1MB.
  if (current_capacity_ < 1 * MB) {
    current_capacity_ *= 2;
  } else {
    current_capacity_ += 1 * MB;
  }
  if (current_capacity_ > max_capacity_) {
    current_capacity_ = max_capacity_;
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Increasing code cache capacity to " << PrettySize(current_capacity_);
  }

  SetFootprintLimit(current_capacity_);

  return true;
}

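// Collection works in three phases:
//   1) Under the lock, point every compiled method at the interpreter bridge
//      and clear the profiling info of methods not being compiled, so no new
//      activations enter JIT code.
//   2) Run a checkpoint on all threads to mark, in the live bitmap, the code
//      still referenced by some stack.
//   3) Free every unmarked allocation and restore the entry points of the
//      survivors.
// Growing and collecting alternate: the capacity is only increased here if
// the previous call actually collected (has_done_one_collection_).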
void JitCodeCache::GarbageCollectCache(Thread* self) {
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();

  // Wait for an existing collection, or let everyone know we are starting one.
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    } else {
      collection_in_progress_ = true;
    }
  }

  // Check if we just need to grow the capacity. If we don't, allocate the bitmap while
  // we hold the lock.
  {
    MutexLock mu(self, lock_);
    if (has_done_one_collection_ && IncreaseCodeCacheCapacity()) {
      has_done_one_collection_ = false;
      NotifyCollectionDone(self);
      return;
    } else {
      live_bitmap_.reset(CodeCacheBitmap::Create(
          "code-cache-bitmap",
          reinterpret_cast<uintptr_t>(code_map_->Begin()),
          reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
    }
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }
  // Walk over all compiled methods and set their entry points to the interpreter.
  {
    MutexLock mu(self, lock_);
    for (auto& it : method_code_map_) {
      instrumentation->UpdateMethodsCode(it.second, GetQuickToInterpreterBridge());
    }
    for (ProfilingInfo* info : profiling_infos_) {
      if (!info->IsMethodBeingCompiled()) {
        info->GetMethod()->SetProfilingInfo(nullptr);
      }
    }
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  {
    Barrier barrier(0);
    MarkCodeClosure closure(this, &barrier);
    size_t threads_running_checkpoint =
        Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
    // Now that we have run our checkpoint, move to a suspended state and wait
    // for other threads to run the checkpoint.
    ScopedThreadSuspension sts(self, kSuspended);
    if (threads_running_checkpoint != 0) {
      barrier.Increment(self, threads_running_checkpoint);
    }
  }

  {
    MutexLock mu(self, lock_);
    // Free unused compiled code, and restore the entry point of used compiled code.
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        const void* code_ptr = it->first;
        ArtMethod* method = it->second;
        uintptr_t allocation = FromCodeToAllocation(code_ptr);
        const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        if (GetLiveBitmap()->Test(allocation)) {
          instrumentation->UpdateMethodsCode(method, method_header->GetEntryPoint());
          ++it;
        } else {
          method->ClearCounter();
          DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
          FreeCode(code_ptr, method);
          it = method_code_map_.erase(it);
        }
      }
    }

    void* data_mspace = data_mspace_;
    // Free all profiling infos of methods that were not being compiled.
    auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
        [data_mspace] (ProfilingInfo* info) {
          if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
            mspace_free(data_mspace, reinterpret_cast<uint8_t*>(info));
            return true;
          }
          return false;
        });
    profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());

    live_bitmap_.reset(nullptr);
    has_done_one_collection_ = true;
    NotifyCollectionDone(self);
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "After clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }
}

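// Maps a pc, e.g. one obtained during a stack walk, back to the
// OatQuickMethodHeader of the JIT code containing it, or null if the pc does
// not fall inside this cache. The method argument is only used for a debug
// check that the mapping matches expectations.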
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
  if (kRuntimeISA == kArm) {
    // On Thumb-2, the pc is offset by one.
    --pc;
  }
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  MutexLock mu(Thread::Current(), lock_);
  if (method_code_map_.empty()) {
    return nullptr;
  }
  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
  --it;

  const void* code_ptr = it->first;
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  if (!method_header->Contains(pc)) {
    return nullptr;
  }
  if (kIsDebugBuild && method != nullptr) {
    DCHECK_EQ(it->second, method)
        << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
  }
  return method_header;
}

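// Allocates a ProfilingInfo for `method` in the data cache. If
// retry_allocation is true, an allocation failure triggers one collection
// and a second attempt.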
ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                              ArtMethod* method,
                                              const std::vector<uint32_t>& entries,
                                              bool retry_allocation) {
  ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);

  if (info == nullptr && retry_allocation) {
    GarbageCollectCache(self);
    info = AddProfilingInfoInternal(self, method, entries);
  }
  return info;
}

ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
                                                      ArtMethod* method,
                                                      const std::vector<uint32_t>& entries) {
  size_t profile_info_size = RoundUp(
      sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(),
      sizeof(void*));
  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);

  // Check whether some other thread has concurrently created it.
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info != nullptr) {
    return info;
  }

  uint8_t* data = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, profile_info_size));
  if (data == nullptr) {
    return nullptr;
  }
  info = new (data) ProfilingInfo(method, entries);
  method->SetProfilingInfo(info);
  profiling_infos_.push_back(info);
  return info;
}

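// MoreCore plays the role of sbrk(2) for the two mspaces: dlmalloc calls
// back into it to grow (or shrink) the contiguous region it manages.
// code_end_ and data_end_ are the current "break" offsets within the
// fixed-size maps.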
// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
// is already held.
void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
  if (code_mspace_ == mspace) {
    size_t result = code_end_;
    code_end_ += increment;
    return reinterpret_cast<void*>(result + code_map_->Begin());
  } else {
    DCHECK_EQ(data_mspace_, mspace);
    size_t result = data_end_;
    data_end_ += increment;
    return reinterpret_cast<void*>(result + data_map_->Begin());
  }
}

void JitCodeCache::GetCompiledArtMethods(const OatFile* oat_file,
                                         std::set<ArtMethod*>& methods) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto it : method_code_map_) {
    if (it.second->GetDexFile()->GetOatDexFile()->GetOatFile() == oat_file) {
      methods.insert(it.second);
    }
  }
}

uint64_t JitCodeCache::GetLastUpdateTimeNs() {
  MutexLock mu(Thread::Current(), lock_);
  return last_update_time_ns_;
}

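// Returns true if the JIT should compile the method now: it has no JIT code
// yet, it has a ProfilingInfo, and no other thread is already compiling it.
// On success the method is marked as being compiled.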
bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self) {
  if (ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
    return false;
  }
  MutexLock mu(self, lock_);
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info == nullptr || info->IsMethodBeingCompiled()) {
    return false;
  }
  info->SetIsMethodBeingCompiled(true);
  return true;
}

void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED) {
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  DCHECK(info->IsMethodBeingCompiled());
  info->SetIsMethodBeingCompiled(false);
}

}  // namespace jit
}  // namespace art