/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "art_method-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
#include "scoped_thread_state_change.h"
#include "thread_list.h"

namespace art {
namespace jit {

static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;

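// Checked wrapper around mprotect(2). The cache regions sit at their
// steady-state protections (code: RX, data: RW) and are temporarily made
// writable while code is committed or collected, so a failed protection
// change is fatal.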
#define CHECKED_MPROTECT(memory, size, prot)                \
  do {                                                      \
    int rc = mprotect(memory, size, prot);                  \
    if (UNLIKELY(rc != 0)) {                                \
      /* mprotect sets errno itself; PLOG reports it. */    \
      PLOG(FATAL) << "Failed to mprotect jit code cache";   \
    }                                                       \
  } while (false)                                           \

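// Creates the cache as a single anonymous RWX mapping of 'capacity' bytes and
// splits it in two: the low half backs the data cache, the high half backs the
// code cache. Keeping both halves in one reservation guarantees that offsets
// from method headers in the code region to tables in the data region fit in
// 32 bits.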
JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
  CHECK_GT(capacity, 0U);
  CHECK_LT(capacity, kMaxCapacity);
  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  MemMap* data_map = MemMap::MapAnonymous(
      "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
  if (data_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  // Data cache is 1 / 2 of the map.
  // TODO: Make this variable?
  size_t data_size = RoundUp(data_map->Size() / 2, kPageSize);
  size_t code_size = data_map->Size() - data_size;
  uint8_t* divider = data_map->Begin() + data_size;

  // We need to have 32 bit offsets from method headers in code cache which point to things
  // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
  MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
  if (code_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }
  DCHECK_EQ(code_map->Size(), code_size);
  DCHECK_EQ(code_map->Begin(), divider);
  return new JitCodeCache(code_map, data_map);
}

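// The constructor carves a dlmalloc mspace out of each mapping, caps their
// footprint so they never request more memory from the OS, drops the mappings
// to their steady-state protections (code: RX, data: RW), and allocates the
// bitmap used to mark live code during collections.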
JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache variable", lock_),
      collection_in_progress_(false),
      code_map_(code_map),
      data_map_(data_map) {

  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);

  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
    PLOG(FATAL) << "create_mspace_with_base failed";
  }

  // Prevent morecore requests from the mspace.
  mspace_set_footprint_limit(code_mspace_, code_map_->Size());
  mspace_set_footprint_limit(data_mspace_, data_map_->Size());

  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);

  live_bitmap_.reset(CodeCacheBitmap::Create("code-cache-bitmap",
                                             reinterpret_cast<uintptr_t>(code_map_->Begin()),
                                             reinterpret_cast<uintptr_t>(code_map_->End())));

  if (live_bitmap_.get() == nullptr) {
    PLOG(FATAL) << "creating bitmaps for the JIT code cache failed";
  }

  VLOG(jit) << "Created jit code cache: data size="
            << PrettySize(data_map_->Size())
            << ", code size="
            << PrettySize(code_map_->Size());
}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return code_map_->Begin() <= ptr && ptr < code_map_->End();
}

bool JitCodeCache::ContainsMethod(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& it : method_code_map_) {
    if (it.second == method) {
      return true;
    }
  }
  return false;
}

class ScopedCodeCacheWrite {
 public:
  explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
  }
  ~ScopedCodeCacheWrite() {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  }
 private:
  MemMap* const code_map_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};

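// Public entry point for publishing compiled code. If the first allocation
// attempt fails because the cache is full, a collection is run and the
// allocation is retried once.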
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  const uint8_t* mapping_table,
                                  const uint8_t* vmap_table,
                                  const uint8_t* gc_map,
                                  size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  const uint8_t* code,
                                  size_t code_size) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       mapping_table,
                                       vmap_table,
                                       gc_map,
                                       frame_size_in_bytes,
                                       core_spill_mask,
                                       fp_spill_mask,
                                       code,
                                       code_size);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                mapping_table,
                                vmap_table,
                                gc_map,
                                frame_size_in_bytes,
                                core_spill_mask,
                                fp_spill_mask,
                                code,
                                code_size);
  }
  return result;
}

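// Returns true if a collection was in progress when called, and blocks until
// it completes. The caller must hold lock_.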
bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

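// Each compiled method is laid out as [OatQuickMethodHeader][code], with the
// header padded so that the code starts at the instruction-set alignment. The
// start of the underlying allocation is therefore the code pointer minus the
// rounded-up header size.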
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

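// Releases one compiled method: the GC map, mapping table and vmap table go
// back to the data mspace, the header+code allocation goes back to the code
// mspace. Expects the caller to hold lock_ and to have made the code map
// writable (see ScopedCodeCacheWrite).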
void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  const uint8_t* data = method_header->GetNativeGcMap();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  data = method_header->GetMappingTable();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  // Use the offset directly to prevent sanity check that the method is
  // compiled with optimizing.
  // TODO(ngeoffray): Clean up.
  if (method_header->vmap_table_offset_ != 0) {
    data = method_header->code_ - method_header->vmap_table_offset_;
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
}

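// Removes the compiled code and profiling info of every method that was
// allocated in 'alloc', typically because the corresponding class loader is
// going away.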
void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  MutexLock mu(self, lock_);
  // We do not check if a code cache GC is in progress, as this method comes
  // with the classlinker_classes_lock_ held, and suspending ourselves could
  // lead to a deadlock.
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->second)) {
        FreeCode(it->first, it->second);
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }
  }
  for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
    ProfilingInfo* info = *it;
    if (alloc.ContainsUnsafe(info->GetMethod())) {
      info->GetMethod()->SetProfilingInfo(nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
      it = profiling_infos_.erase(it);
    } else {
      ++it;
    }
  }
}

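// Writes one compiled method into the cache: a single allocation holds the
// OatQuickMethodHeader immediately followed by the code, the header stores the
// 32-bit offsets back to the tables previously placed in the data cache, the
// instruction cache is flushed, and the method's entry point is updated
// through the instrumentation so the new code becomes visible to callers.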
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          const uint8_t* mapping_table,
                                          const uint8_t* vmap_table,
                                          const uint8_t* gc_map,
                                          size_t frame_size_in_bytes,
                                          size_t core_spill_mask,
                                          size_t fp_spill_mask,
                                          const uint8_t* code,
                                          size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  // Ensure the header ends up at expected instruction alignment.
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  size_t total_size = header_size + code_size;

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      uint8_t* result = reinterpret_cast<uint8_t*>(
          mspace_memalign(code_mspace_, alignment, total_size));
      if (result == nullptr) {
        return nullptr;
      }
      code_ptr = result + header_size;
      DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);

      std::copy(code, code + code_size, code_ptr);
      method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      new (method_header) OatQuickMethodHeader(
          (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
          (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
          (gc_map == nullptr) ? 0 : code_ptr - gc_map,
          frame_size_in_bytes,
          core_spill_mask,
          fp_spill_mask,
          code_size);
    }

    __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                            reinterpret_cast<char*>(code_ptr + code_size));
  }
  // We need to update the entry point in the runnable state for the instrumentation.
  {
    MutexLock mu(self, lock_);
    method_code_map_.Put(code_ptr, method);
    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
        method, method_header->GetEntryPoint());
    if (collection_in_progress_) {
      // We need to update the live bitmap if there is a GC to ensure it sees this new
      // code.
      GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
    }
    VLOG(jit)
        << "JIT added "
        << PrettyMethod(method) << "@" << method
        << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
        << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
        << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
        << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
  }

  return reinterpret_cast<uint8_t*>(method_header);
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return CodeCacheSizeLocked();
}

size_t JitCodeCache::CodeCacheSizeLocked() {
  size_t bytes_allocated = 0;
  mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  return DataCacheSizeLocked();
}

size_t JitCodeCache::DataCacheSizeLocked() {
  size_t bytes_allocated = 0;
  mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::NumberOfCompiledCode() {
  MutexLock mu(Thread::Current(), lock_);
  return method_code_map_.size();
}

void JitCodeCache::ClearData(Thread* self, void* data) {
  MutexLock mu(self, lock_);
  mspace_free(data_mspace_, data);
}

uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
  size = RoundUp(size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  return result;
}

uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
  uint8_t* result = ReserveData(self, end - begin);
  if (result == nullptr) {
    return nullptr;  // Out of space in the data cache.
  }
  std::copy(begin, end, result);
  return result;
}

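// Stack visitor run under a checkpoint during a code cache collection: for
// every frame whose code lives in the cache, it marks the corresponding
// allocation in the live bitmap so the code is not freed while still on a
// stack.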
class MarkCodeVisitor FINAL : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(code_cache_->GetLiveBitmap()) {}

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

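// Checkpoint closure executed by every thread during a collection: it walks
// the thread's stack with MarkCodeVisitor and then passes the barrier the
// collecting thread is waiting on.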
class MarkCodeClosure FINAL : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
      : code_cache_(code_cache), barrier_(barrier) {}

  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_);
    visitor.WalkStack();
    if (kIsDebugBuild) {
      // The stack walking code queries the side instrumentation stack if it
      // sees an instrumentation exit pc, so the JIT code of methods in that stack
      // must have been seen. We sanity check this below.
      for (const instrumentation::InstrumentationStackFrame& frame
              : *thread->GetInstrumentationStack()) {
        // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
        // its stack frame, it is not the method owning return_pc_. We just pass null to
        // LookupMethodHeader: the method is only checked against in debug builds.
        OatQuickMethodHeader* method_header =
            code_cache_->LookupMethodHeader(frame.return_pc_, nullptr);
        if (method_header != nullptr) {
          const void* code = method_header->GetCode();
          CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
        }
      }
    }
    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  Barrier* const barrier_;
};

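// Collects the code cache. Outline:
//  1. Under lock_, point every compiled method at the interpreter bridge and
//     detach the profiling infos from their methods.
//  2. Run a checkpoint on all threads; each one marks, in the live bitmap, the
//     cached code it is currently executing (see MarkCodeClosure above).
//  3. Under lock_, free every unmarked compilation and restore the entry point
//     of the marked ones, free all profiling infos, clear the bitmap, and wake
//     up threads waiting in WaitForPotentialCollectionToComplete.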
void JitCodeCache::GarbageCollectCache(Thread* self) {
  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }

  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();

  // Wait for an existing collection, or let everyone know we are starting one.
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    } else {
      collection_in_progress_ = true;
    }
  }
  // Walk over all compiled methods and set the entry points of these
  // methods to interpreter.
  {
    MutexLock mu(self, lock_);
    for (auto& it : method_code_map_) {
      instrumentation->UpdateMethodsCode(it.second, GetQuickToInterpreterBridge());
    }
    for (ProfilingInfo* info : profiling_infos_) {
      info->GetMethod()->SetProfilingInfo(nullptr);
    }
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  {
    Barrier barrier(0);
    size_t threads_running_checkpoint = 0;
    MarkCodeClosure closure(this, &barrier);
    threads_running_checkpoint =
        Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
    // Now that we have run our checkpoint, move to a suspended state and wait
    // for other threads to run the checkpoint.
    ScopedThreadSuspension sts(self, kSuspended);
    if (threads_running_checkpoint != 0) {
      barrier.Increment(self, threads_running_checkpoint);
    }
  }

  {
    MutexLock mu(self, lock_);
    // Free unused compiled code, and restore the entry point of used compiled code.
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        const void* code_ptr = it->first;
        ArtMethod* method = it->second;
        uintptr_t allocation = FromCodeToAllocation(code_ptr);
        const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        if (GetLiveBitmap()->Test(allocation)) {
          instrumentation->UpdateMethodsCode(method, method_header->GetEntryPoint());
          ++it;
        } else {
          method->ClearCounter();
          DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
          FreeCode(code_ptr, method);
          it = method_code_map_.erase(it);
        }
      }
    }
    GetLiveBitmap()->Bitmap::Clear();

    // Free all profiling info.
    for (ProfilingInfo* info : profiling_infos_) {
      DCHECK(info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
    }
    profiling_infos_.clear();

    collection_in_progress_ = false;
    lock_cond_.Broadcast(self);
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "After clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }
}

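// Maps a pc (typically a return address seen during a stack walk) back to the
// OatQuickMethodHeader of the JIT-compiled method containing it, or nullptr if
// the pc does not fall inside cached code. 'method', when non-null, is only
// used for a debug-build consistency check.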
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
  if (kRuntimeISA == kArm) {
    // On Thumb-2, the pc is offset by one.
    --pc;
  }
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  MutexLock mu(Thread::Current(), lock_);
  if (method_code_map_.empty()) {
    return nullptr;
  }
  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
  --it;

  const void* code_ptr = it->first;
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  if (!method_header->Contains(pc)) {
    return nullptr;
  }
  if (kIsDebugBuild && method != nullptr) {
    DCHECK_EQ(it->second, method)
        << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
  }
  return method_header;
}

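// Public entry point for attaching a ProfilingInfo to a method. If allocation
// fails and 'retry_allocation' is set, a collection is run and the allocation
// is retried once.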
ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                              ArtMethod* method,
                                              const std::vector<uint32_t>& entries,
                                              bool retry_allocation) {
  ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);

  if (info == nullptr && retry_allocation) {
    GarbageCollectCache(self);
    info = AddProfilingInfoInternal(self, method, entries);
  }
  return info;
}

ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
                                                      ArtMethod* method,
                                                      const std::vector<uint32_t>& entries) {
  size_t profile_info_size = RoundUp(
      sizeof(ProfilingInfo) + sizeof(ProfilingInfo::InlineCache) * entries.size(),
      sizeof(void*));
  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);

  // Check whether some other thread has concurrently created it.
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info != nullptr) {
    return info;
  }

  uint8_t* data = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, profile_info_size));
  if (data == nullptr) {
    return nullptr;
  }
  info = new (data) ProfilingInfo(method, entries);
  method->SetProfilingInfo(info);
  profiling_infos_.push_back(info);
  return info;
}

}  // namespace jit
}  // namespace art