/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "art_method-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
#include "scoped_thread_state_change.h"
#include "thread_list.h"

namespace art {
namespace jit {

static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;

#define CHECKED_MPROTECT(memory, size, prot)                \
  do {                                                      \
    int rc = mprotect(memory, size, prot);                  \
    if (UNLIKELY(rc != 0)) {                                \
      errno = rc;                                           \
      PLOG(FATAL) << "Failed to mprotect jit code cache";   \
    }                                                       \
  } while (false)                                           \

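// Allocates the backing memory for the cache: a single anonymous mapping whose
// first half becomes the data cache and whose second half is remapped as the
// executable code cache.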
JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
  CHECK_GT(capacity, 0U);
  CHECK_LT(capacity, kMaxCapacity);
  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  MemMap* data_map = MemMap::MapAnonymous(
      "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
  if (data_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  // Data cache is 1 / 2 of the map.
  // TODO: Make this variable?
  size_t data_size = RoundUp(data_map->Size() / 2, kPageSize);
  size_t code_size = data_map->Size() - data_size;
  uint8_t* divider = data_map->Begin() + data_size;

  // We need 32-bit offsets from method headers in the code cache to entries in
  // the data cache. If the maps were more than 4GB apart, two maps would not work.
  MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
  if (code_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }
  DCHECK_EQ(code_map->Size(), code_size);
  DCHECK_EQ(code_map->Begin(), divider);
  return new JitCodeCache(code_map, data_map);
}

JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache variable", lock_),
      collection_in_progress_(false),
      code_map_(code_map),
      data_map_(data_map) {

  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);

  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
    PLOG(FATAL) << "create_mspace_with_base failed";
  }

  // Prevent morecore requests from the mspace.
  mspace_set_footprint_limit(code_mspace_, code_map_->Size());
  mspace_set_footprint_limit(data_mspace_, data_map_->Size());

  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);

  live_bitmap_.reset(CodeCacheBitmap::Create("code-cache-bitmap",
                                             reinterpret_cast<uintptr_t>(code_map_->Begin()),
                                             reinterpret_cast<uintptr_t>(code_map_->End())));

  if (live_bitmap_.get() == nullptr) {
    PLOG(FATAL) << "creating bitmaps for the JIT code cache failed";
  }

  VLOG(jit) << "Created jit code cache: data size="
            << PrettySize(data_map_->Size())
            << ", code size="
            << PrettySize(code_map_->Size());
}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return code_map_->Begin() <= ptr && ptr < code_map_->End();
}

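// RAII helper that makes the whole code cache writable (RWX) for the duration
// of a scope and restores the read/execute-only protection on destruction.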
class ScopedCodeCacheWrite {
 public:
  explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
  }
  ~ScopedCodeCacheWrite() {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  }

 private:
  MemMap* const code_map_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};

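// Commits compiled code for |method| to the cache. If the first attempt fails
// because the cache is full, a collection is run and the commit is retried once.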
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  const uint8_t* mapping_table,
                                  const uint8_t* vmap_table,
                                  const uint8_t* gc_map,
                                  size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  const uint8_t* code,
                                  size_t code_size) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       mapping_table,
                                       vmap_table,
                                       gc_map,
                                       frame_size_in_bytes,
                                       core_spill_mask,
                                       fp_spill_mask,
                                       code,
                                       code_size);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                mapping_table,
                                vmap_table,
                                gc_map,
                                frame_size_in_bytes,
                                core_spill_mask,
                                fp_spill_mask,
                                code,
                                code_size);
  }
  return result;
}

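// Called with lock_ held: waits on lock_cond_ until any in-progress code cache
// collection has finished. Returns true if a collection was in progress.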
bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

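// Each code cache allocation is an OatQuickMethodHeader immediately followed by
// the compiled instructions; this maps a code pointer back to the start of its
// allocation.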
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

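// Frees one compiled method: the data cache entries referenced by its method
// header (GC map, mapping table, vmap table) and the code allocation itself.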
void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  const uint8_t* data = method_header->GetNativeGcMap();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  data = method_header->GetMappingTable();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  // Use the offset directly to avoid the sanity check that the method is
  // compiled with optimizing.
  // TODO(ngeoffray): Clean up.
  if (method_header->vmap_table_offset_ != 0) {
    data = method_header->code_ - method_header->vmap_table_offset_;
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
}

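// Removes all compiled code and profiling info belonging to methods whose
// memory was allocated in |alloc|.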
void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  MutexLock mu(self, lock_);
  // We do not check if a code cache GC is in progress, as this method comes
  // with the classlinker_classes_lock_ held, and suspending ourselves could
  // lead to a deadlock.
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->second)) {
        FreeCode(it->first, it->second);
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }
  }
  for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
    ProfilingInfo* info = *it;
    if (alloc.ContainsUnsafe(info->GetMethod())) {
      info->GetMethod()->SetProfilingInfo(nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
      it = profiling_infos_.erase(it);
    } else {
      ++it;
    }
  }
}

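// Does the actual commit: allocates header plus code in the code cache, copies
// the instructions, flushes the instruction cache, and publishes the new entry
// point. Returns null (without retrying) if the code cache is full.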
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          const uint8_t* mapping_table,
                                          const uint8_t* vmap_table,
                                          const uint8_t* gc_map,
                                          size_t frame_size_in_bytes,
                                          size_t core_spill_mask,
                                          size_t fp_spill_mask,
                                          const uint8_t* code,
                                          size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  // Ensure the header ends up at expected instruction alignment.
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  size_t total_size = header_size + code_size;

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;
  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      uint8_t* result = reinterpret_cast<uint8_t*>(
          mspace_memalign(code_mspace_, alignment, total_size));
      if (result == nullptr) {
        return nullptr;
      }
      code_ptr = result + header_size;
      DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);

      std::copy(code, code + code_size, code_ptr);
      method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
      new (method_header) OatQuickMethodHeader(
          (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
          (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
          (gc_map == nullptr) ? 0 : code_ptr - gc_map,
          frame_size_in_bytes,
          core_spill_mask,
          fp_spill_mask,
          code_size);
    }

    __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                            reinterpret_cast<char*>(code_ptr + code_size));
    method_code_map_.Put(code_ptr, method);
    // We have checked there was no collection in progress earlier. If one were
    // in progress, setting the entry point of a method would be unsafe, as the
    // collection could delete it.
    DCHECK(!collection_in_progress_);
    method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
  }
  VLOG(jit)
      << "JIT added "
      << PrettyMethod(method) << "@" << method
      << " ccache_size=" << PrettySize(CodeCacheSize()) << ": "
      << " dcache_size=" << PrettySize(DataCacheSize()) << ": "
      << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
      << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);

  return reinterpret_cast<uint8_t*>(method_header);
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::NumberOfCompiledCode() {
  MutexLock mu(Thread::Current(), lock_);
  return method_code_map_.size();
}

void JitCodeCache::ClearData(Thread* self, void* data) {
  MutexLock mu(self, lock_);
  mspace_free(data_mspace_, data);
}

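// Reserves |size| bytes (rounded up to pointer size) in the data cache,
// running a collection and retrying once if the first allocation fails.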
uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
  size = RoundUp(size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  return result;
}

uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
  uint8_t* result = ReserveData(self, end - begin);
  if (result == nullptr) {
    return nullptr;  // Out of space in the data cache.
  }
  std::copy(begin, end, result);
  return result;
}

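// Stack visitor used during code cache collection: for every frame whose code
// lives in the cache, it marks the corresponding allocation in the live bitmap
// so that code is kept alive.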
class MarkCodeVisitor FINAL : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(code_cache_->GetLiveBitmap()) {}

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

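// Checkpoint closure run on each thread during collection: walks the thread's
// stack with MarkCodeVisitor and then passes the barrier so the collecting
// thread can continue.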
class MarkCodeClosure FINAL : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
      : code_cache_(code_cache), barrier_(barrier) {}

  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_);
    visitor.WalkStack();
    if (kIsDebugBuild) {
      // The stack walking code queries the side instrumentation stack if it
      // sees an instrumentation exit pc, so the JIT code of methods in that stack
      // must have been seen. We sanity check this below.
      for (const instrumentation::InstrumentationStackFrame& frame
              : *thread->GetInstrumentationStack()) {
        // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
        // its stack frame, it is not the method owning return_pc_. We just pass null to
        // LookupMethodHeader: the method is only checked against in debug builds.
        OatQuickMethodHeader* method_header =
            code_cache_->LookupMethodHeader(frame.return_pc_, nullptr);
        if (method_header != nullptr) {
          const void* code = method_header->GetCode();
          CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
        }
      }
    }
    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  Barrier* const barrier_;
};

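// Code cache collection: points every compiled method back at the interpreter,
// runs a checkpoint on all threads to mark code that is still on a stack, then
// frees the unmarked code and all profiling info.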
void JitCodeCache::GarbageCollectCache(Thread* self) {
  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }

  size_t map_size = 0;
  ScopedThreadSuspension sts(self, kSuspended);

  // Walk over all compiled methods and set their entry points to the interpreter.
  {
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    }
    collection_in_progress_ = true;
    map_size = method_code_map_.size();
    for (auto& it : method_code_map_) {
      it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
    }
    for (ProfilingInfo* info : profiling_infos_) {
      info->GetMethod()->SetProfilingInfo(nullptr);
    }
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  {
    Barrier barrier(0);
    size_t threads_running_checkpoint = 0;
    {
      // Walking the stack requires the mutator lock.
      // We only take the lock when running the checkpoint and not waiting so that
      // when we go back to suspended, we can execute checkpoints that were requested
      // concurrently, and then move to waiting for our own checkpoint to finish.
      ScopedObjectAccess soa(self);
      MarkCodeClosure closure(this, &barrier);
      threads_running_checkpoint =
          Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
    }
    if (threads_running_checkpoint != 0) {
      barrier.Increment(self, threads_running_checkpoint);
    }
  }

  {
    MutexLock mu(self, lock_);
    DCHECK_EQ(map_size, method_code_map_.size());
    // Free unused compiled code, and restore the entry point of used compiled code.
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        const void* code_ptr = it->first;
        ArtMethod* method = it->second;
        uintptr_t allocation = FromCodeToAllocation(code_ptr);
        const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        if (GetLiveBitmap()->Test(allocation)) {
          method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
          ++it;
        } else {
          method->ClearCounter();
          DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
          FreeCode(code_ptr, method);
          it = method_code_map_.erase(it);
        }
      }
    }
    GetLiveBitmap()->Bitmap::Clear();

    // Free all profiling info.
    for (ProfilingInfo* info : profiling_infos_) {
      DCHECK(info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
    }
    profiling_infos_.clear();

    collection_in_progress_ = false;
    lock_cond_.Broadcast(self);
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "After clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }
}

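// Maps a pc (typically a return pc observed during a stack walk) back to the
// OatQuickMethodHeader of the JIT-compiled method containing it, or null if
// the pc does not belong to the code cache.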
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
  if (kRuntimeISA == kArm) {
    // On Thumb-2, the pc is offset by one.
    --pc;
  }
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  MutexLock mu(Thread::Current(), lock_);
  if (method_code_map_.empty()) {
    return nullptr;
  }
  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
  --it;

  const void* code_ptr = it->first;
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  if (!method_header->Contains(pc)) {
    return nullptr;
  }
  if (kIsDebugBuild && method != nullptr) {
    DCHECK_EQ(it->second, method)
        << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
  }
  return method_header;
}

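// Allocates the ProfilingInfo for |method| in the data cache. When
// |retry_allocation| is true, a collection is run and the allocation retried
// if the first attempt fails.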
ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                              ArtMethod* method,
                                              const std::vector<uint32_t>& entries,
                                              bool retry_allocation) {
  ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);

  if (info == nullptr && retry_allocation) {
    GarbageCollectCache(self);
    info = AddProfilingInfoInternal(self, method, entries);
  }
  return info;
}

ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
                                                      ArtMethod* method,
                                                      const std::vector<uint32_t>& entries) {
  size_t profile_info_size = RoundUp(
      sizeof(ProfilingInfo) + sizeof(ProfilingInfo::InlineCache) * entries.size(),
      sizeof(void*));
  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);

  // Check whether some other thread has concurrently created it.
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info != nullptr) {
    return info;
  }

  uint8_t* data = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, profile_info_size));
  if (data == nullptr) {
    return nullptr;
  }
  info = new (data) ProfilingInfo(method, entries);
  method->SetProfilingInfo(info);
  profiling_infos_.push_back(info);
  return info;
}

}  // namespace jit
}  // namespace art