/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "art_method-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
#include "thread_list.h"

namespace art {
namespace jit {

static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;

#define CHECKED_MPROTECT(memory, size, prot)                \
  do {                                                      \
    int rc = mprotect(memory, size, prot);                  \
    if (UNLIKELY(rc != 0)) {                                \
      errno = rc;                                           \
      PLOG(FATAL) << "Failed to mprotect jit code cache";   \
    }                                                       \
  } while (false)                                           \

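// Creates the cache as a single anonymous R/W/X mapping, then splits it into a data
// half and a code half that stay adjacent so 32-bit offsets between them remain valid.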
JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
  CHECK_GT(capacity, 0U);
  CHECK_LT(capacity, kMaxCapacity);
  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  MemMap* data_map = MemMap::MapAnonymous(
      "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
  if (data_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  // Data cache is 1 / 2 of the map.
  // TODO: Make this variable?
  size_t data_size = RoundUp(data_map->Size() / 2, kPageSize);
  size_t code_size = data_map->Size() - data_size;
  uint8_t* divider = data_map->Begin() + data_size;

  // We need to have 32 bit offsets from method headers in code cache which point to things
  // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
  MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
  if (code_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }
  DCHECK_EQ(code_map->Size(), code_size);
  DCHECK_EQ(code_map->Begin(), divider);
  return new JitCodeCache(code_map, data_map);
}

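// Backs each map with a dlmalloc mspace, caps the mspace footprints so they never ask
// for more memory, applies the final page protections (R-X for code, RW for data), and
// creates the live bitmap used by code cache collections.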
JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache variable", lock_),
      collection_in_progress_(false),
      code_map_(code_map),
      data_map_(data_map) {

  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);

  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
    PLOG(FATAL) << "create_mspace_with_base failed";
  }

  // Prevent morecore requests from the mspace.
  mspace_set_footprint_limit(code_mspace_, code_map_->Size());
  mspace_set_footprint_limit(data_mspace_, data_map_->Size());

  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);

  live_bitmap_.reset(CodeCacheBitmap::Create("code-cache-bitmap",
                                             reinterpret_cast<uintptr_t>(code_map_->Begin()),
                                             reinterpret_cast<uintptr_t>(code_map_->End())));

  if (live_bitmap_.get() == nullptr) {
    PLOG(FATAL) << "creating bitmaps for the JIT code cache failed";
  }

  VLOG(jit) << "Created jit code cache: data size="
            << PrettySize(data_map_->Size())
            << ", code size="
            << PrettySize(code_map_->Size());
}

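// Returns true if 'ptr' points into the executable code map.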
bool JitCodeCache::ContainsPc(const void* ptr) const {
  return code_map_->Begin() <= ptr && ptr < code_map_->End();
}

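// RAII helper that makes the code map writable (R/W/X) for the duration of a scope and
// restores the read-execute protection on exit.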
class ScopedCodeCacheWrite {
 public:
  explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
  }
  ~ScopedCodeCacheWrite() {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  }
 private:
  MemMap* const code_map_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};

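// Copies the compiled code and its metadata tables into the cache and publishes the new
// entry point for 'method'. If the first attempt fails for lack of space, a collection
// is run and the commit is retried once.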
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  const uint8_t* mapping_table,
                                  const uint8_t* vmap_table,
                                  const uint8_t* gc_map,
                                  size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  const uint8_t* code,
                                  size_t code_size) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       mapping_table,
                                       vmap_table,
                                       gc_map,
                                       frame_size_in_bytes,
                                       core_spill_mask,
                                       fp_spill_mask,
                                       code,
                                       code_size);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                mapping_table,
                                vmap_table,
                                gc_map,
                                frame_size_in_bytes,
                                core_spill_mask,
                                fp_spill_mask,
                                code,
                                code_size);
  }
  return result;
}

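// Called with lock_ held. Blocks until any in-progress collection has finished and
// returns true if one was in progress.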
bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

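// The OatQuickMethodHeader is placed, instruction-aligned, immediately before the code,
// so the start of the allocation is the code pointer minus the rounded-up header size.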
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

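// Frees a code allocation along with the GC map, mapping table and vmap table it
// references in the data cache.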
void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  const uint8_t* data = method_header->GetNativeGcMap();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  data = method_header->GetMappingTable();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  // Use the offset directly to avoid the sanity check that the method was
  // compiled with the optimizing compiler.
  // TODO(ngeoffray): Clean up.
  if (method_header->vmap_table_offset_ != 0) {
    data = method_header->code_ - method_header->vmap_table_offset_;
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
}

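// Removes the compiled code and profiling info of every method whose memory was
// allocated from 'alloc'.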
void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  MutexLock mu(self, lock_);
  // We do not check if a code cache GC is in progress, as this method comes
  // with the classlinker_classes_lock_ held, and suspending ourselves could
  // lead to a deadlock.
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->second)) {
        FreeCode(it->first, it->second);
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }
  }
  for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
    ProfilingInfo* info = *it;
    if (alloc.ContainsUnsafe(info->GetMethod())) {
      info->GetMethod()->SetProfilingInfo(nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
      it = profiling_infos_.erase(it);
    } else {
      ++it;
    }
  }
}

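// Does the actual commit: reserves header + code space in the code mspace, copies the
// code, constructs the OatQuickMethodHeader just before it, flushes the instruction
// cache, and publishes the new entry point.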
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          const uint8_t* mapping_table,
                                          const uint8_t* vmap_table,
                                          const uint8_t* gc_map,
                                          size_t frame_size_in_bytes,
                                          size_t core_spill_mask,
                                          size_t fp_spill_mask,
                                          const uint8_t* code,
                                          size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  // Ensure the header ends up at expected instruction alignment.
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  size_t total_size = header_size + code_size;

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;

  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    uint8_t* result = reinterpret_cast<uint8_t*>(
        mspace_memalign(code_mspace_, alignment, total_size));
    if (result == nullptr) {
      return nullptr;
    }
    code_ptr = result + header_size;
    DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);

    std::copy(code, code + code_size, code_ptr);
    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
    new (method_header) OatQuickMethodHeader(
        (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
        (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
        (gc_map == nullptr) ? 0 : code_ptr - gc_map,
        frame_size_in_bytes,
        core_spill_mask,
        fp_spill_mask,
        code_size);
  }

  __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                          reinterpret_cast<char*>(code_ptr + code_size));
  method_code_map_.Put(code_ptr, method);
  // We have checked earlier that no collection is in progress. If one were, setting
  // the entry point of a method would be unsafe, as the collection could delete it.
  DCHECK(!collection_in_progress_);
  method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
  return reinterpret_cast<uint8_t*>(method_header);
}

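// The two size accessors below report the bytes currently allocated in the code and
// data mspaces respectively.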
size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::NumberOfCompiledCode() {
  MutexLock mu(Thread::Current(), lock_);
  return method_code_map_.size();
}

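// Reserves 'size' bytes (rounded up to pointer alignment) in the data cache. If the
// first allocation fails, a collection is run and the allocation is retried once.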
uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
  size = RoundUp(size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  return result;
}

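// Copies the byte range [begin, end) into freshly reserved data-cache space.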
uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
  uint8_t* result = ReserveData(self, end - begin);
  if (result == nullptr) {
    return nullptr;  // Out of space in the data cache.
  }
  std::copy(begin, end, result);
  return result;
}

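// Stack visitor used during collection: for every frame whose code lives in the cache,
// it marks the corresponding allocation in the live bitmap.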
class MarkCodeVisitor FINAL : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(code_cache_->GetLiveBitmap()) {}

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

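// Checkpoint closure that walks a thread's stack with MarkCodeVisitor and passes the
// collection barrier for threads that are still runnable.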
class MarkCodeClosure FINAL : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
      : code_cache_(code_cache), barrier_(barrier) {}

  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_);
    visitor.WalkStack();
    if (thread->GetState() == kRunnable) {
      barrier_->Pass(Thread::Current());
    }
  }

 private:
  JitCodeCache* const code_cache_;
  Barrier* const barrier_;
};

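// Code cache collection: point every compiled method back at the interpreter, run a
// checkpoint so each thread marks the compiled code it is currently executing, then
// free all unmarked code and every ProfilingInfo.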
void JitCodeCache::GarbageCollectCache(Thread* self) {
  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }

  size_t map_size = 0;
  ScopedThreadSuspension sts(self, kSuspended);

  // Walk over all compiled methods and set the entry points of these
  // methods to the interpreter.
  {
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    }
    collection_in_progress_ = true;
    map_size = method_code_map_.size();
    for (auto& it : method_code_map_) {
      it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
    }
    for (ProfilingInfo* info : profiling_infos_) {
      info->GetMethod()->SetProfilingInfo(nullptr);
    }
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  {
    Barrier barrier(0);
    MarkCodeClosure closure(this, &barrier);
    size_t threads_running_checkpoint =
        Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
    if (threads_running_checkpoint != 0) {
      barrier.Increment(self, threads_running_checkpoint);
    }
  }

  {
    MutexLock mu(self, lock_);
    DCHECK_EQ(map_size, method_code_map_.size());
    // Free unused compiled code, and restore the entry point of used compiled code.
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        const void* code_ptr = it->first;
        ArtMethod* method = it->second;
        uintptr_t allocation = FromCodeToAllocation(code_ptr);
        const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        if (GetLiveBitmap()->Test(allocation)) {
          method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
          ++it;
        } else {
          method->ClearCounter();
          DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
          FreeCode(code_ptr, method);
          it = method_code_map_.erase(it);
        }
      }
    }
    GetLiveBitmap()->Bitmap::Clear();

    // Free all profiling info.
    for (ProfilingInfo* info : profiling_infos_) {
      DCHECK(info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
    }
    profiling_infos_.clear();

    collection_in_progress_ = false;
    lock_cond_.Broadcast(self);
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "After clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }
}

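// Maps a pc inside the cache back to the OatQuickMethodHeader of the method that
// contains it, or returns nullptr if the pc does not belong to cached code.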
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
  if (kRuntimeISA == kArm) {
    // On Thumb-2, the pc is offset by one.
    --pc;
  }
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  MutexLock mu(Thread::Current(), lock_);
  if (method_code_map_.empty()) {
    return nullptr;
  }
  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
  --it;

  const void* code_ptr = it->first;
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  if (!method_header->Contains(pc)) {
    return nullptr;
  }
  DCHECK_EQ(it->second, method)
      << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
  return method_header;
}

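// Creates (or returns the already existing) ProfilingInfo for 'method'. When
// 'retry_allocation' is set, a failed allocation triggers a collection and one retry.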
ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                              ArtMethod* method,
                                              const std::vector<uint32_t>& entries,
                                              bool retry_allocation) {
  ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);

  if (info == nullptr && retry_allocation) {
    GarbageCollectCache(self);
    info = AddProfilingInfoInternal(self, method, entries);
  }
  return info;
}

ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
                                                      ArtMethod* method,
                                                      const std::vector<uint32_t>& entries) {
  size_t profile_info_size = RoundUp(
      sizeof(ProfilingInfo) + sizeof(ProfilingInfo::InlineCache) * entries.size(),
      sizeof(void*));
  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);

  // Check whether some other thread has concurrently created it.
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info != nullptr) {
    return info;
  }

  uint8_t* data = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, profile_info_size));
  if (data == nullptr) {
    return nullptr;
  }
  info = new (data) ProfilingInfo(method, entries);
  method->SetProfilingInfo(info);
  profiling_infos_.push_back(info);
  return info;
}

}  // namespace jit
}  // namespace art