/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "art_method-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
#include "thread_list.h"

namespace art {
namespace jit {

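// Protection modes used for the cache. The data half stays read/write, while
// the code half normally stays read/execute and is only made writable (via
// ScopedCodeCacheWrite below) while code is being written or freed.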
static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;

#define CHECKED_MPROTECT(memory, size, prot)                \
  do {                                                      \
    int rc = mprotect(memory, size, prot);                  \
    if (UNLIKELY(rc != 0)) {                                \
      PLOG(FATAL) << "Failed to mprotect jit code cache";   \
    }                                                       \
  } while (false)                                           \

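// Creates the cache out of a single anonymous mapping of |capacity| bytes,
// then remaps its second half as the code region so that data and code stay
// within a 32-bit offset of each other.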
JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
  CHECK_GT(capacity, 0U);
  CHECK_LT(capacity, kMaxCapacity);
  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  MemMap* data_map = MemMap::MapAnonymous(
      "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
  if (data_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  // Data cache is 1 / 2 of the map.
  // TODO: Make this variable?
  size_t data_size = RoundUp(data_map->Size() / 2, kPageSize);
  size_t code_size = data_map->Size() - data_size;
  uint8_t* divider = data_map->Begin() + data_size;

  // We need to have 32 bit offsets from method headers in code cache which point to things
  // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
  MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
  if (code_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }
  DCHECK_EQ(code_map->Size(), code_size);
  DCHECK_EQ(code_map->Begin(), divider);
  return new JitCodeCache(code_map, data_map);
}

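// The constructor lays a dlmalloc mspace over each region, caps their
// footprints so dlmalloc never requests more memory, applies the final
// protections and creates the live bitmap used by the code cache collector.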
JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache variable", lock_),
      collection_in_progress_(false),
      code_map_(code_map),
      data_map_(data_map) {

  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);

  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
    PLOG(FATAL) << "create_mspace_with_base failed";
  }

  // Prevent morecore requests from the mspace.
  mspace_set_footprint_limit(code_mspace_, code_map_->Size());
  mspace_set_footprint_limit(data_mspace_, data_map_->Size());

  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);

  live_bitmap_.reset(CodeCacheBitmap::Create("code-cache-bitmap",
                                             reinterpret_cast<uintptr_t>(code_map_->Begin()),
                                             reinterpret_cast<uintptr_t>(code_map_->End())));

  if (live_bitmap_.get() == nullptr) {
    PLOG(FATAL) << "creating bitmaps for the JIT code cache failed";
  }

  VLOG(jit) << "Created jit code cache: data size="
            << PrettySize(data_map_->Size())
            << ", code size="
            << PrettySize(code_map_->Size());
}

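// Returns true if |ptr| points into the executable code region of the cache.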
bool JitCodeCache::ContainsPc(const void* ptr) const {
  return code_map_->Begin() <= ptr && ptr < code_map_->End();
}

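// RAII helper that makes the code region writable (read/write/execute) for
// the duration of a write into it, restoring read/execute on destruction.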
class ScopedCodeCacheWrite {
 public:
  explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
  }
  ~ScopedCodeCacheWrite() {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  }

 private:
  MemMap* const code_map_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};

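// Commits compiled code for |method| and returns a pointer to its method
// header, or nullptr on failure. If the first attempt runs out of space, the
// cache is collected once and the commit retried.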
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  const uint8_t* mapping_table,
                                  const uint8_t* vmap_table,
                                  const uint8_t* gc_map,
                                  size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  const uint8_t* code,
                                  size_t code_size) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       mapping_table,
                                       vmap_table,
                                       gc_map,
                                       frame_size_in_bytes,
                                       core_spill_mask,
                                       fp_spill_mask,
                                       code,
                                       code_size);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                mapping_table,
                                vmap_table,
                                gc_map,
                                frame_size_in_bytes,
                                core_spill_mask,
                                fp_spill_mask,
                                code,
                                code_size);
  }
  return result;
}

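// Blocks until any in-progress code cache collection has finished. Returns
// true if the calling thread actually had to wait.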
bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

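// A code allocation starts with an OatQuickMethodHeader padded up to the
// instruction set alignment, followed by the code itself; this maps a code
// pointer back to the start of its allocation.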
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

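// Frees one compiled method: its GC map, mapping table and vmap table go back
// to the data mspace, and the header + code allocation goes back to the code
// mspace. Both call sites hold lock_ and a ScopedCodeCacheWrite.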
void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  const uint8_t* data = method_header->GetNativeGcMap();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  data = method_header->GetMappingTable();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  // Use the offset directly to avoid the sanity check that the method was
  // compiled with the optimizing compiler.
  // TODO(ngeoffray): Clean up.
  if (method_header->vmap_table_offset_ != 0) {
    data = method_header->code_ - method_header->vmap_table_offset_;
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
}

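// Removes every compiled method and every ProfilingInfo whose ArtMethod was
// allocated in |alloc|; see the comment below about why this does not wait
// for a potential collection.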
void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  MutexLock mu(self, lock_);
  // We do not check if a code cache GC is in progress, as this method comes
  // with the classlinker_classes_lock_ held, and suspending ourselves could
  // lead to a deadlock.
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->second)) {
        FreeCode(it->first, it->second);
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }
  }
  for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
    ProfilingInfo* info = *it;
    if (alloc.ContainsUnsafe(info->GetMethod())) {
      info->GetMethod()->SetProfilingInfo(nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
      it = profiling_infos_.erase(it);
    } else {
      ++it;
    }
  }
}

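// Single commit attempt. Under lock_, and with the code region temporarily
// writable, this carves header + code out of the code mspace, copies the code
// in, fills the OatQuickMethodHeader with offsets back to the tables in the
// data region, flushes the instruction cache and finally publishes the new
// entry point on the method.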
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          const uint8_t* mapping_table,
                                          const uint8_t* vmap_table,
                                          const uint8_t* gc_map,
                                          size_t frame_size_in_bytes,
                                          size_t core_spill_mask,
                                          size_t fp_spill_mask,
                                          const uint8_t* code,
                                          size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  // Ensure the header ends up at expected instruction alignment.
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  size_t total_size = header_size + code_size;

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;

  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    uint8_t* result = reinterpret_cast<uint8_t*>(
        mspace_memalign(code_mspace_, alignment, total_size));
    if (result == nullptr) {
      return nullptr;
    }
    code_ptr = result + header_size;
    DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);

    std::copy(code, code + code_size, code_ptr);
    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
    new (method_header) OatQuickMethodHeader(
        (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
        (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
        (gc_map == nullptr) ? 0 : code_ptr - gc_map,
        frame_size_in_bytes,
        core_spill_mask,
        fp_spill_mask,
        code_size);
  }

  __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                          reinterpret_cast<char*>(code_ptr + code_size));
  method_code_map_.Put(code_ptr, method);
  // We have checked there was no collection in progress earlier. If one were
  // in progress, setting the entry point of a method would be unsafe, as the
  // collection could delete it.
  DCHECK(!collection_in_progress_);
  method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
  return reinterpret_cast<uint8_t*>(method_header);
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::NumberOfCompiledCode() {
  MutexLock mu(Thread::Current(), lock_);
  return method_code_map_.size();
}

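// Reserves |size| bytes (rounded up to the pointer size) in the data region.
// On failure the cache is collected once and the allocation retried.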
uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
  size = RoundUp(size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  return result;
}

uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
  uint8_t* result = ReserveData(self, end - begin);
  if (result == nullptr) {
    return nullptr;  // Out of space in the data cache.
  }
  std::copy(begin, end, result);
  return result;
}

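// Stack visitor used during collection: for every frame whose code lives in
// this cache, it marks the corresponding allocation in the live bitmap so the
// collector keeps it.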
class MarkCodeVisitor FINAL : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(code_cache_->GetLiveBitmap()) {}

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

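// Checkpoint closure run on each thread: walks that thread's stack with
// MarkCodeVisitor, then passes the barrier the collecting thread waits on.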
class MarkCodeClosure FINAL : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
      : code_cache_(code_cache), barrier_(barrier) {}

  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_);
    visitor.WalkStack();
    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  Barrier* const barrier_;
};

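// Code cache collection, in three phases:
//   1. Under lock_, point every compiled method back at the interpreter
//      bridge and detach all ProfilingInfo objects, so new invocations stop
//      entering JIT code.
//   2. Run a checkpoint on all threads so each thread marks, in the live
//      bitmap, the cache allocations it is currently executing.
//   3. Under lock_ again, free unmarked code, restore the entry points of
//      marked code, clear the bitmap and free all profiling info.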
void JitCodeCache::GarbageCollectCache(Thread* self) {
  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }

  size_t map_size = 0;
  ScopedThreadSuspension sts(self, kSuspended);

  // Walk over all compiled methods and set the entry points of these
  // methods to interpreter.
  {
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    }
    collection_in_progress_ = true;
    map_size = method_code_map_.size();
    for (auto& it : method_code_map_) {
      it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
    }
    for (ProfilingInfo* info : profiling_infos_) {
      info->GetMethod()->SetProfilingInfo(nullptr);
    }
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  {
    Barrier barrier(0);
    MarkCodeClosure closure(this, &barrier);
    size_t threads_running_checkpoint =
        Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
    if (threads_running_checkpoint != 0) {
      barrier.Increment(self, threads_running_checkpoint);
    }
  }

  {
    MutexLock mu(self, lock_);
    DCHECK_EQ(map_size, method_code_map_.size());
    // Free unused compiled code, and restore the entry point of used compiled code.
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        const void* code_ptr = it->first;
        ArtMethod* method = it->second;
        uintptr_t allocation = FromCodeToAllocation(code_ptr);
        const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        if (GetLiveBitmap()->Test(allocation)) {
          method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
          ++it;
        } else {
          method->ClearCounter();
          DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
          FreeCode(code_ptr, method);
          it = method_code_map_.erase(it);
        }
      }
    }
    GetLiveBitmap()->Bitmap::Clear();

    // Free all profiling info.
    for (ProfilingInfo* info : profiling_infos_) {
      DCHECK(info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
    }
    profiling_infos_.clear();

    collection_in_progress_ = false;
    lock_cond_.Broadcast(self);
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "After clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }
}

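// Maps a pc inside the cache to the OatQuickMethodHeader of the compiled
// method containing it, or returns nullptr if no such method exists.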
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
  if (kRuntimeISA == kArm) {
    // On Thumb-2, the pc is offset by one.
    --pc;
  }
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  MutexLock mu(Thread::Current(), lock_);
  if (method_code_map_.empty()) {
    return nullptr;
  }
  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
  --it;

  const void* code_ptr = it->first;
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  if (!method_header->Contains(pc)) {
    return nullptr;
  }
  DCHECK_EQ(it->second, method)
      << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
  return method_header;
}

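// Returns the ProfilingInfo for |method|, creating it in the data region if
// needed. When |retry_allocation| is true, a full code cache collection is
// attempted before giving up on an allocation failure.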
ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                              ArtMethod* method,
                                              const std::vector<uint32_t>& entries,
                                              bool retry_allocation) {
  ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);

  if (info == nullptr && retry_allocation) {
    GarbageCollectCache(self);
    info = AddProfilingInfoInternal(self, method, entries);
  }
  return info;
}

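// Single allocation attempt: under lock_, re-check whether another thread has
// already attached a ProfilingInfo, otherwise placement-new one in the data
// mspace.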
ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
                                                      ArtMethod* method,
                                                      const std::vector<uint32_t>& entries) {
  size_t profile_info_size = RoundUp(
      sizeof(ProfilingInfo) + sizeof(ProfilingInfo::InlineCache) * entries.size(),
      sizeof(void*));
  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);

  // Check whether some other thread has concurrently created it.
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info != nullptr) {
    return info;
  }

  uint8_t* data = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, profile_info_size));
  if (data == nullptr) {
    return nullptr;
  }
  info = new (data) ProfilingInfo(method, entries);
  method->SetProfilingInfo(info);
  profiling_infos_.push_back(info);
  return info;
}

}  // namespace jit
}  // namespace art