/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "art_method-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
#include "scoped_thread_state_change.h"
#include "thread_list.h"

namespace art {
namespace jit {

static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;

#define CHECKED_MPROTECT(memory, size, prot)                \
  do {                                                      \
    int rc = mprotect(memory, size, prot);                  \
    if (UNLIKELY(rc != 0)) {                                \
      errno = rc;                                           \
      PLOG(FATAL) << "Failed to mprotect jit code cache";   \
    }                                                       \
  } while (false)                                           \

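// Creates the code cache from a single anonymous mapping: the first half backs the data
// cache (mapping tables, vmap tables, GC maps, profiling info) and the second half is
// remapped as the executable code cache.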
JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
  CHECK_GT(capacity, 0U);
  CHECK_LT(capacity, kMaxCapacity);
  std::string error_str;
  // The map name is kept specific so that android_os_Debug.cpp accounting recognizes it.
  MemMap* data_map = MemMap::MapAnonymous(
      "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
  if (data_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  // The data cache is 1 / 2 of the map.
  // TODO: Make this variable?
  size_t data_size = RoundUp(data_map->Size() / 2, kPageSize);
  size_t code_size = data_map->Size() - data_size;
  uint8_t* divider = data_map->Begin() + data_size;

  // We need 32 bit offsets from method headers in the code cache to point at things in the
  // data cache. If the maps are more than 4GB apart, having multiple maps wouldn't work.
  MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
  if (code_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }
  DCHECK_EQ(code_map->Size(), code_size);
  DCHECK_EQ(code_map->Begin(), divider);
  return new JitCodeCache(code_map, data_map);
}

JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache variable", lock_),
      collection_in_progress_(false),
      code_map_(code_map),
      data_map_(data_map) {

  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);

  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
    PLOG(FATAL) << "create_mspace_with_base failed";
  }

  // Prevent morecore requests from the mspace.
  mspace_set_footprint_limit(code_mspace_, code_map_->Size());
  mspace_set_footprint_limit(data_mspace_, data_map_->Size());

  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);

  live_bitmap_.reset(CodeCacheBitmap::Create("code-cache-bitmap",
                                             reinterpret_cast<uintptr_t>(code_map_->Begin()),
                                             reinterpret_cast<uintptr_t>(code_map_->End())));

  if (live_bitmap_.get() == nullptr) {
    PLOG(FATAL) << "creating bitmaps for the JIT code cache failed";
  }

  VLOG(jit) << "Created jit code cache: data size="
            << PrettySize(data_map_->Size())
            << ", code size="
            << PrettySize(code_map_->Size());
}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return code_map_->Begin() <= ptr && ptr < code_map_->End();
}

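// RAII helper: makes the code cache pages writable (read/write/execute) for the duration
// of a scope, and restores the read+execute-only protection on destruction.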
class ScopedCodeCacheWrite {
 public:
  explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
  }
  ~ScopedCodeCacheWrite() {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  }
 private:
  MemMap* const code_map_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};

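// Commits compiled code and its metadata for |method|. If the first attempt fails because
// the cache is full, a collection is triggered and the allocation is retried once.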
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  const uint8_t* mapping_table,
                                  const uint8_t* vmap_table,
                                  const uint8_t* gc_map,
                                  size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  const uint8_t* code,
                                  size_t code_size) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       mapping_table,
                                       vmap_table,
                                       gc_map,
                                       frame_size_in_bytes,
                                       core_spill_mask,
                                       fp_spill_mask,
                                       code,
                                       code_size);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                mapping_table,
                                vmap_table,
                                gc_map,
                                frame_size_in_bytes,
                                core_spill_mask,
                                fp_spill_mask,
                                code,
                                code_size);
  }
  return result;
}

bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

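// The OatQuickMethodHeader is laid out immediately before the code it describes, so the
// start of the allocation is the code pointer minus the (alignment-rounded) header size.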
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  const uint8_t* data = method_header->GetNativeGcMap();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  data = method_header->GetMappingTable();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  // Use the vmap table offset directly, to avoid the sanity check that the method was
  // compiled with the optimizing compiler.
  // TODO(ngeoffray): Clean up.
  if (method_header->vmap_table_offset_ != 0) {
    data = method_header->code_ - method_header->vmap_table_offset_;
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
}

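// Removes compiled code and profiling info for all methods allocated in |alloc|.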
void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  MutexLock mu(self, lock_);
  // We do not check if a code cache GC is in progress, as this method is called
  // with the classlinker_classes_lock_ held, and suspending ourselves could
  // lead to a deadlock.
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->second)) {
        FreeCode(it->first, it->second);
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }
  }
  for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
    ProfilingInfo* info = *it;
    if (alloc.ContainsUnsafe(info->GetMethod())) {
      info->GetMethod()->SetProfilingInfo(nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
      it = profiling_infos_.erase(it);
    } else {
      ++it;
    }
  }
}

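// Allocates [OatQuickMethodHeader][code] in the code cache, copies the compiled code in,
// flushes the instruction cache, and publishes the new entry point on the method. The
// metadata offsets stored in the header point back into the data cache.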
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          const uint8_t* mapping_table,
                                          const uint8_t* vmap_table,
                                          const uint8_t* gc_map,
                                          size_t frame_size_in_bytes,
                                          size_t core_spill_mask,
                                          size_t fp_spill_mask,
                                          const uint8_t* code,
                                          size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  // Ensure the header ends up at the expected instruction alignment.
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  size_t total_size = header_size + code_size;

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;

  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    uint8_t* result = reinterpret_cast<uint8_t*>(
        mspace_memalign(code_mspace_, alignment, total_size));
    if (result == nullptr) {
      return nullptr;
    }
    code_ptr = result + header_size;
    DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);

    std::copy(code, code + code_size, code_ptr);
    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
    new (method_header) OatQuickMethodHeader(
        (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
        (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
        (gc_map == nullptr) ? 0 : code_ptr - gc_map,
        frame_size_in_bytes,
        core_spill_mask,
        fp_spill_mask,
        code_size);
  }

  __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                          reinterpret_cast<char*>(code_ptr + code_size));
  method_code_map_.Put(code_ptr, method);
  // We have checked above that no collection is in progress. If one were, setting the
  // entry point of a method would be unsafe, as the collection could delete it.
  DCHECK(!collection_in_progress_);
  method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
  return reinterpret_cast<uint8_t*>(method_header);
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::NumberOfCompiledCode() {
  MutexLock mu(Thread::Current(), lock_);
  return method_code_map_.size();
}

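// Reserves |size| bytes (rounded up to pointer size) in the data cache, triggering a
// collection and retrying once if the first allocation fails.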
uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
  size = RoundUp(size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  return result;
}

uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
  uint8_t* result = ReserveData(self, end - begin);
  if (result == nullptr) {
    return nullptr;  // Out of space in the data cache.
  }
  std::copy(begin, end, result);
  return result;
}

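// Stack visitor used during collection: marks, in the live bitmap, every JIT-compiled
// method whose code is currently on a thread's stack.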
class MarkCodeVisitor FINAL : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(code_cache_->GetLiveBitmap()) {}

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

class MarkCodeClosure FINAL : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
      : code_cache_(code_cache), barrier_(barrier) {}

  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_);
    visitor.WalkStack();
    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  Barrier* const barrier_;
};

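// Collects the code cache in three phases: (1) point all compiled methods back at the
// interpreter bridge and detach their profiling info, (2) run a checkpoint on all threads
// so that code still on a stack gets marked in the live bitmap, (3) free unmarked code and
// all profiling info, and restore the entry points of the code that survived.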
void JitCodeCache::GarbageCollectCache(Thread* self) {
  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }

  size_t map_size = 0;
  ScopedThreadSuspension sts(self, kSuspended);

  // Walk over all compiled methods and set their entry points to the interpreter.
  {
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    }
    collection_in_progress_ = true;
    map_size = method_code_map_.size();
    for (auto& it : method_code_map_) {
      it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
    }
    for (ProfilingInfo* info : profiling_infos_) {
      info->GetMethod()->SetProfilingInfo(nullptr);
    }
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  {
    Barrier barrier(0);
    size_t threads_running_checkpoint = 0;
    {
      // Walking the stack requires the mutator lock.
      // We only take the lock when running the checkpoint and not while waiting, so that
      // when we go back to suspended, we can execute checkpoints that were requested
      // concurrently, and then move to waiting for our own checkpoint to finish.
      ScopedObjectAccess soa(self);
      MarkCodeClosure closure(this, &barrier);
      threads_running_checkpoint =
          Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
    }
    if (threads_running_checkpoint != 0) {
      barrier.Increment(self, threads_running_checkpoint);
    }
  }

  {
    MutexLock mu(self, lock_);
    DCHECK_EQ(map_size, method_code_map_.size());
    // Free unused compiled code, and restore the entry point of used compiled code.
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        const void* code_ptr = it->first;
        ArtMethod* method = it->second;
        uintptr_t allocation = FromCodeToAllocation(code_ptr);
        const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        if (GetLiveBitmap()->Test(allocation)) {
          method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
          ++it;
        } else {
          method->ClearCounter();
          DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
          FreeCode(code_ptr, method);
          it = method_code_map_.erase(it);
        }
      }
    }
    GetLiveBitmap()->Bitmap::Clear();

    // Free all profiling info.
    for (ProfilingInfo* info : profiling_infos_) {
      DCHECK(info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
    }
    profiling_infos_.clear();

    collection_in_progress_ = false;
    lock_cond_.Broadcast(self);
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "After clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }
}

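// Maps a pc inside the code cache back to the OatQuickMethodHeader of the containing code.
// method_code_map_ is keyed by code start address, so the candidate is the last entry whose
// code starts below |pc|; the header's Contains() check then validates the hit.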
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
  if (kRuntimeISA == kArm) {
    // On Thumb-2, the pc is offset by one.
    --pc;
  }
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  MutexLock mu(Thread::Current(), lock_);
  if (method_code_map_.empty()) {
    return nullptr;
  }
  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
  --it;

  const void* code_ptr = it->first;
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  if (!method_header->Contains(pc)) {
    return nullptr;
  }
  DCHECK_EQ(it->second, method)
      << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
  return method_header;
}

ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                              ArtMethod* method,
                                              const std::vector<uint32_t>& entries,
                                              bool retry_allocation) {
  ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);

  if (info == nullptr && retry_allocation) {
    GarbageCollectCache(self);
    info = AddProfilingInfoInternal(self, method, entries);
  }
  return info;
}

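// Allocates the ProfilingInfo (with its trailing inline cache array) directly in the data
// cache via placement new, unless another thread has already installed one on the method.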
ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
                                                      ArtMethod* method,
                                                      const std::vector<uint32_t>& entries) {
  size_t profile_info_size = RoundUp(
      sizeof(ProfilingInfo) + sizeof(ProfilingInfo::InlineCache) * entries.size(),
      sizeof(void*));
  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);

  // Check whether some other thread has concurrently created it.
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info != nullptr) {
    return info;
  }

  uint8_t* data = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, profile_info_size));
  if (data == nullptr) {
    return nullptr;
  }
  info = new (data) ProfilingInfo(method, entries);
  method->SetProfilingInfo(info);
  profiling_infos_.push_back(info);
  return info;
}

}  // namespace jit
}  // namespace art