/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_code_cache.h"

#include <sstream>

#include "art_method-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
#include "scoped_thread_state_change.h"
#include "thread_list.h"

namespace art {
namespace jit {

static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;

#define CHECKED_MPROTECT(memory, size, prot)                    \
  do {                                                          \
    int rc = mprotect(memory, size, prot);                      \
    if (UNLIKELY(rc != 0)) {                                    \
      /* mprotect() sets errno on failure; PLOG reports it. */  \
      PLOG(FATAL) << "Failed to mprotect jit code cache";       \
    }                                                           \
  } while (false)

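// Creates the cache as a single anonymous read/write/execute mapping of `capacity` bytes,
// then splits it into a data half and a code half so that offsets between the two halves
// stay within 32 bits.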
JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
  CHECK_GT(capacity, 0U);
  CHECK_LT(capacity, kMaxCapacity);
  std::string error_str;
  // Map name specific for android_os_Debug.cpp accounting.
  MemMap* data_map = MemMap::MapAnonymous(
      "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
  if (data_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }

  // Data cache is 1 / 2 of the map.
  // TODO: Make this variable?
  size_t data_size = RoundUp(data_map->Size() / 2, kPageSize);
  size_t code_size = data_map->Size() - data_size;
  uint8_t* divider = data_map->Begin() + data_size;

  // We need to have 32 bit offsets from method headers in code cache which point to things
  // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
  MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
  if (code_map == nullptr) {
    std::ostringstream oss;
    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
    *error_msg = oss.str();
    return nullptr;
  }
  DCHECK_EQ(code_map->Size(), code_size);
  DCHECK_EQ(code_map->Begin(), divider);
  return new JitCodeCache(code_map, data_map);
}

JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
    : lock_("Jit code cache", kJitCodeCacheLock),
      lock_cond_("Jit code cache variable", lock_),
      collection_in_progress_(false),
      code_map_(code_map),
      data_map_(data_map) {

  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);

  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
    PLOG(FATAL) << "create_mspace_with_base failed";
  }

  // Prevent morecore requests from the mspace.
  mspace_set_footprint_limit(code_mspace_, code_map_->Size());
  mspace_set_footprint_limit(data_mspace_, data_map_->Size());

  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);

  live_bitmap_.reset(CodeCacheBitmap::Create("code-cache-bitmap",
                                             reinterpret_cast<uintptr_t>(code_map_->Begin()),
                                             reinterpret_cast<uintptr_t>(code_map_->End())));

  if (live_bitmap_.get() == nullptr) {
    PLOG(FATAL) << "creating bitmaps for the JIT code cache failed";
  }

  VLOG(jit) << "Created jit code cache: data size="
            << PrettySize(data_map_->Size())
            << ", code size="
            << PrettySize(code_map_->Size());
}

bool JitCodeCache::ContainsPc(const void* ptr) const {
  return code_map_->Begin() <= ptr && ptr < code_map_->End();
}

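// RAII helper that makes the code map writable (read/write/execute) for its scope and
// restores the read/execute-only protection on destruction.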
class ScopedCodeCacheWrite {
 public:
  explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
  }
  ~ScopedCodeCacheWrite() {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  }

 private:
  MemMap* const code_map_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};

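// Installs compiled code for `method`. If the initial allocation fails, the cache is
// collected and the allocation is attempted once more; returns null if it still fails.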
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  ArtMethod* method,
                                  const uint8_t* mapping_table,
                                  const uint8_t* vmap_table,
                                  const uint8_t* gc_map,
                                  size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  const uint8_t* code,
                                  size_t code_size) {
  uint8_t* result = CommitCodeInternal(self,
                                       method,
                                       mapping_table,
                                       vmap_table,
                                       gc_map,
                                       frame_size_in_bytes,
                                       core_spill_mask,
                                       fp_spill_mask,
                                       code,
                                       code_size);
  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    result = CommitCodeInternal(self,
                                method,
                                mapping_table,
                                vmap_table,
                                gc_map,
                                frame_size_in_bytes,
                                core_spill_mask,
                                fp_spill_mask,
                                code,
                                code_size);
  }
  return result;
}

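// Blocks (with lock_ held) while a code cache collection is in progress. Returns true if
// a collection was in progress when called.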
bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
  bool in_collection = false;
  while (collection_in_progress_) {
    in_collection = true;
    lock_cond_.Wait(self);
  }
  return in_collection;
}

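// The OatQuickMethodHeader is allocated immediately before the code it describes, so the
// start of the allocation is the code pointer minus the (alignment-padded) header size.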
static uintptr_t FromCodeToAllocation(const void* code) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
}

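// Releases the code allocation together with the data-cache tables (GC map, mapping
// table, vmap table) that its method header points to.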
void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
  uintptr_t allocation = FromCodeToAllocation(code_ptr);
  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  const uint8_t* data = method_header->GetNativeGcMap();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  data = method_header->GetMappingTable();
  if (data != nullptr) {
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  // Use the offset directly to prevent sanity check that the method is
  // compiled with optimizing.
  // TODO(ngeoffray): Clean up.
  if (method_header->vmap_table_offset_ != 0) {
    data = method_header->code_ - method_header->vmap_table_offset_;
    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
  }
  mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
}

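// Removes the compiled code and profiling info of every method whose ArtMethod was
// allocated from `alloc`.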
void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
  MutexLock mu(self, lock_);
  // We do not check if a code cache GC is in progress, as this method comes
  // with the classlinker_classes_lock_ held, and suspending ourselves could
  // lead to a deadlock.
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
      if (alloc.ContainsUnsafe(it->second)) {
        FreeCode(it->first, it->second);
        it = method_code_map_.erase(it);
      } else {
        ++it;
      }
    }
  }
  for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
    ProfilingInfo* info = *it;
    if (alloc.ContainsUnsafe(info->GetMethod())) {
      info->GetMethod()->SetProfilingInfo(nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
      it = profiling_infos_.erase(it);
    } else {
      ++it;
    }
  }
}

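// Does the actual allocation and publication of compiled code: reserves header + code from
// the code mspace, copies the instructions, constructs the OatQuickMethodHeader in place
// just before them, flushes the instruction cache, then records the mapping and updates
// the method's entry point.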
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                          ArtMethod* method,
                                          const uint8_t* mapping_table,
                                          const uint8_t* vmap_table,
                                          const uint8_t* gc_map,
                                          size_t frame_size_in_bytes,
                                          size_t core_spill_mask,
                                          size_t fp_spill_mask,
                                          const uint8_t* code,
                                          size_t code_size) {
  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
  // Ensure the header ends up at expected instruction alignment.
  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
  size_t total_size = header_size + code_size;

  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr = nullptr;

  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);
  {
    ScopedCodeCacheWrite scc(code_map_.get());
    uint8_t* result = reinterpret_cast<uint8_t*>(
        mspace_memalign(code_mspace_, alignment, total_size));
    if (result == nullptr) {
      return nullptr;
    }
    code_ptr = result + header_size;
    DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);

    std::copy(code, code + code_size, code_ptr);
    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
    new (method_header) OatQuickMethodHeader(
        (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
        (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
        (gc_map == nullptr) ? 0 : code_ptr - gc_map,
        frame_size_in_bytes,
        core_spill_mask,
        fp_spill_mask,
        code_size);
  }

  __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                          reinterpret_cast<char*>(code_ptr + code_size));
  method_code_map_.Put(code_ptr, method);
  // We have checked there was no collection in progress earlier. If we
  // were, setting the entry point of a method would be unsafe, as the collection
  // could delete it.
  DCHECK(!collection_in_progress_);
  method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
  return reinterpret_cast<uint8_t*>(method_header);
}

size_t JitCodeCache::CodeCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::DataCacheSize() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

size_t JitCodeCache::NumberOfCompiledCode() {
  MutexLock mu(Thread::Current(), lock_);
  return method_code_map_.size();
}

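// Allocates `size` bytes (rounded up to pointer alignment) from the data cache, collecting
// the cache and retrying once if the first allocation fails.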
uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
  size = RoundUp(size, sizeof(void*));
  uint8_t* result = nullptr;

  {
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  if (result == nullptr) {
    // Retry.
    GarbageCollectCache(self);
    ScopedThreadSuspension sts(self, kSuspended);
    MutexLock mu(self, lock_);
    WaitForPotentialCollectionToComplete(self);
    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
  }

  return result;
}

uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
  uint8_t* result = ReserveData(self, end - begin);
  if (result == nullptr) {
    return nullptr;  // Out of space in the data cache.
  }
  std::copy(begin, end, result);
  return result;
}

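// Stack visitor used during a code cache collection: for every frame whose code lives in
// the cache, it marks the corresponding allocation in the live bitmap.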
class MarkCodeVisitor FINAL : public StackVisitor {
 public:
  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        code_cache_(code_cache_in),
        bitmap_(code_cache_->GetLiveBitmap()) {}

  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
    if (method_header == nullptr) {
      return true;
    }
    const void* code = method_header->GetCode();
    if (code_cache_->ContainsPc(code)) {
      // Use the atomic set version, as multiple threads are executing this code.
      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
    }
    return true;
  }

 private:
  JitCodeCache* const code_cache_;
  CodeCacheBitmap* const bitmap_;
};

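// Checkpoint closure run on every thread during a collection: walks the thread's stack
// with MarkCodeVisitor and then passes the barrier so the collector can proceed.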
class MarkCodeClosure FINAL : public Closure {
 public:
  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
      : code_cache_(code_cache), barrier_(barrier) {}

  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended());
    MarkCodeVisitor visitor(thread, code_cache_);
    visitor.WalkStack();
    if (kIsDebugBuild) {
      // The stack walking code queries the side instrumentation stack if it
      // sees an instrumentation exit pc, so the JIT code of methods in that stack
      // must have been seen. We sanity check this below.
      for (const instrumentation::InstrumentationStackFrame& frame
          : *thread->GetInstrumentationStack()) {
        // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
        // its stack frame, it is not the method owning return_pc_. We just pass null to
        // LookupMethodHeader: the method is only checked against in debug builds.
        OatQuickMethodHeader* method_header =
            code_cache_->LookupMethodHeader(frame.return_pc_, nullptr);
        if (method_header != nullptr) {
          const void* code = method_header->GetCode();
          CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
        }
      }
    }
    barrier_->Pass(Thread::Current());
  }

 private:
  JitCodeCache* const code_cache_;
  Barrier* const barrier_;
};

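// Collects the code cache in three steps: (1) under lock_, reset every compiled method's
// entry point to the interpreter bridge and detach profiling info; (2) run a checkpoint on
// all threads so that code currently on a stack gets marked in the live bitmap; (3) under
// lock_ again, free unmarked code and all profiling info, restore the entry points of
// surviving code, and wake up threads waiting for the collection to finish.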
void JitCodeCache::GarbageCollectCache(Thread* self) {
  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "Clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }

  size_t map_size = 0;
  ScopedThreadSuspension sts(self, kSuspended);

  // Walk over all compiled methods and set the entry points of these
  // methods to interpreter.
  {
    MutexLock mu(self, lock_);
    if (WaitForPotentialCollectionToComplete(self)) {
      return;
    }
    collection_in_progress_ = true;
    map_size = method_code_map_.size();
    for (auto& it : method_code_map_) {
      it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
    }
    for (ProfilingInfo* info : profiling_infos_) {
      info->GetMethod()->SetProfilingInfo(nullptr);
    }
  }

  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
  {
    Barrier barrier(0);
    size_t threads_running_checkpoint = 0;
    {
      // Walking the stack requires the mutator lock.
      // We only take the lock when running the checkpoint and not waiting so that
      // when we go back to suspended, we can execute checkpoints that were requested
      // concurrently, and then move to waiting for our own checkpoint to finish.
      ScopedObjectAccess soa(self);
      MarkCodeClosure closure(this, &barrier);
      threads_running_checkpoint =
          Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
    }
    if (threads_running_checkpoint != 0) {
      barrier.Increment(self, threads_running_checkpoint);
    }
  }

  {
    MutexLock mu(self, lock_);
    DCHECK_EQ(map_size, method_code_map_.size());
    // Free unused compiled code, and restore the entry point of used compiled code.
    {
      ScopedCodeCacheWrite scc(code_map_.get());
      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
        const void* code_ptr = it->first;
        ArtMethod* method = it->second;
        uintptr_t allocation = FromCodeToAllocation(code_ptr);
        const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        if (GetLiveBitmap()->Test(allocation)) {
          method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
          ++it;
        } else {
          method->ClearCounter();
          DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
          FreeCode(code_ptr, method);
          it = method_code_map_.erase(it);
        }
      }
    }
    GetLiveBitmap()->Bitmap::Clear();

    // Free all profiling info.
    for (ProfilingInfo* info : profiling_infos_) {
      DCHECK(info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr);
      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
    }
    profiling_infos_.clear();

    collection_in_progress_ = false;
    lock_cond_.Broadcast(self);
  }

  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
    LOG(INFO) << "After clearing code cache, code="
              << PrettySize(CodeCacheSize())
              << ", data=" << PrettySize(DataCacheSize());
  }
}

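// Maps a pc that lies inside JIT-compiled code back to the OatQuickMethodHeader of the
// method containing it; `method` is only used for a debug-build consistency check.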
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
  if (kRuntimeISA == kArm) {
    // On Thumb-2, the pc is offset by one.
    --pc;
  }
  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
    return nullptr;
  }

  MutexLock mu(Thread::Current(), lock_);
  if (method_code_map_.empty()) {
    return nullptr;
  }
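  // method_code_map_ is keyed by code start address. lower_bound() returns the first entry
  // whose code starts at or after pc, so the previous entry is the candidate method whose
  // code could contain pc; the Contains() check below confirms it.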
  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
  --it;

  const void* code_ptr = it->first;
  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
  if (!method_header->Contains(pc)) {
    return nullptr;
  }
  if (kIsDebugBuild && method != nullptr) {
    DCHECK_EQ(it->second, method)
        << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
  }
  return method_header;
}

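// Returns the ProfilingInfo for `method`, creating it if needed. When `retry_allocation`
// is true and the data cache is full, a collection is triggered and the allocation is
// retried once.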
ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                              ArtMethod* method,
                                              const std::vector<uint32_t>& entries,
                                              bool retry_allocation) {
  ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);

  if (info == nullptr && retry_allocation) {
    GarbageCollectCache(self);
    info = AddProfilingInfoInternal(self, method, entries);
  }
  return info;
}

ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
                                                      ArtMethod* method,
                                                      const std::vector<uint32_t>& entries) {
  size_t profile_info_size = RoundUp(
      sizeof(ProfilingInfo) + sizeof(ProfilingInfo::InlineCache) * entries.size(),
      sizeof(void*));
  ScopedThreadSuspension sts(self, kSuspended);
  MutexLock mu(self, lock_);
  WaitForPotentialCollectionToComplete(self);

  // Check whether some other thread has concurrently created it.
  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
  if (info != nullptr) {
    return info;
  }

  uint8_t* data = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, profile_info_size));
  if (data == nullptr) {
    return nullptr;
  }
  info = new (data) ProfilingInfo(method, entries);
  method->SetProfilingInfo(info);
  profiling_infos_.push_back(info);
  return info;
}

}  // namespace jit
}  // namespace art