Split profile recording from JIT compilation

We still use ProfilingInfo objects to record profile information. This
gives us the flexibility to add inline caches in the future and the
convenience of the GC support that is already implemented for them.

If UseJIT is false and SaveProfilingInfo is true, we only record the
ProfilingInfo and never launch compilation tasks.
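
The gating this change introduces can be summarized with a small sketch.
This is an illustration only: JitConfigSketch, RecordProfilingInfo and
EnqueueCompilationTask are hypothetical stand-ins for the flags and code
paths described above (roughly -Xusejit and -Xjitsaveprofilinginfo), not
the actual ART API.

  #include <cstdio>

  // Illustrative stand-ins for the two flags discussed above. Not the real ART types.
  struct JitConfigSketch {
    bool use_jit_compilation;  // the part of the old UseJit() isolated by this change
    bool save_profiling_info;  // SaveProfilingInfo
  };

  // Hypothetical hooks standing in for ProfilingInfo recording and for
  // queueing a JIT compile task; after this change they are gated separately.
  void RecordProfilingInfo(const char* method) { std::printf("profile %s\n", method); }
  void EnqueueCompilationTask(const char* method) { std::printf("compile %s\n", method); }

  // Sketch of the decision taken when a method becomes warm/hot.
  void OnMethodWarm(const JitConfigSketch& cfg, const char* method) {
    if (cfg.save_profiling_info) {
      RecordProfilingInfo(method);     // recording happens whenever profiling is on
    }
    if (cfg.use_jit_compilation) {
      EnqueueCompilationTask(method);  // compilation only when the JIT is enabled
    }
  }

  int main() {
    // UseJIT == false, SaveProfilingInfo == true: the profile is recorded,
    // but no compilation task is ever launched.
    OnMethodWarm({/*use_jit_compilation=*/false, /*save_profiling_info=*/true}, "Foo.bar");
    return 0;
  }
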
Bug: 27916886
Change-Id: I6e4768dc5d58f2f85f947b276b4244aa11ce3fca
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 1491a18..606302b 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -60,7 +60,7 @@
// TODO: Investigate why are we doing the work again for this method and try to avoid it.
LOG(WARNING) << "Method processed more than once: "
<< PrettyMethod(ref.dex_method_index, *ref.dex_file);
- if (!Runtime::Current()->UseJit()) {
+ if (!Runtime::Current()->UseJitCompilation()) {
DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size());
DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
}
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 5c0253c..bace014 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -54,7 +54,8 @@
}
// Only need dequicken info for JIT so far.
- if (Runtime::Current()->UseJit() && !verified_method->GenerateDequickenMap(method_verifier)) {
+ if (Runtime::Current()->UseJitCompilation() &&
+ !verified_method->GenerateDequickenMap(method_verifier)) {
return nullptr;
}
}
@@ -72,7 +73,7 @@
}
const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const {
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
auto it = dequicken_map_.find(dex_pc);
return (it != dequicken_map_.end()) ? &it->second : nullptr;
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index be82956..6cba8b7 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -473,7 +473,7 @@
const DexFile& dex_file, const DexFile::ClassDef& class_def)
SHARED_REQUIRES(Locks::mutator_lock_) {
auto* const runtime = Runtime::Current();
- if (runtime->UseJit() || driver.GetCompilerOptions().VerifyAtRuntime()) {
+ if (runtime->UseJitCompilation() || driver.GetCompilerOptions().VerifyAtRuntime()) {
// Verify at runtime shouldn't dex to dex since we didn't resolve of verify.
return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
}
@@ -1268,7 +1268,7 @@
bool CompilerDriver::CanAssumeClassIsLoaded(mirror::Class* klass) {
Runtime* runtime = Runtime::Current();
if (!runtime->IsAotCompiler()) {
- DCHECK(runtime->UseJit());
+ DCHECK(runtime->UseJitCompilation());
// Having the klass reference here implies that the klass is already loaded.
return true;
}
@@ -1289,7 +1289,7 @@
if ((IsBootImage() &&
IsImageClass(dex_cache->GetDexFile()->StringDataByIdx(
dex_cache->GetDexFile()->GetTypeId(type_idx).descriptor_idx_))) ||
- Runtime::Current()->UseJit()) {
+ Runtime::Current()->UseJitCompilation()) {
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
result = (resolved_class != nullptr);
}
@@ -1307,7 +1307,7 @@
// See also Compiler::ResolveDexFile
bool result = false;
- if (IsBootImage() || Runtime::Current()->UseJit()) {
+ if (IsBootImage() || Runtime::Current()->UseJitCompilation()) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
@@ -1319,7 +1319,7 @@
result = true;
} else {
// Just check whether the dex cache already has the string.
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
result = (dex_cache->GetResolvedString(string_idx) != nullptr);
}
}
@@ -1427,7 +1427,7 @@
} else {
return false;
}
- } else if (runtime->UseJit() && !heap->IsMovableObject(resolved_class)) {
+ } else if (runtime->UseJitCompilation() && !heap->IsMovableObject(resolved_class)) {
*is_type_initialized = resolved_class->IsInitialized();
// If the class may move around, then don't embed it as a direct pointer.
*use_direct_type_ptr = true;
@@ -1604,7 +1604,7 @@
}
}
}
- if (runtime->UseJit()) {
+ if (runtime->UseJitCompilation()) {
// If we are the JIT, then don't allow a direct call to the interpreter bridge since this will
// never be updated even after we compile the method.
if (cl->IsQuickToInterpreterBridge(
@@ -1636,7 +1636,7 @@
bool must_use_direct_pointers = false;
mirror::DexCache* dex_cache = declaring_class->GetDexCache();
if (target_method->dex_file == dex_cache->GetDexFile() &&
- !(runtime->UseJit() && dex_cache->GetResolvedMethod(
+ !(runtime->UseJitCompilation() && dex_cache->GetResolvedMethod(
method->GetDexMethodIndex(), pointer_size) == nullptr)) {
target_method->dex_method_index = method->GetDexMethodIndex();
} else {
@@ -1673,7 +1673,7 @@
break;
}
}
- if (method_in_image || compiling_boot || runtime->UseJit()) {
+ if (method_in_image || compiling_boot || runtime->UseJitCompilation()) {
// We know we must be able to get to the method in the image, so use that pointer.
// In the case where we are the JIT, we can always use direct pointers since we know where
// the method and its code are / will be. We don't sharpen to interpreter bridge since we
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 45d23fe..197e473 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5183,10 +5183,10 @@
case HLoadString::LoadKind::kBootImageAddress:
break;
case HLoadString::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
// We disable pc-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
// TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e8e6b68..9680f2b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4010,10 +4010,10 @@
case HLoadString::LoadKind::kBootImageAddress:
break;
case HLoadString::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1a4e62e..c8a510d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -5975,7 +5975,7 @@
DCHECK(GetCompilerOptions().GetCompilePic());
FALLTHROUGH_INTENDED;
case HLoadString::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJit()); // Note: boot image is also non-JIT.
+ DCHECK(!Runtime::Current()->UseJitCompilation()); // Note: boot image is also non-JIT.
// We disable pc-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
// TODO: Create as many X86ComputeBaseMethodAddress instructions as needed for methods
@@ -5987,7 +5987,7 @@
case HLoadString::LoadKind::kBootImageAddress:
break;
case HLoadString::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 59cc444..1540ea5 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5411,10 +5411,10 @@
case HLoadString::LoadKind::kBootImageAddress:
break;
case HLoadString::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index ff4b9a7..d98f828 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -308,7 +308,7 @@
// Check if we can use an inline cache.
ArtMethod* caller = graph_->GetArtMethod();
- if (Runtime::Current()->UseJit()) {
+ if (Runtime::Current()->UseJitCompilation()) {
// Under JIT, we should always know the caller.
DCHECK(caller != nullptr);
ScopedProfilingInfoInlineUse spiis(caller, soa.Self());
@@ -623,7 +623,7 @@
ArtMethod* resolved_method,
const InlineCache& ic) {
// This optimization only works under JIT for now.
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
if (graph_->GetInstructionSet() == kMips64) {
// TODO: Support HClassTableGet for mips64.
return false;
@@ -802,7 +802,7 @@
if (!method->GetDeclaringClass()->IsVerified()) {
uint16_t class_def_idx = method->GetDeclaringClass()->GetDexClassDefIndex();
- if (Runtime::Current()->UseJit() ||
+ if (Runtime::Current()->UseJitCompilation() ||
!compiler_driver_->IsMethodVerifiedWithoutFailures(
method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 7a1bb31..08bd35f 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -99,7 +99,7 @@
if (direct_method != 0u) { // Should we use a direct pointer to the method?
// Note: For JIT, kDirectAddressWithFixup doesn't make sense at all and while
// kDirectAddress would be fine for image methods, we don't support it at the moment.
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
if (direct_method != static_cast<uintptr_t>(-1)) { // Is the method pointer known now?
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
method_load_data = direct_method;
@@ -109,7 +109,7 @@
} else { // Use dex cache.
DCHECK_EQ(target_method.dex_file, &graph_->GetDexFile());
if (use_pc_relative_instructions) { // Can we use PC-relative access to the dex cache arrays?
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
DexCacheArraysLayout layout(GetInstructionSetPointerSize(codegen_->GetInstructionSet()),
&graph_->GetDexFile());
@@ -121,7 +121,7 @@
if (direct_code != 0u) { // Should we use a direct pointer to the code?
// Note: For JIT, kCallPCRelative and kCallDirectWithFixup don't make sense at all and
// while kCallDirect would be fine for image methods, we don't support it at the moment.
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
if (direct_code != static_cast<uintptr_t>(-1)) { // Is the code pointer known now?
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect;
direct_code_ptr = direct_code;
@@ -174,7 +174,7 @@
if (compiler_driver_->IsBootImage()) {
// Compiling boot image. Resolve the string and allocate it if needed.
- DCHECK(!runtime->UseJit());
+ DCHECK(!runtime->UseJitCompilation());
mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
CHECK(string != nullptr);
if (!compiler_driver_->GetSupportBootImageFixup()) {
@@ -187,7 +187,7 @@
? HLoadString::LoadKind::kBootImageLinkTimePcRelative
: HLoadString::LoadKind::kBootImageLinkTimeAddress;
}
- } else if (runtime->UseJit()) {
+ } else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
mirror::String* string = dex_cache->GetResolvedString(string_index);