Merge "Workaround invokesuper underspecified behavior."
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index dde3cdb..0235a30 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -363,11 +363,20 @@
ifndef LIBART_IMG_TARGET_BASE_ADDRESS
$(error LIBART_IMG_TARGET_BASE_ADDRESS unset)
endif
+
+ART_TARGET_CFLAGS += $(art_cflags) -DART_TARGET \
+ -DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS) \
+
+ifeq ($(ART_TARGET_LINUX),true)
+# Setting ART_TARGET_LINUX to true compiles art/ assuming that the target device
+# will be running Linux rather than Android.
+ART_TARGET_CFLAGS += -DART_TARGET_LINUX
+else
# The ART_TARGET_ANDROID macro is passed to target builds, which check
# against it instead of against __ANDROID__ (which is provided by target
# toolchains).
-ART_TARGET_CFLAGS += $(art_cflags) -DART_TARGET -DART_TARGET_ANDROID \
- -DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS) \
+ART_TARGET_CFLAGS += -DART_TARGET_ANDROID
+endif
ART_TARGET_CFLAGS += $(art_target_cflags)
ART_TARGET_ASFLAGS += $(art_asflags)
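For orientation, a minimal sketch (not part of this change) of how art/ code can branch on the macros the makefile now defines; per the comment above, ART_TARGET_ANDROID is checked instead of the toolchain-provided __ANDROID__:

    #if defined(ART_TARGET)
    #if defined(ART_TARGET_LINUX)
    // The target device runs plain Linux.
    #elif defined(ART_TARGET_ANDROID)
    // The target device runs Android (checked instead of __ANDROID__).
    #endif
    #endif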
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index cd463ec..3b459c3 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -650,11 +650,11 @@
ifeq ($(ART_BUILD_TARGET),true)
$(foreach file,$(RUNTIME_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),,libbacktrace)))
- $(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),art/compiler,libartd-compiler libbacktrace)))
+ $(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),art/compiler,libartd-compiler libbacktrace libnativeloader)))
endif
ifeq ($(ART_BUILD_HOST),true)
$(foreach file,$(RUNTIME_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),,libbacktrace)))
- $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),art/compiler,libartd-compiler libbacktrace)))
+ $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),art/compiler,libartd-compiler libbacktrace libnativeloader)))
endif
# Used outside the art project to get a list of the current tests
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 81b854e..7c53e01 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -461,8 +461,8 @@
* Test successes
*/
{
- EXPECT_SINGLE_PARSE_VALUE(true, "-Xusejit:true", M::UseJIT);
- EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJIT);
+ EXPECT_SINGLE_PARSE_VALUE(true, "-Xusejit:true", M::UseJitCompilation);
+ EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJitCompilation);
}
{
EXPECT_SINGLE_PARSE_VALUE(
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index c0a00cc..4797540 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -620,6 +620,8 @@
log_verbosity.verifier = true;
} else if (verbose_options[j] == "image") {
log_verbosity.image = true;
+ } else if (verbose_options[j] == "systrace-locks") {
+ log_verbosity.systrace_lock_logging = true;
} else {
return Result::Usage(std::string("Unknown -verbose option ") + verbose_options[j]);
}
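The new option is enabled like the existing ones, e.g. -verbose:systrace-locks on the runtime command line. A hedged sketch of a consumer, assuming the same gLogVerbosity pattern the other -verbose flags use (the actual consumer sites are not in this hunk):

    // Illustrative only: gate systrace lock events on the new verbosity bit.
    if (gLogVerbosity.systrace_lock_logging) {
      // Emit lock contention begin/end events to systrace here.
    }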
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index f75a252..bf29e1c 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -180,6 +180,7 @@
isa,
instruction_set_features_.get(),
/* boot_image */ true,
+ /* app_image */ false,
GetImageClasses(),
GetCompiledClasses(),
GetCompiledMethods(),
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 1491a18..606302b 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -60,7 +60,7 @@
// TODO: Investigate why we are doing the work again for this method and try to avoid it.
LOG(WARNING) << "Method processed more than once: "
<< PrettyMethod(ref.dex_method_index, *ref.dex_file);
- if (!Runtime::Current()->UseJit()) {
+ if (!Runtime::Current()->UseJitCompilation()) {
DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size());
DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
}
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 5c0253c..bace014 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -54,7 +54,8 @@
}
// Only need dequicken info for JIT so far.
- if (Runtime::Current()->UseJit() && !verified_method->GenerateDequickenMap(method_verifier)) {
+ if (Runtime::Current()->UseJitCompilation() &&
+ !verified_method->GenerateDequickenMap(method_verifier)) {
return nullptr;
}
}
@@ -72,7 +73,7 @@
}
const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const {
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
auto it = dequicken_map_.find(dex_pc);
return (it != dequicken_map_.end()) ? &it->second : nullptr;
}
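The UseJit() to UseJitCompilation() rename threaded through this change is mechanical at each call site. A plausible reading of the rename (an assumption; the diff does not spell it out) is that a JIT instance can now exist without compiling, for example purely to collect profiles, so the old name became ambiguous:

    // Hedged sketch; jit_compiles_code_ is a hypothetical field.
    bool Runtime::UseJitCompilation() const {
      // True only when a JIT exists and is configured to compile code,
      // not merely to gather profiling data.
      return jit_ != nullptr && jit_compiles_code_;
    }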
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 9e0c22c..6863f42 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -36,6 +36,7 @@
/* instruction_set_ */ kNone,
/* instruction_set_features */ nullptr,
/* boot_image */ false,
+ /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index be82956..1ab1d31 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -341,6 +341,7 @@
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
bool boot_image,
+ bool app_image,
std::unordered_set<std::string>* image_classes,
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
@@ -363,6 +364,7 @@
compiled_methods_(MethodTable::key_compare()),
non_relative_linker_patch_count_(0u),
boot_image_(boot_image),
+ app_image_(app_image),
image_classes_(image_classes),
classes_to_compile_(compiled_classes),
methods_to_compile_(compiled_methods),
@@ -473,7 +475,7 @@
const DexFile& dex_file, const DexFile::ClassDef& class_def)
SHARED_REQUIRES(Locks::mutator_lock_) {
auto* const runtime = Runtime::Current();
- if (runtime->UseJit() || driver.GetCompilerOptions().VerifyAtRuntime()) {
+ if (runtime->UseJitCompilation() || driver.GetCompilerOptions().VerifyAtRuntime()) {
// Verify at runtime shouldn't dex to dex since we didn't resolve or verify.
return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
}
@@ -945,7 +947,7 @@
class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
public:
- ResolveCatchBlockExceptionsClassVisitor(
+ explicit ResolveCatchBlockExceptionsClassVisitor(
std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
: exceptions_to_resolve_(exceptions_to_resolve) {}
@@ -1268,7 +1270,7 @@
bool CompilerDriver::CanAssumeClassIsLoaded(mirror::Class* klass) {
Runtime* runtime = Runtime::Current();
if (!runtime->IsAotCompiler()) {
- DCHECK(runtime->UseJit());
+ DCHECK(runtime->UseJitCompilation());
// Having the klass reference here implies that the klass is already loaded.
return true;
}
@@ -1289,7 +1291,7 @@
if ((IsBootImage() &&
IsImageClass(dex_cache->GetDexFile()->StringDataByIdx(
dex_cache->GetDexFile()->GetTypeId(type_idx).descriptor_idx_))) ||
- Runtime::Current()->UseJit()) {
+ Runtime::Current()->UseJitCompilation()) {
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
result = (resolved_class != nullptr);
}
@@ -1307,7 +1309,7 @@
// See also Compiler::ResolveDexFile
bool result = false;
- if (IsBootImage() || Runtime::Current()->UseJit()) {
+ if (IsBootImage() || Runtime::Current()->UseJitCompilation()) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
@@ -1319,7 +1321,7 @@
result = true;
} else {
// Just check whether the dex cache already has the string.
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
result = (dex_cache->GetResolvedString(string_idx) != nullptr);
}
}
@@ -1427,7 +1429,7 @@
} else {
return false;
}
- } else if (runtime->UseJit() && !heap->IsMovableObject(resolved_class)) {
+ } else if (runtime->UseJitCompilation() && !heap->IsMovableObject(resolved_class)) {
*is_type_initialized = resolved_class->IsInitialized();
// If the class may move around, then don't embed it as a direct pointer.
*use_direct_type_ptr = true;
@@ -1604,7 +1606,7 @@
}
}
}
- if (runtime->UseJit()) {
+ if (runtime->UseJitCompilation()) {
// If we are the JIT, then don't allow a direct call to the interpreter bridge since this will
// never be updated even after we compile the method.
if (cl->IsQuickToInterpreterBridge(
@@ -1636,7 +1638,7 @@
bool must_use_direct_pointers = false;
mirror::DexCache* dex_cache = declaring_class->GetDexCache();
if (target_method->dex_file == dex_cache->GetDexFile() &&
- !(runtime->UseJit() && dex_cache->GetResolvedMethod(
+ !(runtime->UseJitCompilation() && dex_cache->GetResolvedMethod(
method->GetDexMethodIndex(), pointer_size) == nullptr)) {
target_method->dex_method_index = method->GetDexMethodIndex();
} else {
@@ -1673,7 +1675,7 @@
break;
}
}
- if (method_in_image || compiling_boot || runtime->UseJit()) {
+ if (method_in_image || compiling_boot || runtime->UseJitCompilation()) {
// We know we must be able to get to the method in the image, so use that pointer.
// In the case where we are the JIT, we can always use direct pointers since we know where
// the method and its code are / will be. We don't sharpen to interpreter bridge since we
@@ -2440,9 +2442,12 @@
context.ForAll(0, dex_file.NumClassDefs(), &visitor, init_thread_count);
}
-class InitializeArrayClassVisitor : public ClassVisitor {
+class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor {
public:
virtual bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
+ return true;
+ }
if (klass->IsArrayClass()) {
StackHandleScope<1> hs(Thread::Current());
Runtime::Current()->GetClassLinker()->EnsureInitialized(hs.Self(),
@@ -2450,6 +2455,10 @@
true,
true);
}
+ // Create the conflict tables.
+ if (!klass->IsTemp() && klass->ShouldHaveEmbeddedImtAndVTable()) {
+ Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass);
+ }
return true;
}
};
@@ -2462,13 +2471,15 @@
CHECK(dex_file != nullptr);
InitializeClasses(class_loader, *dex_file, dex_files, timings);
}
- {
+ if (boot_image_ || app_image_) {
// Make sure that we call EnsureInitialized on all the array classes to call
// SetVerificationAttempted so that the access flags are set. If we do not do this they get
// changed at runtime resulting in more dirty image pages.
+ // Also create conflict tables.
+ // Only useful if we are compiling an image (image_classes_ is not null).
ScopedObjectAccess soa(Thread::Current());
- InitializeArrayClassVisitor visitor;
- Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
+ InitializeArrayClassesAndCreateConflictTablesVisitor visitor;
+ Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&visitor);
}
if (IsBootImage()) {
// Prune garbage objects created during aborted transactions.
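The conflict tables created here are later laid out and relocated by the image writer (see image_writer.cc below). Judging only by the accessors this patch uses, a table is a flat sequence of (interface method, implementation method) pairs; a hedged traversal sketch:

    // Illustrative traversal using only accessors that appear in this patch.
    void DumpConflictTable(ImtConflictTable* table, size_t pointer_size) {
      const size_t count = table->NumEntries(pointer_size);
      for (size_t i = 0; i < count; ++i) {
        ArtMethod* iface = table->GetInterfaceMethod(i, pointer_size);
        ArtMethod* impl = table->GetImplementationMethod(i, pointer_size);
        // ... inspect or log the (iface, impl) pair ...
      }
    }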
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index d63dffa..19a1ecc 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -92,6 +92,7 @@
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
bool boot_image,
+ bool app_image,
std::unordered_set<std::string>* image_classes,
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
@@ -652,6 +653,7 @@
size_t non_relative_linker_patch_count_ GUARDED_BY(compiled_methods_lock_);
const bool boot_image_;
+ const bool app_image_;
// If image_ is true, specifies the classes that will be included in the image.
// Note if image_classes_ is null, all classes are included in the image.
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 1bd4c3a..f20dba3 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -21,7 +21,7 @@
namespace art {
CompilerOptions::CompilerOptions()
- : compiler_filter_(kDefaultCompilerFilter),
+ : compiler_filter_(CompilerFilter::kDefaultCompilerFilter),
huge_method_threshold_(kDefaultHugeMethodThreshold),
large_method_threshold_(kDefaultLargeMethodThreshold),
small_method_threshold_(kDefaultSmallMethodThreshold),
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index c67ab6e..6bbd3c5 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -31,7 +31,6 @@
class CompilerOptions FINAL {
public:
// Guide heuristics to determine whether to compile method if profile data not available.
- static const CompilerFilter::Filter kDefaultCompilerFilter = CompilerFilter::kSpeed;
static const size_t kDefaultHugeMethodThreshold = 10000;
static const size_t kDefaultLargeMethodThreshold = 600;
static const size_t kDefaultSmallMethodThreshold = 60;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 8bb462c..00ff522 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -653,8 +653,7 @@
for (ImageInfo& image_info : image_infos_) {
ImageSection unused_sections[ImageHeader::kSectionCount];
const size_t length = RoundUp(
- image_info.CreateImageSections(target_ptr_size_, unused_sections),
- kPageSize);
+ image_info.CreateImageSections(unused_sections), kPageSize);
std::string error_msg;
image_info.image_.reset(MemMap::MapAnonymous("image writer image",
@@ -1214,6 +1213,20 @@
AssignMethodOffset(&m, type, oat_index);
}
(any_dirty ? dirty_methods_ : clean_methods_) += num_methods;
+
+ // Assign offsets for all runtime methods in the IMT since these may keep conflict
+ // tables live.
+ if (as_klass->ShouldHaveEmbeddedImtAndVTable()) {
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ ArtMethod* imt_method = as_klass->GetEmbeddedImTableEntry(i, target_ptr_size_);
+ DCHECK(imt_method != nullptr);
+ if (imt_method->IsRuntimeMethod() &&
+ !IsInBootImage(imt_method) &&
+ !NativeRelocationAssigned(imt_method)) {
+ AssignMethodOffset(imt_method, kNativeObjectRelocationTypeRuntimeMethod, oat_index);
+ }
+ }
+ }
}
} else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
@@ -1237,13 +1250,37 @@
}
}
+bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
+ return native_object_relocations_.find(ptr) != native_object_relocations_.end();
+}
+
+void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) {
+ // No offset, or already assigned.
+ if (table == nullptr || NativeRelocationAssigned(table)) {
+ return;
+ }
+ CHECK(!IsInBootImage(table));
+ // If the method is a conflict method we also want to assign the conflict table offset.
+ ImageInfo& image_info = GetImageInfo(oat_index);
+ const size_t size = table->ComputeSize(target_ptr_size_);
+ native_object_relocations_.emplace(
+ table,
+ NativeObjectRelocation {
+ oat_index,
+ image_info.bin_slot_sizes_[kBinIMTConflictTable],
+ kNativeObjectRelocationTypeIMTConflictTable});
+ image_info.bin_slot_sizes_[kBinIMTConflictTable] += size;
+}
+
void ImageWriter::AssignMethodOffset(ArtMethod* method,
NativeObjectRelocationType type,
size_t oat_index) {
DCHECK(!IsInBootImage(method));
- auto it = native_object_relocations_.find(method);
- CHECK(it == native_object_relocations_.end()) << "Method " << method << " already assigned "
+ CHECK(!NativeRelocationAssigned(method)) << "Method " << method << " already assigned "
<< PrettyMethod(method);
+ if (method->IsRuntimeMethod()) {
+ TryAssignConflictTableOffset(method->GetImtConflictTable(target_ptr_size_), oat_index);
+ }
ImageInfo& image_info = GetImageInfo(oat_index);
size_t& offset = image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(type)];
native_object_relocations_.emplace(method, NativeObjectRelocation { oat_index, offset, type });
@@ -1292,8 +1329,7 @@
// know where image_roots is going to end up
image_objects_offset_begin_ = RoundUp(sizeof(ImageHeader), kObjectAlignment); // 64-bit-alignment
- // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
- heap->VisitObjects(WalkFieldsCallback, this);
+ const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
// Write the image runtime methods.
image_methods_[ImageHeader::kResolutionMethod] = runtime->GetResolutionMethod();
image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod();
@@ -1303,31 +1339,19 @@
runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
image_methods_[ImageHeader::kRefsAndArgsSaveMethod] =
runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
-
- // Add room for fake length prefixed array for holding the image methods.
- const auto image_method_type = kNativeObjectRelocationTypeArtMethodArrayClean;
- auto it = native_object_relocations_.find(&image_method_array_);
- CHECK(it == native_object_relocations_.end());
- ImageInfo& default_image_info = GetImageInfo(GetDefaultOatIndex());
- size_t& offset =
- default_image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
- if (!compile_app_image_) {
- native_object_relocations_.emplace(&image_method_array_,
- NativeObjectRelocation { GetDefaultOatIndex(), offset, image_method_type });
- }
- size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
- const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize(
- 0, ArtMethod::Size(target_ptr_size_), method_alignment);
- CHECK_ALIGNED_PARAM(array_size, method_alignment);
- offset += array_size;
+ // Visit image methods first to have the main runtime methods in the first image.
for (auto* m : image_methods_) {
CHECK(m != nullptr);
CHECK(m->IsRuntimeMethod());
DCHECK_EQ(compile_app_image_, IsInBootImage(m)) << "Trampolines should be in boot image";
if (!IsInBootImage(m)) {
- AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean, GetDefaultOatIndex());
+ AssignMethodOffset(m, kNativeObjectRelocationTypeRuntimeMethod, GetDefaultOatIndex());
}
}
+
+ // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
+ heap->VisitObjects(WalkFieldsCallback, this);
+
// Calculate size of the dex cache arrays slot and prepare offsets.
PrepareDexCacheArraySlots();
@@ -1346,15 +1370,22 @@
for (ImageInfo& image_info : image_infos_) {
size_t bin_offset = image_objects_offset_begin_;
for (size_t i = 0; i != kBinSize; ++i) {
+ switch (i) {
+ case kBinArtMethodClean:
+ case kBinArtMethodDirty: {
+ bin_offset = RoundUp(bin_offset, method_alignment);
+ break;
+ }
+ case kBinIMTConflictTable: {
+ bin_offset = RoundUp(bin_offset, target_ptr_size_);
+ break;
+ }
+ default: {
+ // Normal alignment.
+ }
+ }
image_info.bin_slot_offsets_[i] = bin_offset;
bin_offset += image_info.bin_slot_sizes_[i];
- if (i == kBinArtField) {
- static_assert(kBinArtField + 1 == kBinArtMethodClean, "Methods follow fields.");
- static_assert(alignof(ArtField) == 4u, "ArtField alignment is 4.");
- DCHECK_ALIGNED(bin_offset, 4u);
- DCHECK(method_alignment == 4u || method_alignment == 8u);
- bin_offset = RoundUp(bin_offset, method_alignment);
- }
}
// NOTE: There may be additional padding between the bin slots and the intern table.
DCHECK_EQ(image_info.image_end_,
@@ -1367,9 +1398,7 @@
image_info.image_begin_ = global_image_begin_ + image_offset;
image_info.image_offset_ = image_offset;
ImageSection unused_sections[ImageHeader::kSectionCount];
- image_info.image_size_ = RoundUp(
- image_info.CreateImageSections(target_ptr_size_, unused_sections),
- kPageSize);
+ image_info.image_size_ = RoundUp(image_info.CreateImageSections(unused_sections), kPageSize);
// There should be no gaps until the next image.
image_offset += image_info.image_size_;
}
@@ -1396,42 +1425,52 @@
// Note that image_info.image_end_ is left at end of used mirror object section.
}
-size_t ImageWriter::ImageInfo::CreateImageSections(size_t target_ptr_size,
- ImageSection* out_sections) const {
+size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const {
DCHECK(out_sections != nullptr);
+
+ // Do not round up any sections here that are represented by the bins since it will break
+ // offsets.
+
// Objects section
- auto* objects_section = &out_sections[ImageHeader::kSectionObjects];
+ ImageSection* objects_section = &out_sections[ImageHeader::kSectionObjects];
*objects_section = ImageSection(0u, image_end_);
- size_t cur_pos = objects_section->End();
+
// Add field section.
- auto* field_section = &out_sections[ImageHeader::kSectionArtFields];
- *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]);
+ ImageSection* field_section = &out_sections[ImageHeader::kSectionArtFields];
+ *field_section = ImageSection(bin_slot_offsets_[kBinArtField], bin_slot_sizes_[kBinArtField]);
CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset());
- cur_pos = field_section->End();
- // Round up to the alignment the required by the method section.
- cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size));
+
// Add method section.
- auto* methods_section = &out_sections[ImageHeader::kSectionArtMethods];
- *methods_section = ImageSection(cur_pos,
- bin_slot_sizes_[kBinArtMethodClean] +
- bin_slot_sizes_[kBinArtMethodDirty]);
- CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
- cur_pos = methods_section->End();
+ ImageSection* methods_section = &out_sections[ImageHeader::kSectionArtMethods];
+ *methods_section = ImageSection(
+ bin_slot_offsets_[kBinArtMethodClean],
+ bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinArtMethodDirty]);
+
+ // Conflict tables section.
+ ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables];
+ *imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable],
+ bin_slot_sizes_[kBinIMTConflictTable]);
+
+ // Runtime methods section.
+ ImageSection* runtime_methods_section = &out_sections[ImageHeader::kSectionRuntimeMethods];
+ *runtime_methods_section = ImageSection(bin_slot_offsets_[kBinRuntimeMethod],
+ bin_slot_sizes_[kBinRuntimeMethod]);
+
// Add dex cache arrays section.
- auto* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
- *dex_cache_arrays_section = ImageSection(cur_pos, bin_slot_sizes_[kBinDexCacheArray]);
- CHECK_EQ(bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset());
- cur_pos = dex_cache_arrays_section->End();
+ ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
+ *dex_cache_arrays_section = ImageSection(bin_slot_offsets_[kBinDexCacheArray],
+ bin_slot_sizes_[kBinDexCacheArray]);
+
// Round up to the alignment the string table expects. See HashSet::WriteToMemory.
- cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
+ size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t));
// Calculate the size of the interned strings.
- auto* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
+ ImageSection* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
*interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
cur_pos = interned_strings_section->End();
// Round up to the alignment the class table expects. See HashSet::WriteToMemory.
cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
// Calculate the size of the class table section.
- auto* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
+ ImageSection* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
*class_table_section = ImageSection(cur_pos, class_table_bytes_);
cur_pos = class_table_section->End();
// Image end goes right before the start of the image bitmap.
@@ -1446,7 +1485,7 @@
// Create the image sections.
ImageSection sections[ImageHeader::kSectionCount];
- const size_t image_end = image_info.CreateImageSections(target_ptr_size_, sections);
+ const size_t image_end = image_info.CreateImageSections(sections);
// Finally bitmap section.
const size_t bitmap_bytes = image_info.image_bitmap_->Size();
@@ -1531,8 +1570,20 @@
ImageWriter* const image_writer_;
};
+void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
+ const size_t count = orig->NumEntries(target_ptr_size_);
+ for (size_t i = 0; i < count; ++i) {
+ ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_);
+ ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_);
+ copy->SetInterfaceMethod(i, target_ptr_size_, NativeLocationInImage(interface_method));
+ copy->SetImplementationMethod(i,
+ target_ptr_size_,
+ NativeLocationInImage(implementation_method));
+ }
+}
+
void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
- ImageInfo& image_info = GetImageInfo(oat_index);
+ const ImageInfo& image_info = GetImageInfo(oat_index);
// Copy ArtFields and methods to their locations and update the array for convenience.
for (auto& pair : native_object_relocations_) {
NativeObjectRelocation& relocation = pair.second;
@@ -1550,6 +1601,7 @@
GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass()));
break;
}
+ case kNativeObjectRelocationTypeRuntimeMethod:
case kNativeObjectRelocationTypeArtMethodClean:
case kNativeObjectRelocationTypeArtMethodDirty: {
CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first),
@@ -1575,26 +1627,22 @@
case kNativeObjectRelocationTypeDexCacheArray:
// Nothing to copy here, everything is done in FixupDexCache().
break;
+ case kNativeObjectRelocationTypeIMTConflictTable: {
+ auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
+ CopyAndFixupImtConflictTable(
+ orig_table,
+ new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_));
+ break;
+ }
}
}
// Fixup the image method roots.
auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
- const ImageSection& methods_section = image_header->GetMethodsSection();
for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
ArtMethod* method = image_methods_[i];
CHECK(method != nullptr);
- // Only place runtime methods in the image of the default oat file.
- if (method->IsRuntimeMethod() && oat_index != GetDefaultOatIndex()) {
- continue;
- }
if (!IsInBootImage(method)) {
- auto it = native_object_relocations_.find(method);
- CHECK(it != native_object_relocations_.end()) << "No forwarding for " << PrettyMethod(method);
- NativeObjectRelocation& relocation = it->second;
- CHECK(methods_section.Contains(relocation.offset)) << relocation.offset << " not in "
- << methods_section;
- CHECK(relocation.IsArtMethodRelocation()) << relocation.type;
- method = reinterpret_cast<ArtMethod*>(global_image_begin_ + it->second.offset);
+ method = NativeLocationInImage(method);
}
image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), method);
}
@@ -2057,24 +2105,28 @@
// The resolution method has a special trampoline to call.
Runtime* runtime = Runtime::Current();
- if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
- copy->SetEntryPointFromQuickCompiledCodePtrSize(
- GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_);
- } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
- orig == runtime->GetImtUnimplementedMethod())) {
- copy->SetEntryPointFromQuickCompiledCodePtrSize(
- GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_);
- } else if (UNLIKELY(orig->IsRuntimeMethod())) {
- bool found_one = false;
- for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) {
- auto idx = static_cast<Runtime::CalleeSaveType>(i);
- if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) {
- found_one = true;
- break;
+ if (orig->IsRuntimeMethod()) {
+ ImtConflictTable* orig_table = orig->GetImtConflictTable(target_ptr_size_);
+ if (orig_table != nullptr) {
+ // Special IMT conflict method, normal IMT conflict method or unimplemented IMT method.
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(
+ GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_);
+ copy->SetImtConflictTable(NativeLocationInImage(orig_table), target_ptr_size_);
+ } else if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(
+ GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_);
+ } else {
+ bool found_one = false;
+ for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) {
+ auto idx = static_cast<Runtime::CalleeSaveType>(i);
+ if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) {
+ found_one = true;
+ break;
+ }
}
+ CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig);
+ CHECK(copy->IsRuntimeMethod());
}
- CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig);
- CHECK(copy->IsRuntimeMethod());
} else {
// We assume all methods have code. If they don't currently then we set them to the use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
@@ -2141,6 +2193,10 @@
return kBinArtMethodDirty;
case kNativeObjectRelocationTypeDexCacheArray:
return kBinDexCacheArray;
+ case kNativeObjectRelocationTypeRuntimeMethod:
+ return kBinRuntimeMethod;
+ case kNativeObjectRelocationTypeIMTConflictTable:
+ return kBinIMTConflictTable;
}
UNREACHABLE();
}
@@ -2242,7 +2298,6 @@
compile_app_image_(compile_app_image),
target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
image_infos_(oat_filenames.size()),
- image_method_array_(ImageHeader::kImageMethodsCount),
dirty_methods_(0u),
clean_methods_(0u),
image_storage_mode_(image_storage_mode),
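Taken together, the image_writer.cc changes derive every native-data section directly from its bin offset instead of walking a running cursor. The resulting layout, reconstructed from CreateImageSections above:

    // Image layout after this change (offsets come from bin_slot_offsets_):
    //   kSectionObjects            mirror objects, [0, image_end_)
    //   kSectionArtFields          kBinArtField
    //   kSectionArtMethods         kBinArtMethodClean + kBinArtMethodDirty
    //   kSectionIMTConflictTables  kBinIMTConflictTable (new)
    //   kSectionRuntimeMethods     kBinRuntimeMethod (new)
    //   kSectionDexCacheArrays     kBinDexCacheArray
    //   kSectionInternedStrings    rounded up to 8 bytes first
    //   kSectionClassTable         rounded up to 8 bytes first
    //   image bitmap               page aligned, appended by the caller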
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 0cb6aea..51976c5 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -169,6 +169,10 @@
// ArtMethods may be dirty if the class has native methods or a declaring class that isn't
// initialized.
kBinArtMethodDirty,
+ // Conflict tables (clean).
+ kBinIMTConflictTable,
+ // Runtime methods (always clean, do not have a length-prefixed array).
+ kBinRuntimeMethod,
// Dex cache arrays have a special slot for PC-relative addressing. Since they are
// huge, and as such their dirtiness is not important for the clean/dirty separation,
// we arbitrarily keep them at the end of the native data.
@@ -186,6 +190,8 @@
kNativeObjectRelocationTypeArtMethodArrayClean,
kNativeObjectRelocationTypeArtMethodDirty,
kNativeObjectRelocationTypeArtMethodArrayDirty,
+ kNativeObjectRelocationTypeRuntimeMethod,
+ kNativeObjectRelocationTypeIMTConflictTable,
kNativeObjectRelocationTypeDexCacheArray,
};
friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
@@ -240,7 +246,7 @@
// Create the image sections into the out sections variable, returns the size of the image
// excluding the bitmap.
- size_t CreateImageSections(size_t target_ptr_size, ImageSection* out_sections) const;
+ size_t CreateImageSections(ImageSection* out_sections) const;
std::unique_ptr<MemMap> image_; // Memory mapped for generating the image.
@@ -395,6 +401,8 @@
void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
SHARED_REQUIRES(Locks::mutator_lock_);
+ void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void FixupClass(mirror::Class* orig, mirror::Class* copy)
SHARED_REQUIRES(Locks::mutator_lock_);
void FixupObject(mirror::Object* orig, mirror::Object* copy)
@@ -425,6 +433,11 @@
size_t oat_index)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
+ // relocation.
+ void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Return true if klass is loaded by the boot class loader but not in the boot image.
bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -481,6 +494,9 @@
// remove duplicates in the multi image and app image case.
mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_);
+ // Return true if there already exists a native relocation for an object.
+ bool NativeRelocationAssigned(void* ptr) const;
+
const CompilerDriver& compiler_driver_;
// Beginning target image address for the first image.
@@ -517,16 +533,14 @@
bool IsArtMethodRelocation() const {
return type == kNativeObjectRelocationTypeArtMethodClean ||
- type == kNativeObjectRelocationTypeArtMethodDirty;
+ type == kNativeObjectRelocationTypeArtMethodDirty ||
+ type == kNativeObjectRelocationTypeRuntimeMethod;
}
};
std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;
// Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
- // Fake length prefixed array for image methods. This array does not contain the actual
- // ArtMethods. We only use it for the header and relocation addresses.
- LengthPrefixedArray<ArtMethod> image_method_array_;
// Counters for measurements, used for logging only.
uint64_t dirty_methods_;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 5de9842..1785338 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -88,7 +88,7 @@
JitCompiler::JitCompiler() {
compiler_options_.reset(new CompilerOptions(
- CompilerOptions::kDefaultCompilerFilter,
+ CompilerFilter::kDefaultCompilerFilter,
CompilerOptions::kDefaultHugeMethodThreshold,
CompilerOptions::kDefaultLargeMethodThreshold,
CompilerOptions::kDefaultSmallMethodThreshold,
@@ -155,7 +155,8 @@
Compiler::kOptimizing,
instruction_set,
instruction_set_features_.get(),
- /* image */ false,
+ /* boot_image */ false,
+ /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index cf836a9..c4c2399 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -31,6 +31,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "mirror/stack_trace_element.h"
+#include "nativeloader/native_loader.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
@@ -53,6 +54,11 @@
check_generic_jni_ = false;
}
+ void TearDown() OVERRIDE {
+ android::ResetNativeLoader();
+ CommonCompilerTest::TearDown();
+ }
+
void SetCheckGenericJni(bool generic) {
check_generic_jni_ = generic;
}
@@ -92,11 +98,13 @@
CompileForTest(class_loader_, direct, method_name, method_sig);
// Start runtime.
Thread::Current()->TransitionFromSuspendedToRunnable();
+ android::InitializeNativeLoader();
bool started = runtime_->Start();
CHECK(started);
}
// JNI operations after runtime start.
env_ = Thread::Current()->GetJniEnv();
+ library_search_path_ = env_->NewStringUTF("");
jklass_ = env_->FindClass("MyClassNatives");
ASSERT_TRUE(jklass_ != nullptr) << method_name << " " << method_sig;
@@ -168,6 +176,7 @@
void StackArgsSignExtendedMips64Impl();
JNIEnv* env_;
+ jstring library_search_path_;
jmethodID jmethod_;
bool check_generic_jni_;
};
@@ -220,7 +229,7 @@
std::string reason;
ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
- LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason))
+ LoadNativeLibrary(env_, "", class_loader_, library_search_path_, &reason))
<< reason;
jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24);
@@ -235,7 +244,7 @@
std::string reason;
ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
- LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason))
+ LoadNativeLibrary(env_, "", class_loader_, library_search_path_, &reason))
<< reason;
jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42);
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index c07de79..ec69107 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -51,6 +51,7 @@
instruction_set,
/* instruction_set_features*/ nullptr,
/* boot_image */ false,
+ /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 73b16d5..5b19284 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -112,6 +112,7 @@
insn_set,
insn_features_.get(),
/* boot_image */ false,
+ /* app_image */ false,
/* image_classes */ nullptr,
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index e804bee..8da9f06 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1127,17 +1127,23 @@
return target_offset;
}
- mirror::Class* GetTargetType(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
- mirror::DexCache* dex_cache = (dex_file_ == patch.TargetTypeDexFile())
+ mirror::DexCache* GetDexCache(const DexFile* target_dex_file)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ return (target_dex_file == dex_file_)
? dex_cache_
- : class_linker_->FindDexCache(Thread::Current(), *patch.TargetTypeDexFile());
+ : class_linker_->FindDexCache(Thread::Current(), *target_dex_file);
+ }
+
+ mirror::Class* GetTargetType(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::DexCache* dex_cache = GetDexCache(patch.TargetTypeDexFile());
mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
CHECK(type != nullptr);
return type;
}
mirror::String* GetTargetString(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
- mirror::String* string = dex_cache_->GetResolvedString(patch.TargetStringIndex());
+ mirror::DexCache* dex_cache = GetDexCache(patch.TargetStringDexFile());
+ mirror::String* string = dex_cache->GetResolvedString(patch.TargetStringIndex());
DCHECK(string != nullptr);
DCHECK(writer_->HasBootImage() ||
Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string));
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 659c6f8..6c6e5af 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -552,7 +552,11 @@
DCHECK(!IsAddedBlock(block));
first_index_bounds_check_map_.clear();
HGraphVisitor::VisitBasicBlock(block);
- AddComparesWithDeoptimization(block);
+ // We should never deoptimize from an OSR method; otherwise we might wrongly optimize
+ // code dominated by the deoptimization.
+ if (!GetGraph()->IsCompilingOsr()) {
+ AddComparesWithDeoptimization(block);
+ }
}
void Finish() {
@@ -796,6 +800,27 @@
ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
ApplyRangeFromComparison(left, block, false_successor, new_range);
}
+ } else if (cond == kCondNE || cond == kCondEQ) {
+ if (left->IsArrayLength() && lower.IsConstant() && upper.IsConstant()) {
+ // Special case:
+ // length == [c,d] yields [c, d] along true
+ // length != [c,d] yields [c, d] along false
+ if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) {
+ ValueRange* new_range = new (GetGraph()->GetArena())
+ ValueRange(GetGraph()->GetArena(), lower, upper);
+ ApplyRangeFromComparison(
+ left, block, cond == kCondEQ ? true_successor : false_successor, new_range);
+ }
+ // In addition:
+ // length == 0 yields [1, max] along false
+ // length != 0 yields [1, max] along true
+ if (lower.GetConstant() == 0 && upper.GetConstant() == 0) {
+ ValueRange* new_range = new (GetGraph()->GetArena())
+ ValueRange(GetGraph()->GetArena(), ValueBound(nullptr, 1), ValueBound::Max());
+ ApplyRangeFromComparison(
+ left, block, cond == kCondEQ ? false_successor : true_successor, new_range);
+ }
+ }
}
}
@@ -951,13 +976,7 @@
void VisitIf(HIf* instruction) OVERRIDE {
if (instruction->InputAt(0)->IsCondition()) {
HCondition* cond = instruction->InputAt(0)->AsCondition();
- IfCondition cmp = cond->GetCondition();
- if (cmp == kCondGT || cmp == kCondGE ||
- cmp == kCondLT || cmp == kCondLE) {
- HInstruction* left = cond->GetLeft();
- HInstruction* right = cond->GetRight();
- HandleIf(instruction, left, right, cmp);
- }
+ HandleIf(instruction, cond->GetLeft(), cond->GetRight(), cond->GetCondition());
}
}
@@ -1358,6 +1377,11 @@
if (loop->IsIrreducible()) {
return false;
}
+ // We should never deoptimize from an OSR method; otherwise we might wrongly optimize
+ // code dominated by the deoptimization.
+ if (GetGraph()->IsCompilingOsr()) {
+ return false;
+ }
// A try boundary preheader is hard to handle.
// TODO: remove this restriction.
if (loop->GetPreHeader()->GetLastInstruction()->IsTryBoundary()) {
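A worked example of the new equality handling, sketched as source-level pseudocode in comments since the transformation operates on compiled dex code:

    // Illustrative effect of the kCondEQ/kCondNE cases added above:
    //
    //   if (a.length == 0) {
    //     return;          // false successor: a.length known in [1, max]
    //   }
    //   use(a[0]);         // this bounds check becomes removable
    //
    // Symmetrically, `a.length != 0` establishes [1, max] along the true
    // successor, and `a.length == c` pins the range to [c, c] along true.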
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 45d23fe..197e473 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5183,10 +5183,10 @@
case HLoadString::LoadKind::kBootImageAddress:
break;
case HLoadString::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
// We disable pc-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
// TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e8e6b68..9680f2b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4010,10 +4010,10 @@
case HLoadString::LoadKind::kBootImageAddress:
break;
case HLoadString::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index e73e880..6dc480b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3303,17 +3303,6 @@
int shift;
CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
- NearLabel ndiv;
- NearLabel end;
- // If numerator is 0, the result is 0, no computation needed.
- __ testl(eax, eax);
- __ j(kNotEqual, &ndiv);
-
- __ xorl(out, out);
- __ jmp(&end);
-
- __ Bind(&ndiv);
-
// Save the numerator.
__ movl(num, eax);
@@ -3348,7 +3337,6 @@
} else {
__ movl(eax, edx);
}
- __ Bind(&end);
}
void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instruction) {
@@ -5977,7 +5965,7 @@
DCHECK(GetCompilerOptions().GetCompilePic());
FALLTHROUGH_INTENDED;
case HLoadString::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJit()); // Note: boot image is also non-JIT.
+ DCHECK(!Runtime::Current()->UseJitCompilation()); // Note: boot image is also non-JIT.
// We disable pc-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
// TODO: Create as many X86ComputeBaseMethodAddress instructions as needed for methods
@@ -5989,7 +5977,7 @@
case HLoadString::LoadKind::kBootImageAddress:
break;
case HLoadString::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
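The deleted instructions in the div/rem hunk above special-cased a zero numerator. Dropping them looks safe because the magic-number sequence already yields zero there; the reasoning (mine, not stated in the patch) in brief:

    // q = hi32(num * magic) (optionally +/- num) >> shift, plus a sign fix-up.
    // When num == 0: hi32(0 * magic) == 0, the +/- num correction adds 0,
    // shifting 0 gives 0, and the sign fix-up (0 >> 31) adds 0, so q == 0.
    // For rem, num - q * imm == 0 as well. The fast path only added a
    // test-and-branch to the common non-zero case; the same cleanup appears
    // in the x86-64 generator below.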
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5576d83..96ec09c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -3390,16 +3390,6 @@
__ movl(numerator, eax);
- NearLabel no_div;
- NearLabel end;
- __ testl(eax, eax);
- __ j(kNotEqual, &no_div);
-
- __ xorl(out, out);
- __ jmp(&end);
-
- __ Bind(&no_div);
-
__ movl(eax, Immediate(magic));
__ imull(numerator);
@@ -3425,7 +3415,6 @@
} else {
__ movl(eax, edx);
}
- __ Bind(&end);
} else {
int64_t imm = second.GetConstant()->AsLongConstant()->GetValue();
@@ -5413,10 +5402,10 @@
case HLoadString::LoadKind::kBootImageAddress:
break;
case HLoadString::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 96837a8..968e267 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -258,6 +258,15 @@
VisitInstruction(check);
}
+void GraphChecker::VisitDeoptimize(HDeoptimize* deopt) {
+ if (GetGraph()->IsCompilingOsr()) {
+ AddError(StringPrintf("A graph compiled with OSR cannot have a HDeoptimize instruction"));
+ }
+
+ // Perform the instruction base checks too.
+ VisitInstruction(deopt);
+}
+
void GraphChecker::VisitTryBoundary(HTryBoundary* try_boundary) {
ArrayRef<HBasicBlock* const> handlers = try_boundary->GetExceptionHandlers();
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 83b1984..3060c80 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -57,6 +57,7 @@
void VisitCheckCast(HCheckCast* check) OVERRIDE;
void VisitCondition(HCondition* op) OVERRIDE;
void VisitConstant(HConstant* instruction) OVERRIDE;
+ void VisitDeoptimize(HDeoptimize* instruction) OVERRIDE;
void VisitIf(HIf* instruction) OVERRIDE;
void VisitInstanceOf(HInstanceOf* check) OVERRIDE;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index ff4b9a7..59de895 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -308,7 +308,7 @@
// Check if we can use an inline cache.
ArtMethod* caller = graph_->GetArtMethod();
- if (Runtime::Current()->UseJit()) {
+ if (Runtime::Current()->UseJitCompilation()) {
// Under JIT, we should always know the caller.
DCHECK(caller != nullptr);
ScopedProfilingInfoInlineUse spiis(caller, soa.Self());
@@ -322,7 +322,13 @@
return false;
} else if (ic.IsMonomorphic()) {
MaybeRecordStat(kMonomorphicCall);
- return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic);
+ if (outermost_graph_->IsCompilingOsr()) {
+ // If we are compiling OSR, we pretend this call is polymorphic, as we may come from the
+ // interpreter and it may have seen different receiver types.
+ return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic);
+ } else {
+ return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic);
+ }
} else if (ic.IsPolymorphic()) {
MaybeRecordStat(kPolymorphicCall);
return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic);
@@ -510,6 +516,11 @@
bool deoptimize = all_targets_inlined &&
(i != InlineCache::kIndividualCacheSize - 1) &&
(ic.GetTypeAt(i + 1) == nullptr);
+
+ if (outermost_graph_->IsCompilingOsr()) {
+ // We do not support HDeoptimize in OSR methods.
+ deoptimize = false;
+ }
HInstruction* compare = AddTypeGuard(
receiver, cursor, bb_cursor, class_index, is_referrer, invoke_instruction, deoptimize);
if (deoptimize) {
@@ -623,7 +634,7 @@
ArtMethod* resolved_method,
const InlineCache& ic) {
// This optimization only works under JIT for now.
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->UseJitCompilation());
if (graph_->GetInstructionSet() == kMips64) {
// TODO: Support HClassTableGet for mips64.
return false;
@@ -672,7 +683,8 @@
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- if (!TryInlineAndReplace(invoke_instruction, actual_method, /* do_rtp */ false)) {
+ HInstruction* return_replacement = nullptr;
+ if (!TryBuildAndInline(invoke_instruction, actual_method, &return_replacement)) {
return false;
}
@@ -701,9 +713,6 @@
}
HNotEqual* compare = new (graph_->GetArena()) HNotEqual(class_table_get, constant);
- HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
- compare, invoke_instruction->GetDexPc());
- // TODO: Extend reference type propagation to understand the guard.
if (cursor != nullptr) {
bb_cursor->InsertInstructionAfter(receiver_class, cursor);
} else {
@@ -711,8 +720,20 @@
}
bb_cursor->InsertInstructionAfter(class_table_get, receiver_class);
bb_cursor->InsertInstructionAfter(compare, class_table_get);
- bb_cursor->InsertInstructionAfter(deoptimize, compare);
- deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
+
+ if (outermost_graph_->IsCompilingOsr()) {
+ CreateDiamondPatternForPolymorphicInline(compare, return_replacement, invoke_instruction);
+ } else {
+ // TODO: Extend reference type propagation to understand the guard.
+ HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
+ compare, invoke_instruction->GetDexPc());
+ bb_cursor->InsertInstructionAfter(deoptimize, compare);
+ deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
+ if (return_replacement != nullptr) {
+ invoke_instruction->ReplaceWith(return_replacement);
+ }
+ invoke_instruction->GetBlock()->RemoveInstruction(invoke_instruction);
+ }
// Run type propagation to get the guard typed.
ReferenceTypePropagation rtp_fixup(graph_,
@@ -744,6 +765,12 @@
HInstruction** return_replacement) {
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
+ if (method->IsProxyMethod()) {
+ VLOG(compiler) << "Method " << PrettyMethod(method)
+ << " is not inlined because of unimplemented inline support for proxy methods.";
+ return false;
+ }
+
// Check whether we're allowed to inline. The outermost compilation unit is the relevant
// dex file here (though the transitivity of an inline chain would allow checking the caller).
if (!compiler_driver_->MayInline(method->GetDexFile(),
@@ -802,7 +829,7 @@
if (!method->GetDeclaringClass()->IsVerified()) {
uint16_t class_def_idx = method->GetDeclaringClass()->GetDexClassDefIndex();
- if (Runtime::Current()->UseJit() ||
+ if (Runtime::Current()->UseJitCompilation() ||
!compiler_driver_->IsMethodVerifiedWithoutFailures(
method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
@@ -1265,6 +1292,8 @@
size_t HInliner::RunOptimizations(HGraph* callee_graph,
const DexFile::CodeItem* code_item,
const DexCompilationUnit& dex_compilation_unit) {
+ // Note: if the outermost_graph_ is being compiled for OSR, we should not run any
+ // optimization that could lead to a HDeoptimize. The following optimizations do not.
HDeadCodeElimination dce(callee_graph, stats_);
HConstantFolding fold(callee_graph);
HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
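The OSR-related edits in inliner.cc all follow from one invariant, now also enforced by GraphChecker::VisitDeoptimize above: an OSR-compiled graph must not contain HDeoptimize. Schematically, the inline-cache guard is therefore lowered two ways:

    // Non-OSR: guard + deoptimize (bail back to the interpreter on mismatch).
    //   if (receiver->klass != expected) deoptimize;
    //   inlined_body(...);
    //
    // OSR: guard + diamond, keeping the original invoke as the slow arm
    // (CreateDiamondPatternForPolymorphicInline above).
    //   if (receiver->klass == expected) {
    //     inlined_body(...);
    //   } else {
    //     result = receiver->Invoke(...);  // original virtual dispatch
    //   }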
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 863dd1c..39a1313 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -30,6 +30,10 @@
// Temporary measure until we have caught up with the Java 7 definition of Math.round. b/26327751
static constexpr bool kRoundIsPlusPointFive = false;
+// Positive floating-point infinities.
+static constexpr uint32_t kPositiveInfinityFloat = 0x7f800000U;
+static constexpr uint64_t kPositiveInfinityDouble = UINT64_C(0x7ff0000000000000);
+
// Recognize intrinsics from HInvoke nodes.
class IntrinsicsRecognizer : public HOptimization {
public:
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 86b7bc1..4e3ace4 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1115,15 +1115,15 @@
ArenaAllocator* allocator,
bool start_at_zero) {
LocationSummary* locations = invoke->GetLocations();
- Register tmp_reg = locations->GetTemp(0).AsRegister<Register>();
// Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
// Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
- // or directly dispatch if we have a constant.
+ // or directly dispatch for a large constant, or omit slow-path for a small constant or a char.
SlowPathCode* slow_path = nullptr;
- if (invoke->InputAt(1)->IsIntConstant()) {
+ HInstruction* code_point = invoke->InputAt(1);
+ if (code_point->IsIntConstant()) {
if (static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()) >
std::numeric_limits<uint16_t>::max()) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
@@ -1134,16 +1134,18 @@
__ Bind(slow_path->GetExitLabel());
return;
}
- } else {
+ } else if (code_point->GetType() != Primitive::kPrimChar) {
Register char_reg = locations->InAt(1).AsRegister<Register>();
- __ LoadImmediate(tmp_reg, std::numeric_limits<uint16_t>::max());
- __ cmp(char_reg, ShifterOperand(tmp_reg));
+ // 0xffff is not a modified immediate but 0x10000 is, so use `>= 0x10000` instead of `> 0xffff`.
+ __ cmp(char_reg,
+ ShifterOperand(static_cast<uint32_t>(std::numeric_limits<uint16_t>::max()) + 1));
slow_path = new (allocator) IntrinsicSlowPathARM(invoke);
codegen->AddSlowPath(slow_path);
- __ b(slow_path->GetEntryLabel(), HI);
+ __ b(slow_path->GetEntryLabel(), HS);
}
if (start_at_zero) {
+ Register tmp_reg = locations->GetTemp(0).AsRegister<Register>();
DCHECK_EQ(tmp_reg, R2);
// Start-index = 0.
__ LoadImmediate(tmp_reg, 0);
@@ -1170,7 +1172,7 @@
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
locations->SetOut(Location::RegisterLocation(R0));
- // Need a temp for slow-path codepoint compare, and need to send start-index=0.
+ // Need to send start-index=0.
locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
}
@@ -1190,9 +1192,6 @@
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
locations->SetOut(Location::RegisterLocation(R0));
-
- // Need a temp for slow-path codepoint compare.
- locations->AddTemp(Location::RequiresRegister());
}
void IntrinsicCodeGeneratorARM::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1985,6 +1984,56 @@
__ Bind(&done);
}
+void IntrinsicLocationsBuilderARM::VisitFloatIsInfinite(HInvoke* invoke) {
+ CreateFPToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitFloatIsInfinite(HInvoke* invoke) {
+ ArmAssembler* const assembler = GetAssembler();
+ LocationSummary* const locations = invoke->GetLocations();
+ const Register out = locations->Out().AsRegister<Register>();
+ // Shifting left by 1 bit makes the value encodable as an immediate operand;
+ // we don't care about the sign bit anyway.
+ constexpr uint32_t infinity = kPositiveInfinityFloat << 1U;
+
+ __ vmovrs(out, locations->InAt(0).AsFpuRegister<SRegister>());
+ // We don't care about the sign bit, so shift left.
+ __ Lsl(out, out, 1);
+ __ eor(out, out, ShifterOperand(infinity));
+ // If the result is 0, then it has 32 leading zeros, and less than that otherwise.
+ __ clz(out, out);
+ // Any number less than 32 logically shifted right by 5 bits results in 0;
+ // the same operation on 32 yields 1.
+ __ Lsr(out, out, 5);
+}
+
+void IntrinsicLocationsBuilderARM::VisitDoubleIsInfinite(HInvoke* invoke) {
+ CreateFPToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitDoubleIsInfinite(HInvoke* invoke) {
+ ArmAssembler* const assembler = GetAssembler();
+ LocationSummary* const locations = invoke->GetLocations();
+ const Register out = locations->Out().AsRegister<Register>();
+ // The highest 32 bits of double precision positive infinity separated into
+ // two constants encodable as immediate operands.
+ constexpr uint32_t infinity_high = 0x7f000000U;
+ constexpr uint32_t infinity_high2 = 0x00f00000U;
+
+ static_assert((infinity_high | infinity_high2) ==
+                   static_cast<uint32_t>(kPositiveInfinityDouble >> 32U),
+               "The constants do not add up to the high 32 bits of double "
+               "precision positive infinity.");
+ __ vmovrrd(IP, out, FromLowSToD(locations->InAt(0).AsFpuRegisterPairLow<SRegister>()));
+ __ eor(out, out, ShifterOperand(infinity_high));
+ __ eor(out, out, ShifterOperand(infinity_high2));
+ // We don't care about the sign bit, so shift left.
+ __ orr(out, IP, ShifterOperand(out, LSL, 1));
+ // If the result is 0, then it has 32 leading zeros, and fewer than that otherwise.
+ __ clz(out, out);
+ // Any number less than 32 logically shifted right by 5 bits results in 0;
+ // the same operation on 32 yields 1.
+ __ Lsr(out, out, 5);
+}
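// Why the high word is XORed twice (a sketch for illustration only):
// 0x7ff00000 is not representable as an ARM modified immediate (an 8-bit
// value rotated right by an even amount), but it splits into two constants
// that are. XOR is associative, so the pair of EORs clears the infinity
// pattern just as a single, unencodable EOR with 0x7ff00000 would.
#include <cstdint>
static_assert((0x7f000000u | 0x00f00000u) == 0x7ff00000u,
              "two modified immediates cover the double exponent mask");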
+
UNIMPLEMENTED_INTRINSIC(ARM, IntegerBitCount)
UNIMPLEMENTED_INTRINSIC(ARM, LongBitCount)
UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble)
@@ -2001,8 +2050,6 @@
UNIMPLEMENTED_INTRINSIC(ARM, UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARM, SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ARM, ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(ARM, FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(ARM, DoubleIsInfinite)
UNIMPLEMENTED_INTRINSIC(ARM, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM, IntegerLowestOneBit)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 04ae3a6..cc5fd65 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -47,6 +47,7 @@
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
using helpers::InputRegisterAt;
+using helpers::OutputRegister;
namespace {
@@ -1173,31 +1174,118 @@
void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCall,
+ invoke->InputAt(1)->CanBeNull()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
kIntrinsified);
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
- locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
vixl::MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
+ Register str = XRegisterFrom(locations->InAt(0));
+ Register arg = XRegisterFrom(locations->InAt(1));
+ Register out = OutputRegister(invoke);
+
+ Register temp0 = WRegisterFrom(locations->GetTemp(0));
+ Register temp1 = WRegisterFrom(locations->GetTemp(1));
+ Register temp2 = WRegisterFrom(locations->GetTemp(2));
+
+ vixl::Label loop;
+ vixl::Label find_char_diff;
+ vixl::Label end;
+
+ // Get offsets of count and value fields within a string object.
+ const int32_t count_offset = mirror::String::CountOffset().Int32Value();
+ const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+
// Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
- Register argument = WRegisterFrom(locations->InAt(1));
- __ Cmp(argument, 0);
- SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
- codegen_->AddSlowPath(slow_path);
- __ B(eq, slow_path->GetEntryLabel());
+ // Take the slow path and throw if the input can be, and actually is, null.
+ SlowPathCodeARM64* slow_path = nullptr;
+ const bool can_slow_path = invoke->InputAt(1)->CanBeNull();
+ if (can_slow_path) {
+ slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Cbz(arg, slow_path->GetEntryLabel());
+ }
- __ Ldr(
- lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pStringCompareTo).Int32Value()));
- __ Blr(lr);
- __ Bind(slow_path->GetExitLabel());
+ // Reference equality check; return 0 if the references are the same.
+ __ Subs(out, str, arg);
+ __ B(&end, eq);
+ // Load lengths of this and argument strings.
+ __ Ldr(temp0, MemOperand(str.X(), count_offset));
+ __ Ldr(temp1, MemOperand(arg.X(), count_offset));
+ // Return zero if both strings are empty.
+ __ Orr(out, temp0, temp1);
+ __ Cbz(out, &end);
+ // out = length diff.
+ __ Subs(out, temp0, temp1);
+ // temp2 = min(len(str), len(arg)).
+ __ Csel(temp2, temp1, temp0, ge);
+ // Shorter string is empty?
+ __ Cbz(temp2, &end);
+
+ // Store offset of string value in preparation for comparison loop.
+ __ Mov(temp1, value_offset);
+
+ UseScratchRegisterScope scratch_scope(masm);
+ Register temp4 = scratch_scope.AcquireX();
+
+ // Assertions that must hold in order to compare strings 4 characters at a time.
+ DCHECK_ALIGNED(value_offset, 8);
+ static_assert(IsAligned<8>(kObjectAlignment), "String of odd length is not zero padded");
+
+ const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ DCHECK_EQ(char_size, 2u);
+
+ // Promote temp0 to an X reg, ready for LDR.
+ temp0 = temp0.X();
+
+ // Loop to compare 4x16-bit characters at a time (ok because of string data alignment).
+ __ Bind(&loop);
+ __ Ldr(temp4, MemOperand(str.X(), temp1));
+ __ Ldr(temp0, MemOperand(arg.X(), temp1));
+ __ Cmp(temp4, temp0);
+ __ B(ne, &find_char_diff);
+ __ Add(temp1, temp1, char_size * 4);
+ __ Subs(temp2, temp2, 4);
+ __ B(gt, &loop);
+ __ B(&end);
+
+ // Promote temp1 to an X reg, ready for EOR.
+ temp1 = temp1.X();
+
+ // Find the single 16-bit character difference.
+ __ Bind(&find_char_diff);
+ // Get the bit position of the first character that differs.
+ __ Eor(temp1, temp0, temp4);
+ __ Rbit(temp1, temp1);
+ __ Clz(temp1, temp1);
+ __ Bic(temp1, temp1, 0xf);
+ // If the number of 16-bit chars remaining <= the index where the difference occurs (0-3), then
+ // the difference occurs outside the remaining string data, so just return length diff (out).
+ __ Cmp(temp2, Operand(temp1, LSR, 4));
+ __ B(le, &end);
+ // Extract the characters and calculate the difference.
+ __ Lsr(temp0, temp0, temp1);
+ __ Lsr(temp4, temp4, temp1);
+ __ And(temp4, temp4, 0xffff);
+ __ Sub(out, temp4, Operand(temp0, UXTH));
+
+ __ Bind(&end);
+
+ if (can_slow_path) {
+ __ Bind(slow_path->GetExitLabel());
+ }
}
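// Illustrative C++ model of the generated compare loop (a sketch only; it
// assumes both char arrays are padded to an 8-byte multiple, mirroring ART's
// aligned, zero-padded string layout, a little-endian target, and the
// GCC/Clang __builtin_ctzll intrinsic).
#include <algorithm>
#include <cstdint>
#include <cstring>
static int32_t CompareToModel(const uint16_t* lhs, int32_t lhs_len,
                              const uint16_t* rhs, int32_t rhs_len) {
  int32_t out = lhs_len - rhs_len;                 // Subs: length difference
  int32_t remaining = std::min(lhs_len, rhs_len);  // Csel: shorter length
  for (int32_t i = 0; remaining > 0; i += 4, remaining -= 4) {
    uint64_t a, b;                                 // Ldr: four chars per load
    std::memcpy(&a, lhs + i, sizeof(a));
    std::memcpy(&b, rhs + i, sizeof(b));
    if (a != b) {
      int bit = __builtin_ctzll(a ^ b) & ~0xf;     // Rbit + Clz + Bic
      if (remaining <= (bit >> 4)) {
        break;                                     // Difference lies past valid data.
      }
      return static_cast<int32_t>((a >> bit) & 0xffff) -
             static_cast<int32_t>((b >> bit) & 0xffff);
    }
  }
  return out;                                      // Equal prefix: length decides.
}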
void IntrinsicLocationsBuilderARM64::VisitStringEquals(HInvoke* invoke) {
@@ -1302,15 +1390,15 @@
ArenaAllocator* allocator,
bool start_at_zero) {
LocationSummary* locations = invoke->GetLocations();
- Register tmp_reg = WRegisterFrom(locations->GetTemp(0));
// Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
// Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
- // or directly dispatch if we have a constant.
+ // or directly dispatch for a large constant, or omit slow-path for a small constant or a char.
SlowPathCodeARM64* slow_path = nullptr;
- if (invoke->InputAt(1)->IsIntConstant()) {
+ HInstruction* code_point = invoke->InputAt(1);
+ if (code_point->IsIntConstant()) {
if (static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()) > 0xFFFFU) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
// rare, so for simplicity just put the full slow-path down and branch unconditionally.
@@ -1320,17 +1408,17 @@
__ Bind(slow_path->GetExitLabel());
return;
}
- } else {
+ } else if (code_point->GetType() != Primitive::kPrimChar) {
Register char_reg = WRegisterFrom(locations->InAt(1));
- __ Mov(tmp_reg, 0xFFFF);
- __ Cmp(char_reg, Operand(tmp_reg));
+ __ Tst(char_reg, 0xFFFF0000);
slow_path = new (allocator) IntrinsicSlowPathARM64(invoke);
codegen->AddSlowPath(slow_path);
- __ B(hi, slow_path->GetEntryLabel());
+ __ B(ne, slow_path->GetEntryLabel());
}
if (start_at_zero) {
// Start-index = 0.
+ Register tmp_reg = WRegisterFrom(locations->GetTemp(0));
__ Mov(tmp_reg, 0);
}
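// Equivalent predicate for the TST above (a sketch for illustration): for an
// unsigned 32-bit code point, (cp & 0xFFFF0000) != 0 holds exactly when
// cp > 0xFFFF, so TST + B.NE replaces MOV + CMP + B.HI and frees the temp
// register previously reserved for the 0xFFFF constant.
#include <cstdint>
static inline bool NeedsSlowPathModel(uint32_t cp) {
  return (cp & 0xFFFF0000u) != 0;  // Same truth value as cp > 0xFFFFu.
}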
@@ -1354,7 +1442,7 @@
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
- // Need a temp for slow-path codepoint compare, and need to send start_index=0.
+ // Need to send start_index=0.
locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
}
@@ -1374,9 +1462,6 @@
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
-
- // Need a temp for slow-path codepoint compare.
- locations->AddTemp(Location::RequiresRegister());
}
void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -2201,9 +2286,46 @@
__ Bind(slow_path->GetExitLabel());
}
+static void GenIsInfinite(LocationSummary* locations,
+ bool is64bit,
+ vixl::MacroAssembler* masm) {
+ Operand infinity;
+ Register out;
+
+ if (is64bit) {
+ infinity = kPositiveInfinityDouble;
+ out = XRegisterFrom(locations->Out());
+ } else {
+ infinity = kPositiveInfinityFloat;
+ out = WRegisterFrom(locations->Out());
+ }
+
+ const Register zero = vixl::Assembler::AppropriateZeroRegFor(out);
+
+ MoveFPToInt(locations, is64bit, masm);
+ __ Eor(out, out, infinity);
+ // We don't care about the sign bit, so shift left.
+ __ Cmp(zero, Operand(out, LSL, 1));
+ __ Cset(out, eq);
+}
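// Illustrative scalar equivalent of GenIsInfinite (a sketch, not ART code):
// after XORing with +infinity, shifting out the sign bit leaves zero exactly
// for +/-infinity, which Cmp/Cset turns into the boolean result.
#include <cstdint>
#include <cstring>
static bool IsInfinityDoubleModel(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // MoveFPToInt
  bits ^= 0x7ff0000000000000ull;         // Eor with kPositiveInfinityDouble
  return (bits << 1) == 0;               // Cmp(zero, out LSL 1) + Cset(eq)
}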
+
+void IntrinsicLocationsBuilderARM64::VisitFloatIsInfinite(HInvoke* invoke) {
+ CreateFPToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) {
+ GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
+ CreateFPToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
+ GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+}
+
UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(ARM64, FloatIsInfinite)
-UNIMPLEMENTED_INTRINSIC(ARM64, DoubleIsInfinite)
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 19c6a22..20b61f8 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2067,10 +2067,11 @@
// Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
- // Check for code points > 0xFFFF. Either a slow-path check when we
- // don't know statically, or directly dispatch if we have a constant.
+ // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
+ // or directly dispatch for a large constant, or omit slow-path for a small constant or a char.
SlowPathCodeMIPS* slow_path = nullptr;
- if (invoke->InputAt(1)->IsIntConstant()) {
+ HInstruction* code_point = invoke->InputAt(1);
+ if (code_point->IsIntConstant()) {
if (!IsUint<16>(invoke->InputAt(1)->AsIntConstant()->GetValue())) {
// Always needs the slow-path. We could directly dispatch to it,
// but this case should be rare, so for simplicity just put the
@@ -2081,7 +2082,7 @@
__ Bind(slow_path->GetExitLabel());
return;
}
- } else {
+ } else if (code_point->GetType() != Primitive::kPrimChar) {
Register char_reg = locations->InAt(1).AsRegister<Register>();
// The "bltu" conditional branch tests to see if the character value
// fits in a valid 16-bit (MIPS halfword) value. If it doesn't then
@@ -2283,10 +2284,10 @@
// If one, or more, of the exponent bits is zero, then the number can't be infinite.
if (type == Primitive::kPrimDouble) {
__ MoveFromFpuHigh(TMP, in);
- __ LoadConst32(AT, 0x7FF00000);
+ __ LoadConst32(AT, High32Bits(kPositiveInfinityDouble));
} else {
__ Mfc1(TMP, in);
- __ LoadConst32(AT, 0x7F800000);
+ __ LoadConst32(AT, kPositiveInfinityFloat);
}
__ Xor(TMP, TMP, AT);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 1524e1e..7188e1c 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1563,10 +1563,11 @@
// Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
- // Check for code points > 0xFFFF. Either a slow-path check when we
- // don't know statically, or directly dispatch if we have a constant.
+ // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
+ // or directly dispatch for a large constant, or omit slow-path for a small constant or a char.
SlowPathCodeMIPS64* slow_path = nullptr;
- if (invoke->InputAt(1)->IsIntConstant()) {
+ HInstruction* code_point = invoke->InputAt(1);
+ if (code_point->IsIntConstant()) {
if (!IsUint<16>(invoke->InputAt(1)->AsIntConstant()->GetValue())) {
// Always needs the slow-path. We could directly dispatch to it,
// but this case should be rare, so for simplicity just put the
@@ -1577,7 +1578,7 @@
__ Bind(slow_path->GetExitLabel());
return;
}
- } else {
+ } else if (code_point->GetType() != Primitive::kPrimChar) {
GpuRegister char_reg = locations->InAt(1).AsRegister<GpuRegister>();
__ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 4aab3e2..d0edeca 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1418,9 +1418,10 @@
DCHECK_EQ(out, EDI);
// Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
- // or directly dispatch if we have a constant.
+ // or directly dispatch for a large constant, or omit slow-path for a small constant or a char.
SlowPathCode* slow_path = nullptr;
- if (invoke->InputAt(1)->IsIntConstant()) {
+ HInstruction* code_point = invoke->InputAt(1);
+ if (code_point->IsIntConstant()) {
if (static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()) >
std::numeric_limits<uint16_t>::max()) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
@@ -1431,7 +1432,7 @@
__ Bind(slow_path->GetExitLabel());
return;
}
- } else {
+ } else if (code_point->GetType() != Primitive::kPrimChar) {
__ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max()));
slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
codegen->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 1d32dc7..4ee2368 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1517,9 +1517,10 @@
DCHECK_EQ(out.AsRegister(), RDI);
// Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
- // or directly dispatch if we have a constant.
+ // or directly dispatch for a large constant, or omit slow-path for a small constant or a char.
SlowPathCode* slow_path = nullptr;
- if (invoke->InputAt(1)->IsIntConstant()) {
+ HInstruction* code_point = invoke->InputAt(1);
+ if (code_point->IsIntConstant()) {
if (static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()) >
std::numeric_limits<uint16_t>::max()) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
@@ -1530,7 +1531,7 @@
__ Bind(slow_path->GetExitLabel());
return;
}
- } else {
+ } else if (code_point->GetType() != Primitive::kPrimChar) {
__ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max()));
slow_path = new (allocator) IntrinsicSlowPathX86_64(invoke);
codegen->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 2de4158..8a75a90 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -733,19 +733,14 @@
if (Primitive::PrimitiveKind(heap_value->GetType())
!= Primitive::PrimitiveKind(instruction->GetType())) {
// The only situation where the same heap location has different type is when
- // we do an array get from a null constant. In order to stay properly typed
- // we do not merge the array gets.
+ // we do an array get on an instruction that originates from the null constant
+ // (the null could be behind a field access, an array access, a null check or
+ // a bound type).
+ // To remain properly typed for primitive types, we do not eliminate
+ // such array gets.
if (kIsDebugBuild) {
DCHECK(heap_value->IsArrayGet()) << heap_value->DebugName();
DCHECK(instruction->IsArrayGet()) << instruction->DebugName();
- HInstruction* array = instruction->AsArrayGet()->GetArray();
- DCHECK(array->IsNullCheck()) << array->DebugName();
- HInstruction* input = HuntForOriginalReference(array->InputAt(0));
- DCHECK(input->IsNullConstant()) << input->DebugName();
- array = heap_value->AsArrayGet()->GetArray();
- DCHECK(array->IsNullCheck()) << array->DebugName();
- input = HuntForOriginalReference(array->InputAt(0));
- DCHECK(input->IsNullConstant()) << input->DebugName();
}
return;
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 6703695..eb9c381 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -206,6 +206,22 @@
return instruction;
}
+static bool UpdateDominatorOfSuccessor(HBasicBlock* block, HBasicBlock* successor) {
+ DCHECK(ContainsElement(block->GetSuccessors(), successor));
+
+ HBasicBlock* old_dominator = successor->GetDominator();
+ HBasicBlock* new_dominator =
+ (old_dominator == nullptr) ? block
+ : CommonDominator::ForPair(old_dominator, block);
+
+ if (old_dominator == new_dominator) {
+ return false;
+ } else {
+ successor->SetDominator(new_dominator);
+ return true;
+ }
+}
+
void HGraph::ComputeDominanceInformation() {
DCHECK(reverse_post_order_.empty());
reverse_post_order_.reserve(blocks_.size());
@@ -228,15 +244,7 @@
worklist.pop_back();
} else {
HBasicBlock* successor = current->GetSuccessors()[successors_visited[current_id]++];
-
- if (successor->GetDominator() == nullptr) {
- successor->SetDominator(current);
- } else {
- // The CommonDominator can work for multiple blocks as long as the
- // domination information doesn't change. However, since we're changing
- // that information here, we can use the finder only for pairs of blocks.
- successor->SetDominator(CommonDominator::ForPair(successor->GetDominator(), current));
- }
+ UpdateDominatorOfSuccessor(current, successor);
// Once all the forward edges have been visited, we know the immediate
// dominator of the block. We can then start visiting its successors.
@@ -248,6 +256,44 @@
}
}
+ // Check if the graph has back edges not dominated by their respective headers.
+ // If so, we need to update the dominators of those headers and recursively of
+ // their successors. We do that with a fix-point iteration over all blocks.
+ // The algorithm is guaranteed to terminate because it loops only if the sum
+ // of all dominator chains has decreased in the current iteration.
+ bool must_run_fix_point = false;
+ for (HBasicBlock* block : blocks_) {
+ if (block != nullptr &&
+ block->IsLoopHeader() &&
+ block->GetLoopInformation()->HasBackEdgeNotDominatedByHeader()) {
+ must_run_fix_point = true;
+ break;
+ }
+ }
+ if (must_run_fix_point) {
+ bool update_occurred = true;
+ while (update_occurred) {
+ update_occurred = false;
+ for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ for (HBasicBlock* successor : block->GetSuccessors()) {
+ update_occurred |= UpdateDominatorOfSuccessor(block, successor);
+ }
+ }
+ }
+ }
+
+ // Make sure that there are no remaining blocks whose dominator information
+ // needs to be updated.
+ if (kIsDebugBuild) {
+ for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ for (HBasicBlock* successor : block->GetSuccessors()) {
+ DCHECK(!UpdateDominatorOfSuccessor(block, successor));
+ }
+ }
+ }
+
// Populate `dominated_blocks_` information after computing all dominators.
// The potential presence of irreducible loops requires to do it after.
for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
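// Generic shape of the fix-point above (a sketch; the Block type with a
// `successors` container and the update predicate are assumptions made for
// illustration, not ART's actual interfaces).
#include <vector>
template <typename Block, typename UpdateFn>
void RunDominatorFixPoint(const std::vector<Block*>& reverse_post_order,
                          UpdateFn update /* returns true iff the dominator changed */) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (Block* block : reverse_post_order) {
      for (Block* successor : block->successors) {
        changed |= update(block, successor);
      }
    }
  }
}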
@@ -595,20 +641,16 @@
blocks_.SetBit(header_->GetBlockId());
header_->SetInLoop(this);
- bool is_irreducible_loop = false;
- for (HBasicBlock* back_edge : GetBackEdges()) {
- DCHECK(back_edge->GetDominator() != nullptr);
- if (!header_->Dominates(back_edge)) {
- is_irreducible_loop = true;
- break;
- }
- }
+ bool is_irreducible_loop = HasBackEdgeNotDominatedByHeader();
if (is_irreducible_loop) {
ArenaBitVector visited(graph->GetArena(),
graph->GetBlocks().size(),
/* expandable */ false,
kArenaAllocGraphBuilder);
+ // Stop marking blocks at the loop header.
+ visited.SetBit(header_->GetBlockId());
+
for (HBasicBlock* back_edge : GetBackEdges()) {
PopulateIrreducibleRecursive(back_edge, &visited);
}
@@ -667,6 +709,16 @@
return last_position;
}
+bool HLoopInformation::HasBackEdgeNotDominatedByHeader() const {
+ for (HBasicBlock* back_edge : GetBackEdges()) {
+ DCHECK(back_edge->GetDominator() != nullptr);
+ if (!header_->Dominates(back_edge)) {
+ return true;
+ }
+ }
+ return false;
+}
+
bool HBasicBlock::Dominates(HBasicBlock* other) const {
// Walk up the dominator tree from `other`, to find out if `this`
// is an ancestor.
@@ -2396,6 +2448,7 @@
}
if (!NeedsEnvironment()) {
RemoveEnvironment();
+ SetSideEffects(SideEffects::None());
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index afb995d..829fe71 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -725,6 +725,8 @@
blocks_.ClearAllBits();
}
+ bool HasBackEdgeNotDominatedByHeader() const;
+
private:
// Internal recursive implementation of `Populate`.
void PopulateRecursive(HBasicBlock* block);
@@ -5530,6 +5532,7 @@
SetPackedFlag<kFlagIsInDexCache>(true);
DCHECK(!NeedsEnvironment());
RemoveEnvironment();
+ SetSideEffects(SideEffects::None());
}
size_t InputCount() const OVERRIDE {
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 7e1fd75..f2394f6 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -666,12 +666,6 @@
}
if (phi->GetBlock()->IsLoopHeader()) {
- if (!is_first_run_ && graph_->IsCompilingOsr()) {
- // Don't update the type of a loop phi when compiling OSR: we may have done
- // speculative optimizations dominating that phi, that do not hold at the
- // point the interpreter jumps to that loop header.
- return;
- }
// Set the initial type for the phi. Use the non back edge input for reaching
// a fixed point faster.
HInstruction* first_input = phi->InputAt(0);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 7a1bb31..08bd35f 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -99,7 +99,7 @@
if (direct_method != 0u) { // Should we use a direct pointer to the method?
// Note: For JIT, kDirectAddressWithFixup doesn't make sense at all and while
// kDirectAddress would be fine for image methods, we don't support it at the moment.
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
if (direct_method != static_cast<uintptr_t>(-1)) { // Is the method pointer known now?
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
method_load_data = direct_method;
@@ -109,7 +109,7 @@
} else { // Use dex cache.
DCHECK_EQ(target_method.dex_file, &graph_->GetDexFile());
if (use_pc_relative_instructions) { // Can we use PC-relative access to the dex cache arrays?
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
DexCacheArraysLayout layout(GetInstructionSetPointerSize(codegen_->GetInstructionSet()),
&graph_->GetDexFile());
@@ -121,7 +121,7 @@
if (direct_code != 0u) { // Should we use a direct pointer to the code?
// Note: For JIT, kCallPCRelative and kCallDirectWithFixup don't make sense at all and
// while kCallDirect would be fine for image methods, we don't support it at the moment.
- DCHECK(!Runtime::Current()->UseJit());
+ DCHECK(!Runtime::Current()->UseJitCompilation());
if (direct_code != static_cast<uintptr_t>(-1)) { // Is the code pointer known now?
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect;
direct_code_ptr = direct_code;
@@ -174,7 +174,7 @@
if (compiler_driver_->IsBootImage()) {
// Compiling boot image. Resolve the string and allocate it if needed.
- DCHECK(!runtime->UseJit());
+ DCHECK(!runtime->UseJitCompilation());
mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
CHECK(string != nullptr);
if (!compiler_driver_->GetSupportBootImageFixup()) {
@@ -187,7 +187,7 @@
? HLoadString::LoadKind::kBootImageLinkTimePcRelative
: HLoadString::LoadKind::kBootImageLinkTimeAddress;
}
- } else if (runtime->UseJit()) {
+ } else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
mirror::String* string = dex_cache->GetResolvedString(string_index);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 40dab74..1141fd1 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1003,6 +1003,15 @@
void AddBackEdgeUses(const HBasicBlock& block_at_use) {
DCHECK(block_at_use.IsInLoop());
+ if (block_at_use.GetGraph()->HasIrreducibleLoops()) {
+ // Linear order may not be well formed when irreducible loops are present,
+ // i.e. loop blocks may not be adjacent and a back edge may not be last,
+ // which violates assumptions made in this method.
+ return;
+ }
+
+ DCHECK(IsLinearOrderWellFormed(*block_at_use.GetGraph()));
+
// Add synthesized uses at the back edge of loops to help the register allocator.
// Note that this method is called in decreasing liveness order, to facilitate adding
// uses at the head of the `first_use_` linked list. Because below
@@ -1027,30 +1036,12 @@
if ((first_use_ != nullptr) && (first_use_->GetPosition() <= back_edge_use_position)) {
// There was a use already seen in this loop. Therefore the previous call to `AddUse`
// already inserted the backedge use. We can stop going outward.
- if (kIsDebugBuild) {
- if (!HasSynthesizeUseAt(back_edge_use_position)) {
- // There exists a use prior to `back_edge_use_position` but there is
- // no synthesized use at the back edge. This can happen in the presence
- // of irreducible loops, when blocks of the loop are not adjacent in
- // linear order, i.e. when there is an out-of-loop block between
- // `block_at_use` and `back_edge_position` that uses this interval.
- DCHECK(block_at_use.GetGraph()->HasIrreducibleLoops());
- DCHECK(!IsLinearOrderWellFormed(*block_at_use.GetGraph()));
- }
- }
+ DCHECK(HasSynthesizeUseAt(back_edge_use_position));
break;
}
- if (last_in_new_list != nullptr &&
- back_edge_use_position <= last_in_new_list->GetPosition()) {
- // Loops are not properly nested in the linear order, i.e. the back edge
- // of an outer loop preceeds blocks of an inner loop. This can happen
- // in the presence of irreducible loops.
- DCHECK(block_at_use.GetGraph()->HasIrreducibleLoops());
- DCHECK(!IsLinearOrderWellFormed(*block_at_use.GetGraph()));
- // We must bail out, otherwise we would generate an unsorted use list.
- break;
- }
+ DCHECK(last_in_new_list == nullptr ||
+ back_edge_use_position > last_in_new_list->GetPosition());
UsePosition* new_use = new (allocator_) UsePosition(
/* user */ nullptr,
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index aeb3109..0978ae1 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -139,8 +139,9 @@
continue;
}
- if (phi->InputCount() == 0) {
- DCHECK(phi->IsDead());
+ // If the phi is dead, we know we won't revive it and it will be removed,
+ // so don't process it.
+ if (phi->IsDead()) {
continue;
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 370583e..be38336 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1526,6 +1526,7 @@
instruction_set_,
instruction_set_features_.get(),
IsBootImage(),
+ IsAppImage(),
image_classes_.release(),
compiled_classes_.release(),
/* compiled_methods */ nullptr,
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 3c6a05d..d2ab699 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -59,6 +59,7 @@
#include "stack_map.h"
#include "ScopedLocalRef.h"
#include "thread_list.h"
+#include "type_lookup_table.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"
@@ -573,8 +574,15 @@
os << StringPrintf("location: %s\n", oat_dex_file.GetDexFileLocation().c_str());
os << StringPrintf("checksum: 0x%08x\n", oat_dex_file.GetDexFileLocationChecksum());
- // Create the verifier early.
+ // Print embedded dex file data range.
+ const uint8_t* const oat_file_begin = oat_dex_file.GetOatFile()->Begin();
+ const uint8_t* const dex_file_pointer = oat_dex_file.GetDexFilePointer();
+ uint32_t dex_offset = dchecked_integral_cast<uint32_t>(dex_file_pointer - oat_file_begin);
+ os << StringPrintf("dex-file: 0x%08x..0x%08x\n",
+ dex_offset,
+ dchecked_integral_cast<uint32_t>(dex_offset + oat_dex_file.FileSize() - 1));
+ // Create the dex file early; much of the subsequent output depends on it.
std::string error_msg;
const DexFile* const dex_file = OpenDexFile(&oat_dex_file, &error_msg);
if (dex_file == nullptr) {
@@ -583,6 +591,16 @@
return false;
}
+ // Print lookup table, if it exists.
+ if (oat_dex_file.GetLookupTableData() != nullptr) {
+ uint32_t table_offset = dchecked_integral_cast<uint32_t>(
+ oat_dex_file.GetLookupTableData() - oat_file_begin);
+ uint32_t table_size = TypeLookupTable::RawDataLength(*dex_file);
+ os << StringPrintf("type-table: 0x%08x..0x%08x\n",
+ table_offset,
+ table_offset + table_size - 1);
+ }
+
VariableIndentationOutputStream vios(&os);
ScopedIndentation indent1(&vios);
for (size_t class_def_index = 0;
@@ -1416,11 +1434,10 @@
indent_os << "\n";
// TODO: Dump fields.
// Dump methods after.
- const auto& methods_section = image_header_.GetMethodsSection();
DumpArtMethodVisitor visitor(this);
- methods_section.VisitPackedArtMethods(&visitor,
- image_space_.Begin(),
- image_header_.GetPointerSize());
+ image_header_.VisitPackedArtMethods(&visitor,
+ image_space_.Begin(),
+ image_header_.GetPointerSize());
// Dump the large objects separately.
heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
indent_os << "\n";
@@ -1779,6 +1796,7 @@
DCHECK(method != nullptr);
const void* quick_oat_code_begin = GetQuickOatCodeBegin(method);
const void* quick_oat_code_end = GetQuickOatCodeEnd(method);
+ const size_t pointer_size = image_header_.GetPointerSize();
OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader));
if (method->IsNative()) {
@@ -1792,13 +1810,16 @@
image_header_.GetPointerSize())) {
indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code_begin);
}
- } else if (method->IsAbstract() ||
- method->IsCalleeSaveMethod() ||
- method->IsResolutionMethod() ||
- (method == Runtime::Current()->GetImtConflictMethod()) ||
- method->IsImtUnimplementedMethod() ||
- method->IsClassInitializer()) {
+ } else if (method->IsAbstract() || method->IsClassInitializer()) {
// Don't print information for these.
+ } else if (method->IsRuntimeMethod()) {
+ ImtConflictTable* table = method->GetImtConflictTable(image_header_.GetPointerSize());
+ if (table != nullptr) {
+ indent_os << "IMT conflict table " << table << " method: ";
+ for (size_t i = 0, count = table->NumEntries(pointer_size); i < count; ++i) {
+ indent_os << PrettyMethod(table->GetImplementationMethod(i, pointer_size)) << " ";
+ }
+ }
} else {
const DexFile::CodeItem* code_item = method->GetCodeItem();
size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2;
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 93e40af..0a7ffda 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -472,8 +472,7 @@
void PatchOat::PatchArtFields(const ImageHeader* image_header) {
PatchOatArtFieldVisitor visitor(this);
- const auto& section = image_header->GetImageSection(ImageHeader::kSectionArtFields);
- section.VisitPackedArtFields(&visitor, heap_->Begin());
+ image_header->VisitPackedArtFields(&visitor, heap_->Begin());
}
class PatchOatArtMethodVisitor : public ArtMethodVisitor {
@@ -490,10 +489,20 @@
};
void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
- const auto& section = image_header->GetMethodsSection();
const size_t pointer_size = InstructionSetPointerSize(isa_);
PatchOatArtMethodVisitor visitor(this);
- section.VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
+ image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
+}
+
+void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) {
+ const size_t pointer_size = InstructionSetPointerSize(isa_);
+ // We can safely walk the target image since the conflict tables are independent.
+ image_header->VisitPackedImtConflictTables(
+ [this](ArtMethod* method) {
+ return RelocatedAddressOfPointer(method);
+ },
+ image_->Begin(),
+ pointer_size);
}
class FixupRootVisitor : public RootVisitor {
@@ -627,6 +636,7 @@
PatchArtFields(image_header);
PatchArtMethods(image_header);
+ PatchImtConflictTables(image_header);
PatchInternedStrings(image_header);
PatchClassTable(image_header);
// Patch dex file int/long arrays which point to ArtFields.
@@ -725,6 +735,7 @@
RelocatedAddressOfPointer(object->GetDexCacheResolvedTypes(pointer_size)), pointer_size);
copy->SetEntryPointFromQuickCompiledCodePtrSize(RelocatedAddressOfPointer(
object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)), pointer_size);
+ // No special handling for IMT conflict table since all pointers are moved by the same offset.
copy->SetEntryPointFromJniPtrSize(RelocatedAddressOfPointer(
object->GetEntryPointFromJniPtrSize(pointer_size)), pointer_size);
}
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 510ff1e..3ef837f 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -117,6 +117,8 @@
bool PatchImage(bool primary_image) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
+ void PatchImtConflictTables(const ImageHeader* image_header)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void PatchInternedStrings(const ImageHeader* image_header)
SHARED_REQUIRES(Locks::mutator_lock_);
void PatchClassTable(const ImageHeader* image_header)
diff --git a/profman/profman.cc b/profman/profman.cc
index 3e632bc..37a560d 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -216,7 +216,11 @@
}
void LogCompletionTime() {
- LOG(INFO) << "profman took " << PrettyDuration(NanoTime() - start_ns_);
+ static constexpr uint64_t kLogThresholdTime = MsToNs(100); // 100ms
+ uint64_t time_taken = NanoTime() - start_ns_;
+ if (time_taken > kLogThresholdTime) {
+ LOG(WARNING) << "profman took " << PrettyDuration(time_taken);
+ }
}
std::vector<std::string> profile_files_;
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index e358ff8..f0e9ac5 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -17,6 +17,7 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
+#include "entrypoints/quick/quick_default_init_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
@@ -47,67 +48,12 @@
extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t);
void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
- // JNI
- jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
-
- // Alloc
- ResetQuickAllocEntryPoints(qpoints);
+ DefaultInitEntryPoints(jpoints, qpoints);
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
qpoints->pCheckCast = art_quick_check_cast;
- // DexCache
- qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
- qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
- qpoints->pInitializeType = art_quick_initialize_type;
- qpoints->pResolveString = art_quick_resolve_string;
-
- // Field
- qpoints->pSet8Instance = art_quick_set8_instance;
- qpoints->pSet8Static = art_quick_set8_static;
- qpoints->pSet16Instance = art_quick_set16_instance;
- qpoints->pSet16Static = art_quick_set16_static;
- qpoints->pSet32Instance = art_quick_set32_instance;
- qpoints->pSet32Static = art_quick_set32_static;
- qpoints->pSet64Instance = art_quick_set64_instance;
- qpoints->pSet64Static = art_quick_set64_static;
- qpoints->pSetObjInstance = art_quick_set_obj_instance;
- qpoints->pSetObjStatic = art_quick_set_obj_static;
- qpoints->pGetByteInstance = art_quick_get_byte_instance;
- qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
- qpoints->pGetShortInstance = art_quick_get_short_instance;
- qpoints->pGetCharInstance = art_quick_get_char_instance;
- qpoints->pGet32Instance = art_quick_get32_instance;
- qpoints->pGet64Instance = art_quick_get64_instance;
- qpoints->pGetObjInstance = art_quick_get_obj_instance;
- qpoints->pGetByteStatic = art_quick_get_byte_static;
- qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
- qpoints->pGetShortStatic = art_quick_get_short_static;
- qpoints->pGetCharStatic = art_quick_get_char_static;
- qpoints->pGet32Static = art_quick_get32_static;
- qpoints->pGet64Static = art_quick_get64_static;
- qpoints->pGetObjStatic = art_quick_get_obj_static;
-
- // Array
- qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check;
- qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check;
- qpoints->pAputObject = art_quick_aput_obj;
- qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
-
- // JNI
- qpoints->pJniMethodStart = JniMethodStart;
- qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
- qpoints->pJniMethodEnd = JniMethodEnd;
- qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
- qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
- qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
- qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
-
- // Locks
- qpoints->pLockObject = art_quick_lock_object;
- qpoints->pUnlockObject = art_quick_unlock_object;
-
// Math
qpoints->pIdivmod = __aeabi_idivmod;
qpoints->pLdiv = __aeabi_ldivmod;
@@ -154,35 +100,6 @@
qpoints->pStringCompareTo = art_quick_string_compareto;
qpoints->pMemcpy = memcpy;
- // Invocation
- qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
- qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
- qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck =
- art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
- art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck =
- art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck =
- art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck =
- art_quick_invoke_virtual_trampoline_with_access_check;
-
- // Thread
- qpoints->pTestSuspend = art_quick_test_suspend;
-
- // Throws
- qpoints->pDeliverException = art_quick_deliver_exception;
- qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
- qpoints->pThrowDivZero = art_quick_throw_div_zero;
- qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
- qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
- qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
-
- // Deoptimization from compiled code.
- qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
-
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
qpoints->pReadBarrierMark = artReadBarrierMark;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index e6ff0aa..321b9d2 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -544,6 +544,15 @@
DELIVER_PENDING_EXCEPTION
END art_quick_lock_object
+ENTRY art_quick_lock_object_no_inline
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case we block
+ mov r1, r9 @ pass Thread::Current
+ bl artLockObjectFromCode @ (Object* obj, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_ZERO
+ DELIVER_PENDING_EXCEPTION
+END art_quick_lock_object_no_inline
+
/*
* Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
* r0 holds the possibly null object to lock.
@@ -601,6 +610,16 @@
DELIVER_PENDING_EXCEPTION
END art_quick_unlock_object
+ENTRY art_quick_unlock_object_no_inline
+ @ save callee saves in case exception allocation triggers GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2
+ mov r1, r9 @ pass Thread::Current
+ bl artUnlockObjectFromCode @ (Object* obj, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_ZERO
+ DELIVER_PENDING_EXCEPTION
+END art_quick_unlock_object_no_inline
+
/*
* Entry from managed code that calls artIsAssignableFromCode and on failure calls
* artThrowClassCastException.
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 4db9411..bf0f647 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -17,6 +17,7 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
+#include "entrypoints/quick/quick_default_init_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
@@ -30,67 +31,12 @@
const mirror::Class* ref_class);
void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
- // JNI
- jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
-
- // Alloc
- ResetQuickAllocEntryPoints(qpoints);
+ DefaultInitEntryPoints(jpoints, qpoints);
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
qpoints->pCheckCast = art_quick_check_cast;
- // DexCache
- qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
- qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
- qpoints->pInitializeType = art_quick_initialize_type;
- qpoints->pResolveString = art_quick_resolve_string;
-
- // Field
- qpoints->pSet8Instance = art_quick_set8_instance;
- qpoints->pSet8Static = art_quick_set8_static;
- qpoints->pSet16Instance = art_quick_set16_instance;
- qpoints->pSet16Static = art_quick_set16_static;
- qpoints->pSet32Instance = art_quick_set32_instance;
- qpoints->pSet32Static = art_quick_set32_static;
- qpoints->pSet64Instance = art_quick_set64_instance;
- qpoints->pSet64Static = art_quick_set64_static;
- qpoints->pSetObjInstance = art_quick_set_obj_instance;
- qpoints->pSetObjStatic = art_quick_set_obj_static;
- qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
- qpoints->pGetByteInstance = art_quick_get_byte_instance;
- qpoints->pGetCharInstance = art_quick_get_char_instance;
- qpoints->pGetShortInstance = art_quick_get_short_instance;
- qpoints->pGet32Instance = art_quick_get32_instance;
- qpoints->pGet64Instance = art_quick_get64_instance;
- qpoints->pGetObjInstance = art_quick_get_obj_instance;
- qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
- qpoints->pGetByteStatic = art_quick_get_byte_static;
- qpoints->pGetCharStatic = art_quick_get_char_static;
- qpoints->pGetShortStatic = art_quick_get_short_static;
- qpoints->pGet32Static = art_quick_get32_static;
- qpoints->pGet64Static = art_quick_get64_static;
- qpoints->pGetObjStatic = art_quick_get_obj_static;
-
- // Array
- qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check;
- qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check;
- qpoints->pAputObject = art_quick_aput_obj;
- qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
-
- // JNI
- qpoints->pJniMethodStart = JniMethodStart;
- qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
- qpoints->pJniMethodEnd = JniMethodEnd;
- qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
- qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
- qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
- qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
-
- // Locks
- qpoints->pLockObject = art_quick_lock_object;
- qpoints->pUnlockObject = art_quick_unlock_object;
-
// Math
// TODO null entrypoints not needed for ARM64 - generate inline.
qpoints->pCmpgDouble = nullptr;
@@ -134,38 +80,10 @@
// Intrinsics
qpoints->pIndexOf = art_quick_indexof;
- qpoints->pStringCompareTo = art_quick_string_compareto;
+ // The ARM64 StringCompareTo intrinsic does not call the runtime.
+ qpoints->pStringCompareTo = nullptr;
qpoints->pMemcpy = memcpy;
- // Invocation
- qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
- qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
- qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck =
- art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
- art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck =
- art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck =
- art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck =
- art_quick_invoke_virtual_trampoline_with_access_check;
-
- // Thread
- qpoints->pTestSuspend = art_quick_test_suspend;
-
- // Throws
- qpoints->pDeliverException = art_quick_deliver_exception;
- qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
- qpoints->pThrowDivZero = art_quick_throw_div_zero;
- qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
- qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
- qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
-
- // Deoptimization from compiled code.
- qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
-
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
qpoints->pReadBarrierMark = artReadBarrierMark;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index 613bb5c..cad13b2 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -39,7 +39,7 @@
if (!needs_a53_835769_fix) {
// Check to see if this is an expected variant.
static const char* arm64_known_variants[] = {
- "denver64", "kryo"
+ "denver64", "kryo", "exynos-m1"
};
if (!FindVariantInArray(arm64_known_variants, arraysize(arm64_known_variants), variant)) {
std::ostringstream os;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 1cdda2d..1fba09b 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1113,6 +1113,14 @@
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object
+ENTRY art_quick_lock_object_no_inline
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case we block
+ mov x1, xSELF // pass Thread::Current
+ bl artLockObjectFromCode // (Object* obj, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_W0_IS_ZERO_OR_DELIVER
+END art_quick_lock_object_no_inline
+
/*
* Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
* x0 holds the possibly null object to lock.
@@ -1171,6 +1179,14 @@
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object
+ENTRY art_quick_unlock_object_no_inline
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case exception allocation triggers GC
+ mov x1, xSELF // pass Thread::Current
+ bl artUnlockObjectFromCode // (Object* obj, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_W0_IS_ZERO_OR_DELIVER
+END art_quick_unlock_object_no_inline
+
/*
* Entry from managed code that calls artIsAssignableFromCode and on failure calls
* artThrowClassCastException.
@@ -2187,108 +2203,3 @@
asr x0, x0, #1
ret
END art_quick_indexof
-
- /*
- * String's compareTo.
- *
- * TODO: Not very optimized.
- *
- * On entry:
- * x0: this object pointer
- * x1: comp object pointer
- *
- */
- .extern __memcmp16
-ENTRY art_quick_string_compareto
- mov x2, x0 // x0 is return, use x2 for first input.
- sub x0, x2, x1 // Same string object?
- cbnz x0,1f
- ret
-1: // Different string objects.
-
- ldr w4, [x2, #MIRROR_STRING_COUNT_OFFSET]
- ldr w3, [x1, #MIRROR_STRING_COUNT_OFFSET]
- add x2, x2, #MIRROR_STRING_VALUE_OFFSET
- add x1, x1, #MIRROR_STRING_VALUE_OFFSET
-
- /*
- * Now: Data* Count
- * first arg x2 w4
- * second arg x1 w3
- */
-
- // x0 := str1.length(w4) - str2.length(w3). ldr zero-extended w3/w4 into x3/x4.
- subs x0, x4, x3
- // Min(count1, count2) into w3.
- csel x3, x3, x4, ge
-
- // TODO: Tune this value.
- // Check for long string, do memcmp16 for them.
- cmp w3, #28 // Constant from arm32.
- bgt .Ldo_memcmp16
-
- /*
- * Now:
- * x2: *first string data
- * x1: *second string data
- * w3: iteration count
- * x0: return value if comparison equal
- * x4, x5, x6, x7: free
- */
-
- // Do a simple unrolled loop.
-.Lloop:
- // At least two more elements?
- subs w3, w3, #2
- b.lt .Lremainder_or_done
-
- ldrh w4, [x2], #2
- ldrh w5, [x1], #2
-
- ldrh w6, [x2], #2
- ldrh w7, [x1], #2
-
- subs w4, w4, w5
- b.ne .Lw4_result
-
- subs w6, w6, w7
- b.ne .Lw6_result
-
- b .Lloop
-
-.Lremainder_or_done:
- adds w3, w3, #1
- b.eq .Lremainder
- ret
-
-.Lremainder:
- ldrh w4, [x2], #2
- ldrh w5, [x1], #2
- subs w4, w4, w5
- b.ne .Lw4_result
- ret
-
-// Result is in w4
-.Lw4_result:
- sxtw x0, w4
- ret
-
-// Result is in w6
-.Lw6_result:
- sxtw x0, w6
- ret
-
-.Ldo_memcmp16:
- mov x14, x0 // Save x0 and LR. __memcmp16 does not use these temps.
- mov x15, xLR // TODO: Codify and check that?
-
- mov x0, x2
- uxtw x2, w3
- bl __memcmp16
-
- mov xLR, x15 // Restore LR.
-
- cmp x0, #0 // Check the memcmp difference.
- csel x0, x0, x14, ne // x0 := x0 != 0 ? x14(prev x0=length diff) : x1.
- ret
-END art_quick_string_compareto
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 51eb77f..45e33a8 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -59,6 +59,9 @@
extern "C" int64_t __moddi3(int64_t, int64_t);
void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
+ // Note: MIPS has asserts checking for the type of entrypoint. Don't move this
+ // to DefaultInitEntryPoints().
+
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
@@ -167,9 +170,14 @@
"Non-direct C stub marked direct.");
// Locks
- qpoints->pLockObject = art_quick_lock_object;
+ if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) {
+ qpoints->pLockObject = art_quick_lock_object_no_inline;
+ qpoints->pUnlockObject = art_quick_unlock_object_no_inline;
+ } else {
+ qpoints->pLockObject = art_quick_lock_object;
+ qpoints->pUnlockObject = art_quick_unlock_object;
+ }
static_assert(!IsDirectEntrypoint(kQuickLockObject), "Non-direct C stub marked direct.");
- qpoints->pUnlockObject = art_quick_unlock_object;
static_assert(!IsDirectEntrypoint(kQuickUnlockObject), "Non-direct C stub marked direct.");
// Math
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 8939a48..3ee26af 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -906,6 +906,16 @@
RETURN_IF_ZERO
END art_quick_lock_object
+ENTRY art_quick_lock_object_no_inline
+ beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
+ la $t9, artLockObjectFromCode
+ jalr $t9 # (Object* obj, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_lock_object_no_inline
+
/*
* Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
*/
@@ -920,6 +930,16 @@
RETURN_IF_ZERO
END art_quick_unlock_object
+ENTRY art_quick_unlock_object_no_inline
+ beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ la $t9, artUnlockObjectFromCode
+ jalr $t9 # (Object* obj, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_unlock_object_no_inline
+
/*
* Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
*/
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 4bdb38e..030c127 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -18,6 +18,7 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
+#include "entrypoints/quick/quick_default_init_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
@@ -57,67 +58,12 @@
extern "C" int64_t __moddi3(int64_t, int64_t);
void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
- // JNI
- jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
-
- // Alloc
- ResetQuickAllocEntryPoints(qpoints);
+ DefaultInitEntryPoints(jpoints, qpoints);
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
qpoints->pCheckCast = art_quick_check_cast;
- // DexCache
- qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
- qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
- qpoints->pInitializeType = art_quick_initialize_type;
- qpoints->pResolveString = art_quick_resolve_string;
-
- // Field
- qpoints->pSet8Instance = art_quick_set8_instance;
- qpoints->pSet8Static = art_quick_set8_static;
- qpoints->pSet16Instance = art_quick_set16_instance;
- qpoints->pSet16Static = art_quick_set16_static;
- qpoints->pSet32Instance = art_quick_set32_instance;
- qpoints->pSet32Static = art_quick_set32_static;
- qpoints->pSet64Instance = art_quick_set64_instance;
- qpoints->pSet64Static = art_quick_set64_static;
- qpoints->pSetObjInstance = art_quick_set_obj_instance;
- qpoints->pSetObjStatic = art_quick_set_obj_static;
- qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
- qpoints->pGetByteInstance = art_quick_get_byte_instance;
- qpoints->pGetCharInstance = art_quick_get_char_instance;
- qpoints->pGetShortInstance = art_quick_get_short_instance;
- qpoints->pGet32Instance = art_quick_get32_instance;
- qpoints->pGet64Instance = art_quick_get64_instance;
- qpoints->pGetObjInstance = art_quick_get_obj_instance;
- qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
- qpoints->pGetByteStatic = art_quick_get_byte_static;
- qpoints->pGetCharStatic = art_quick_get_char_static;
- qpoints->pGetShortStatic = art_quick_get_short_static;
- qpoints->pGet32Static = art_quick_get32_static;
- qpoints->pGet64Static = art_quick_get64_static;
- qpoints->pGetObjStatic = art_quick_get_obj_static;
-
- // Array
- qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check;
- qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check;
- qpoints->pAputObject = art_quick_aput_obj;
- qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
-
- // JNI
- qpoints->pJniMethodStart = JniMethodStart;
- qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
- qpoints->pJniMethodEnd = JniMethodEnd;
- qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
- qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
- qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
- qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
-
- // Locks
- qpoints->pLockObject = art_quick_lock_object;
- qpoints->pUnlockObject = art_quick_unlock_object;
-
// Math
qpoints->pCmpgDouble = CmpgDouble;
qpoints->pCmpgFloat = CmpgFloat;
@@ -144,35 +90,6 @@
qpoints->pStringCompareTo = art_quick_string_compareto;
qpoints->pMemcpy = memcpy;
- // Invocation
- qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
- qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
- qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck =
- art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
- art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck =
- art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck =
- art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck =
- art_quick_invoke_virtual_trampoline_with_access_check;
-
- // Thread
- qpoints->pTestSuspend = art_quick_test_suspend;
-
- // Throws
- qpoints->pDeliverException = art_quick_deliver_exception;
- qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
- qpoints->pThrowDivZero = art_quick_throw_div_zero;
- qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
- qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
- qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
-
- // Deoptimization from compiled code.
- qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
-
// TODO - use lld/scd instructions for Mips64
// Atomic 64-bit load/store
qpoints->pA64Load = QuasiAtomic::Read64;
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 5d0c94c..8f1a35a 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -971,6 +971,15 @@
RETURN_IF_ZERO
END art_quick_lock_object
+ENTRY art_quick_lock_object_no_inline
+ beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
+ jal artLockObjectFromCode # (Object* obj, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_lock_object_no_inline
+
/*
* Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
*/
@@ -984,6 +993,15 @@
RETURN_IF_ZERO
END art_quick_unlock_object
+ENTRY art_quick_unlock_object_no_inline
+ beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ jal artUnlockObjectFromCode # (Object* obj, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_unlock_object_no_inline
+
/*
* Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
*/
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 75d9073..02629e8 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1205,7 +1205,8 @@
TEST_F(StubTest, StringCompareTo) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
+ // There is no StringCompareTo runtime entrypoint for __aarch64__.
+#if defined(__i386__) || defined(__arm__) || \
defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__))
// TODO: Check the "Unresolved" allocation stubs
@@ -2010,14 +2011,14 @@
// that will create it: the runtime stub expects to be called by compiled code.
LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
- static ImtConflictTable::Entry empty_entry = { nullptr, nullptr };
- ImtConflictTable* empty_conflict_table = reinterpret_cast<ImtConflictTable*>(&empty_entry);
+ ImtConflictTable* empty_conflict_table =
+ Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc);
void* data = linear_alloc->Alloc(
self,
- ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table));
+ ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, sizeof(void*)));
ImtConflictTable* new_table = new (data) ImtConflictTable(
- empty_conflict_table, inf_contains, contains_amethod);
- conflict_method->SetImtConflictTable(new_table);
+ empty_conflict_table, inf_contains, contains_amethod, sizeof(void*));
+ conflict_method->SetImtConflictTable(new_table, sizeof(void*));
size_t result =
Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index e593f39..15a8571 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -17,6 +17,7 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
+#include "entrypoints/quick/quick_default_init_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "interpreter/interpreter.h"
@@ -33,67 +34,12 @@
extern "C" mirror::Object* art_quick_read_barrier_for_root_slow(GcRoot<mirror::Object>*);
void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
- // JNI
- jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
-
- // Alloc
- ResetQuickAllocEntryPoints(qpoints);
+ DefaultInitEntryPoints(jpoints, qpoints);
// Cast
qpoints->pInstanceofNonTrivial = art_quick_is_assignable;
qpoints->pCheckCast = art_quick_check_cast;
- // DexCache
- qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
- qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
- qpoints->pInitializeType = art_quick_initialize_type;
- qpoints->pResolveString = art_quick_resolve_string;
-
- // Field
- qpoints->pSet8Instance = art_quick_set8_instance;
- qpoints->pSet8Static = art_quick_set8_static;
- qpoints->pSet16Instance = art_quick_set16_instance;
- qpoints->pSet16Static = art_quick_set16_static;
- qpoints->pSet32Instance = art_quick_set32_instance;
- qpoints->pSet32Static = art_quick_set32_static;
- qpoints->pSet64Instance = art_quick_set64_instance;
- qpoints->pSet64Static = art_quick_set64_static;
- qpoints->pSetObjInstance = art_quick_set_obj_instance;
- qpoints->pSetObjStatic = art_quick_set_obj_static;
- qpoints->pGetByteInstance = art_quick_get_byte_instance;
- qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
- qpoints->pGetShortInstance = art_quick_get_short_instance;
- qpoints->pGetCharInstance = art_quick_get_char_instance;
- qpoints->pGet32Instance = art_quick_get32_instance;
- qpoints->pGet64Instance = art_quick_get64_instance;
- qpoints->pGetObjInstance = art_quick_get_obj_instance;
- qpoints->pGetByteStatic = art_quick_get_byte_static;
- qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
- qpoints->pGetShortStatic = art_quick_get_short_static;
- qpoints->pGetCharStatic = art_quick_get_char_static;
- qpoints->pGet32Static = art_quick_get32_static;
- qpoints->pGet64Static = art_quick_get64_static;
- qpoints->pGetObjStatic = art_quick_get_obj_static;
-
- // Array
- qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check;
- qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check;
- qpoints->pAputObject = art_quick_aput_obj;
- qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
-
- // JNI
- qpoints->pJniMethodStart = JniMethodStart;
- qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
- qpoints->pJniMethodEnd = JniMethodEnd;
- qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
- qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
- qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
- qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
-
- // Locks
- qpoints->pLockObject = art_quick_lock_object;
- qpoints->pUnlockObject = art_quick_unlock_object;
-
// More math.
qpoints->pCos = cos;
qpoints->pSin = sin;
@@ -128,35 +74,6 @@
qpoints->pStringCompareTo = art_quick_string_compareto;
qpoints->pMemcpy = art_quick_memcpy;
- // Invocation
- qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
- qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
- qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck =
- art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
- art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck =
- art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck =
- art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck =
- art_quick_invoke_virtual_trampoline_with_access_check;
-
- // Thread
- qpoints->pTestSuspend = art_quick_test_suspend;
-
- // Throws
- qpoints->pDeliverException = art_quick_deliver_exception;
- qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
- qpoints->pThrowDivZero = art_quick_throw_div_zero;
- qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
- qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
- qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
-
- // Deoptimize
- qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
-
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
qpoints->pReadBarrierMark = art_quick_read_barrier_mark;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 4f9b3f7..485da9f 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1075,6 +1075,22 @@
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object
+DEFINE_FUNCTION art_quick_lock_object_no_inline
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ // Outgoing argument set up
+ subl LITERAL(8), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(8)
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH eax // pass object
+ call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
+ addl LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_EAX_ZERO
+END_FUNCTION art_quick_lock_object_no_inline
+
+
DEFINE_FUNCTION art_quick_unlock_object
testl %eax, %eax // null check object/eax
jz .Lslow_unlock
@@ -1130,6 +1146,21 @@
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object
+DEFINE_FUNCTION art_quick_unlock_object_no_inline
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ // Outgoing argument set up
+ subl LITERAL(8), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(8)
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH eax // pass object
+ call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
+ addl LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_EAX_ZERO
+END_FUNCTION art_quick_unlock_object_no_inline
+
DEFINE_FUNCTION art_quick_is_assignable
PUSH eax // alignment padding
PUSH ecx // pass arg2 - obj->klass
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 0a5d14a..bd6df70 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -17,6 +17,9 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
+#if !defined(__APPLE__)
+#include "entrypoints/quick/quick_default_init_entrypoints.h"
+#endif
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/math_entrypoints.h"
#include "entrypoints/runtime_asm_entrypoints.h"
@@ -38,67 +41,12 @@
UNUSED(jpoints, qpoints);
UNIMPLEMENTED(FATAL);
#else
- // JNI
- jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
-
- // Alloc
- ResetQuickAllocEntryPoints(qpoints);
+ DefaultInitEntryPoints(jpoints, qpoints);
// Cast
qpoints->pInstanceofNonTrivial = art_quick_assignable_from_code;
qpoints->pCheckCast = art_quick_check_cast;
- // DexCache
- qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
- qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
- qpoints->pInitializeType = art_quick_initialize_type;
- qpoints->pResolveString = art_quick_resolve_string;
-
- // Field
- qpoints->pSet8Instance = art_quick_set8_instance;
- qpoints->pSet8Static = art_quick_set8_static;
- qpoints->pSet16Instance = art_quick_set16_instance;
- qpoints->pSet16Static = art_quick_set16_static;
- qpoints->pSet32Instance = art_quick_set32_instance;
- qpoints->pSet32Static = art_quick_set32_static;
- qpoints->pSet64Instance = art_quick_set64_instance;
- qpoints->pSet64Static = art_quick_set64_static;
- qpoints->pSetObjInstance = art_quick_set_obj_instance;
- qpoints->pSetObjStatic = art_quick_set_obj_static;
- qpoints->pGetByteInstance = art_quick_get_byte_instance;
- qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
- qpoints->pGetShortInstance = art_quick_get_short_instance;
- qpoints->pGetCharInstance = art_quick_get_char_instance;
- qpoints->pGet32Instance = art_quick_get32_instance;
- qpoints->pGet64Instance = art_quick_get64_instance;
- qpoints->pGetObjInstance = art_quick_get_obj_instance;
- qpoints->pGetByteStatic = art_quick_get_byte_static;
- qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
- qpoints->pGetShortStatic = art_quick_get_short_static;
- qpoints->pGetCharStatic = art_quick_get_char_static;
- qpoints->pGet32Static = art_quick_get32_static;
- qpoints->pGet64Static = art_quick_get64_static;
- qpoints->pGetObjStatic = art_quick_get_obj_static;
-
- // Array
- qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check;
- qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check;
- qpoints->pAputObject = art_quick_aput_obj;
- qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
-
- // JNI
- qpoints->pJniMethodStart = JniMethodStart;
- qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
- qpoints->pJniMethodEnd = JniMethodEnd;
- qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
- qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
- qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
- qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
-
- // Locks
- qpoints->pLockObject = art_quick_lock_object;
- qpoints->pUnlockObject = art_quick_unlock_object;
-
// More math.
qpoints->pCos = cos;
qpoints->pSin = sin;
@@ -132,35 +80,6 @@
qpoints->pStringCompareTo = art_quick_string_compareto;
qpoints->pMemcpy = art_quick_memcpy;
- // Invocation
- qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
- qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
- qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck =
- art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
- art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck =
- art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck =
- art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck =
- art_quick_invoke_virtual_trampoline_with_access_check;
-
- // Thread
- qpoints->pTestSuspend = art_quick_test_suspend;
-
- // Throws
- qpoints->pDeliverException = art_quick_deliver_exception;
- qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
- qpoints->pThrowDivZero = art_quick_throw_div_zero;
- qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
- qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
- qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
-
- // Deoptimize
- qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
-
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
qpoints->pReadBarrierMark = art_quick_read_barrier_mark;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 26e668e..8064ed6 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -894,57 +894,107 @@
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_FUNCTION art_quick_alloc_object_rosalloc
-// A handle-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
+// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
+//
+// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
+// RCX: scratch, r8: Thread::Current().
+MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel)
+ testl %edx, %edx // Check null class
+ jz RAW_VAR(slowPathLabel)
+ // Check class status.
+ cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%rdx)
+ jne RAW_VAR(slowPathLabel)
+ // No fake dependence needed on x86
+ // between status and flags load,
+ // since each load is a load-acquire,
+ // no loads reordering.
+                                                           // Check whether the access flags
+                                                           // include kAccClassIsFinalizable.
+ testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%rdx)
+ jnz RAW_VAR(slowPathLabel)
+ movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
+ movq THREAD_LOCAL_END_OFFSET(%r8), %rax // Load thread_local_end.
+ subq THREAD_LOCAL_POS_OFFSET(%r8), %rax // Compute the remaining buffer size.
+ movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %ecx // Load the object size.
+ cmpq %rax, %rcx // Check if it fits. OK to do this
+ // before rounding up the object size
+ // assuming the buf size alignment.
+ ja RAW_VAR(slowPathLabel)
+ addl LITERAL(OBJECT_ALIGNMENT_MASK), %ecx // Align the size by 8. (addr + 7) & ~7.
+ andl LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED), %ecx
+ movq THREAD_LOCAL_POS_OFFSET(%r8), %rax // Load thread_local_pos
+ // as allocated object.
+ addq %rax, %rcx // Add the object size.
+ movq %rcx, THREAD_LOCAL_POS_OFFSET(%r8) // Update thread_local_pos.
+ addq LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%r8) // Increase thread_local_objects.
+ // Store the class pointer in the header.
+ // No fence needed for x86.
+ POISON_HEAP_REF edx
+ movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
+ ret // Fast path succeeded.
+END_MACRO
+
+// The common slow path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
+MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ // Outgoing argument set up
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
+ call VAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+END_MACRO
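ALLOC_OBJECT_TLAB_FAST_PATH is the bump-pointer core shared by both TLAB entrypoints. The same arithmetic in standalone C++ — a sketch with simplified names, not ART's Heap API:

#include <cstddef>
#include <cstdint>

// Sketch of the per-thread TLAB state with simplified field names.
struct TlabSketch {
  std::uintptr_t pos;   // THREAD_LOCAL_POS: next free byte.
  std::uintptr_t end;   // THREAD_LOCAL_END: one past the buffer.
  std::size_t objects;  // THREAD_LOCAL_OBJECTS.
};

constexpr std::size_t kObjectAlignment = 8;

// Returns the new object's address, or 0 when the slow path must allocate.
std::uintptr_t AllocObjectTlabFastPath(TlabSketch* tlab, std::size_t object_size) {
  std::size_t remaining = tlab->end - tlab->pos;
  // Comparing before rounding up is fine as long as the buffer bounds are
  // themselves object-aligned, so `remaining` is a multiple of 8.
  if (object_size > remaining) {
    return 0;
  }
  std::size_t aligned_size =
      (object_size + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
  std::uintptr_t result = tlab->pos;  // The allocated object.
  tlab->pos += aligned_size;          // Bump the pointer.
  ++tlab->objects;
  return result;                      // Caller stores the class pointer.
}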
+
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
DEFINE_FUNCTION art_quick_alloc_object_tlab
// Fast path tlab allocation.
// RDI: uint32_t type_idx, RSI: ArtMethod*
// RDX, RCX, R8, R9: free. RAX: return val.
- // TODO: Add read barrier when this function is used.
- // Note this function can/should implement read barrier fast path only
- // (no read barrier slow path) because this is the fast path of tlab allocation.
- // We can fall back to the allocation slow path to do the read barrier slow path.
#if defined(USE_READ_BARRIER)
int3
int3
#endif
// Might need a special macro since rsi and edx are 32b/64b mismatched.
movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
- // TODO: Add read barrier when this function is used.
// Might need to break down into multiple instructions to get the base address in a register.
// Load the class
movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
- testl %edx, %edx // Check null class
- jz .Lart_quick_alloc_object_tlab_slow_path
- // Check class status.
- cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%rdx)
- jne .Lart_quick_alloc_object_tlab_slow_path
- // Check access flags has kAccClassIsFinalizable
- testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%rdx)
- jnz .Lart_quick_alloc_object_tlab_slow_path
- movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %ecx // Load the object size.
- addl LITERAL(OBJECT_ALIGNMENT_MASK), %ecx // Align the size by 8. (addr + 7) & ~7.
- andl LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED), %ecx
- movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
- movq THREAD_LOCAL_POS_OFFSET(%r8), %rax // Load thread_local_pos.
- addq %rax, %rcx // Add the object size.
- cmpq THREAD_LOCAL_END_OFFSET(%r8), %rcx // Check if it fits.
- ja .Lart_quick_alloc_object_tlab_slow_path
- movq %rcx, THREAD_LOCAL_POS_OFFSET(%r8) // Update thread_local_pos.
- addq LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%r8) // Increment thread_local_objects.
- // Store the class pointer in the header.
- // No fence needed for x86.
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
- ret // Fast path succeeded.
+ ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
.Lart_quick_alloc_object_tlab_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- // Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call SYMBOL(artAllocObjectFromCodeTLAB) // cxx_name(arg0, arg1, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeTLAB
END_FUNCTION art_quick_alloc_object_tlab
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB).
+DEFINE_FUNCTION art_quick_alloc_object_region_tlab
+ // Fast path region tlab allocation.
+ // RDI: uint32_t type_idx, RSI: ArtMethod*
+ // RDX, RCX, R8, R9: free. RAX: return val.
+#if !defined(USE_READ_BARRIER)
+ int3
+ int3
+#endif
+ // Might need a special macro since rsi and edx are 32b/64b mismatched.
+ movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
+ // Might need to break down into multiple instructions to get the base address in a register.
+ // Load the class
+ movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
+ cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
+ jne .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
+.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
+ ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
+.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
+ // The read barrier slow path. Mark the class.
+ PUSH rdi
+ PUSH rsi
+ // Outgoing argument set up
+ movq %rdx, %rdi // Pass the class as the first param.
+ call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
+ movq %rax, %rdx
+ POP rsi
+ POP rdi
+ jmp .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
+.Lart_quick_alloc_object_region_tlab_slow_path:
+ ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeRegionTLAB
+END_FUNCTION art_quick_alloc_object_region_tlab
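The region-TLAB entrypoint differs from the plain TLAB one only in the class-load read barrier: when the thread is GC-marking, the freshly loaded class reference is routed through artReadBarrierMark before the shared fast path runs. A sketch of that control flow, with placeholder types standing in for the runtime call:

// Klass is an opaque class reference; MarkSketch stands in for artReadBarrierMark.
using Klass = void*;

Klass MarkSketch(Klass klass) { return klass; }  // Placeholder for artReadBarrierMark.

struct ThreadSketch { bool is_gc_marking; };

Klass LoadClassWithReadBarrier(ThreadSketch* self, Klass klass) {
  if (self->is_gc_marking) {
    klass = MarkSketch(klass);  // May return a forwarded reference.
  }
  return klass;  // Then fall through to ALLOC_OBJECT_TLAB_FAST_PATH.
}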
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
@@ -994,6 +1044,14 @@
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object
+DEFINE_FUNCTION art_quick_lock_object_no_inline
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_EAX_ZERO
+END_FUNCTION art_quick_lock_object_no_inline
+
DEFINE_FUNCTION art_quick_unlock_object
testl %edi, %edi // null check object/edi
jz .Lslow_unlock
@@ -1037,6 +1095,14 @@
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object
+DEFINE_FUNCTION art_quick_unlock_object_no_inline
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_EAX_ZERO
+END_FUNCTION art_quick_unlock_object_no_inline
+
DEFINE_FUNCTION art_quick_check_cast
PUSH rdi // Save args for exc
PUSH rsi
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 6449efa..7647ad6 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -456,13 +456,18 @@
interface_method->VisitRoots(visitor, pointer_size);
}
visitor.VisitRoot(declaring_class_.AddressWithoutBarrier());
- // Runtime methods and native methods use the same field as the profiling info for
- // storing their own data (jni entrypoint for native methods, and ImtConflictTable for
- // some runtime methods).
- if (!IsNative() && !IsRuntimeMethod()) {
- ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size);
- if (profiling_info != nullptr) {
- profiling_info->VisitRoots(visitor);
+ // We know we don't have profiling information if the class hasn't been verified. Note
+ // that this check also ensures the IsNative call can be made, as IsNative expects a fully
+ // created class (and not a retired one).
+ if (klass->IsVerified()) {
+ // Runtime methods and native methods use the same field as the profiling info for
+ // storing their own data (jni entrypoint for native methods, and ImtConflictTable for
+ // some runtime methods).
+ if (!IsNative() && !IsRuntimeMethod()) {
+ ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size);
+ if (profiling_info != nullptr) {
+ profiling_info->VisitRoots(visitor);
+ }
}
}
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 34d19d1..06156f5 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -276,7 +276,7 @@
// Ensure that we won't be accidentally calling quick compiled code when -Xint.
if (kIsDebugBuild && runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
- CHECK(!runtime->UseJit());
+ CHECK(!runtime->UseJitCompilation());
const void* oat_quick_code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(this);
CHECK(oat_quick_code == nullptr || oat_quick_code != GetEntryPointFromQuickCompiledCode())
<< "Don't call compiled code when -Xint " << PrettyMethod(this);
@@ -481,7 +481,7 @@
// to the JIT code, but this would require taking the JIT code cache lock to notify
// it, which we do not want at this level.
Runtime* runtime = Runtime::Current();
- if (runtime->GetJit() != nullptr) {
+ if (runtime->UseJitCompilation()) {
if (runtime->GetJit()->GetCodeCache()->ContainsPc(GetEntryPointFromQuickCompiledCode())) {
SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), image_pointer_size);
}
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 08f0285..a012a5a 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -41,6 +41,7 @@
namespace mirror {
class Array;
class Class;
+class IfTable;
class PointerArray;
} // namespace mirror
@@ -50,66 +51,151 @@
// with the last entry being null to make an assembly implementation of a lookup
// faster.
class ImtConflictTable {
+ enum MethodIndex {
+ kMethodInterface,
+ kMethodImplementation,
+ kMethodCount, // Number of elements in enum.
+ };
+
public:
// Build a new table copying `other` and adding the new entry formed of
// the pair { `interface_method`, `implementation_method` }
ImtConflictTable(ImtConflictTable* other,
ArtMethod* interface_method,
- ArtMethod* implementation_method) {
- size_t index = 0;
- while (other->entries_[index].interface_method != nullptr) {
- entries_[index] = other->entries_[index];
- index++;
+ ArtMethod* implementation_method,
+ size_t pointer_size) {
+ const size_t count = other->NumEntries(pointer_size);
+ for (size_t i = 0; i < count; ++i) {
+ SetInterfaceMethod(i, pointer_size, other->GetInterfaceMethod(i, pointer_size));
+ SetImplementationMethod(i, pointer_size, other->GetImplementationMethod(i, pointer_size));
}
- entries_[index].interface_method = interface_method;
- entries_[index].implementation_method = implementation_method;
+ SetInterfaceMethod(count, pointer_size, interface_method);
+ SetImplementationMethod(count, pointer_size, implementation_method);
// Add the null marker.
- entries_[index + 1].interface_method = nullptr;
- entries_[index + 1].implementation_method = nullptr;
+ SetInterfaceMethod(count + 1, pointer_size, nullptr);
+ SetImplementationMethod(count + 1, pointer_size, nullptr);
+ }
+
+ // num_entries excludes the terminating null entry.
+ ImtConflictTable(size_t num_entries, size_t pointer_size) {
+ SetInterfaceMethod(num_entries, pointer_size, nullptr);
+ SetImplementationMethod(num_entries, pointer_size, nullptr);
+ }
+
+ // Set an entry at an index.
+ void SetInterfaceMethod(size_t index, size_t pointer_size, ArtMethod* method) {
+ SetMethod(index * kMethodCount + kMethodInterface, pointer_size, method);
+ }
+
+ void SetImplementationMethod(size_t index, size_t pointer_size, ArtMethod* method) {
+ SetMethod(index * kMethodCount + kMethodImplementation, pointer_size, method);
+ }
+
+ ArtMethod* GetInterfaceMethod(size_t index, size_t pointer_size) const {
+ return GetMethod(index * kMethodCount + kMethodInterface, pointer_size);
+ }
+
+ ArtMethod* GetImplementationMethod(size_t index, size_t pointer_size) const {
+ return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size);
+ }
+
+ // Visit all of the entries.
+ // NO_THREAD_SAFETY_ANALYSIS so this can be called with locks held. The visitor is passed a
+ // pair of ArtMethod* and returns an updated pair. The order is <interface, implementation>.
+ template<typename Visitor>
+ void Visit(const Visitor& visitor, size_t pointer_size) NO_THREAD_SAFETY_ANALYSIS {
+ uint32_t table_index = 0;
+ for (;;) {
+ ArtMethod* interface_method = GetInterfaceMethod(table_index, pointer_size);
+ if (interface_method == nullptr) {
+ break;
+ }
+ ArtMethod* implementation_method = GetImplementationMethod(table_index, pointer_size);
+ auto input = std::make_pair(interface_method, implementation_method);
+ std::pair<ArtMethod*, ArtMethod*> updated = visitor(input);
+ if (input.first != updated.first) {
+ SetInterfaceMethod(table_index, pointer_size, updated.first);
+ }
+ if (input.second != updated.second) {
+ SetImplementationMethod(table_index, pointer_size, updated.second);
+ }
+ ++table_index;
+ }
}
// Lookup the implementation ArtMethod associated to `interface_method`. Return null
// if not found.
- ArtMethod* Lookup(ArtMethod* interface_method) const {
+ ArtMethod* Lookup(ArtMethod* interface_method, size_t pointer_size) const {
uint32_t table_index = 0;
- ArtMethod* current_interface_method;
- while ((current_interface_method = entries_[table_index].interface_method) != nullptr) {
- if (current_interface_method == interface_method) {
- return entries_[table_index].implementation_method;
+ for (;;) {
+ ArtMethod* current_interface_method = GetInterfaceMethod(table_index, pointer_size);
+ if (current_interface_method == nullptr) {
+ break;
}
- table_index++;
+ if (current_interface_method == interface_method) {
+ return GetImplementationMethod(table_index, pointer_size);
+ }
+ ++table_index;
}
return nullptr;
}
- // Compute the size in bytes taken by this table.
- size_t ComputeSize() const {
+ // Compute the number of entries in this table.
+ size_t NumEntries(size_t pointer_size) const {
uint32_t table_index = 0;
- size_t total_size = 0;
- while ((entries_[table_index].interface_method) != nullptr) {
- total_size += sizeof(Entry);
- table_index++;
+ while (GetInterfaceMethod(table_index, pointer_size) != nullptr) {
+ ++table_index;
}
+ return table_index;
+ }
+
+ // Compute the size in bytes taken by this table.
+ size_t ComputeSize(size_t pointer_size) const {
// Add the end marker.
- return total_size + sizeof(Entry);
+ return ComputeSize(NumEntries(pointer_size), pointer_size);
}
// Compute the size in bytes needed for copying the given `table` and add
// one more entry.
- static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table) {
- return table->ComputeSize() + sizeof(Entry);
+ static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table, size_t pointer_size) {
+ return table->ComputeSize(pointer_size) + EntrySize(pointer_size);
}
- struct Entry {
- ArtMethod* interface_method;
- ArtMethod* implementation_method;
- };
+ // Compute size with a fixed number of entries.
+ static size_t ComputeSize(size_t num_entries, size_t pointer_size) {
+ return (num_entries + 1) * EntrySize(pointer_size); // Add one for null terminator.
+ }
+
+ static size_t EntrySize(size_t pointer_size) {
+ return pointer_size * static_cast<size_t>(kMethodCount);
+ }
private:
+ ArtMethod* GetMethod(size_t index, size_t pointer_size) const {
+ if (pointer_size == 8) {
+ return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data64_[index]));
+ } else {
+ DCHECK_EQ(pointer_size, 4u);
+ return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data32_[index]));
+ }
+ }
+
+ void SetMethod(size_t index, size_t pointer_size, ArtMethod* method) {
+ if (pointer_size == 8) {
+ data64_[index] = dchecked_integral_cast<uint64_t>(reinterpret_cast<uintptr_t>(method));
+ } else {
+ DCHECK_EQ(pointer_size, 4u);
+ data32_[index] = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
+ }
+ }
+
// Array of entries that the assembly stubs will iterate over. Note that this is
// not fixed size, and we allocate data prior to calling the constructor
// of ImtConflictTable.
- Entry entries_[0];
+ union {
+ uint32_t data32_[0];
+ uint64_t data64_[0];
+ };
DISALLOW_COPY_AND_ASSIGN(ImtConflictTable);
};
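With the entries now stored as raw 32- or 64-bit words instead of an array of `Entry` structs, the table's size is a pure function of the entry count and the image pointer size. A standalone check of the layout math:

#include <cassert>
#include <cstddef>

// Each logical entry is an <interface, implementation> pair, i.e. two
// image-pointer-sized words, and the table ends with one null entry.
std::size_t EntrySizeSketch(std::size_t pointer_size) {
  return pointer_size * 2;  // kMethodCount == 2.
}

std::size_t ComputeSizeSketch(std::size_t num_entries, std::size_t pointer_size) {
  return (num_entries + 1) * EntrySizeSketch(pointer_size);  // +1: null terminator.
}

int main() {
  assert(ComputeSizeSketch(0, 4u) == 8u);   // Empty 32-bit table: terminator only.
  assert(ComputeSizeSketch(3, 8u) == 64u);  // Three 64-bit entries + terminator.
  return 0;
}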
@@ -265,6 +351,12 @@
SetAccessFlags(GetAccessFlags() | kAccSkipAccessChecks);
}
+ // Should this method be run in the interpreter and count locks (e.g., failed structured-
+ // locking verification)?
+ bool MustCountLocks() {
+ return (GetAccessFlags() & kAccMustCountLocks) != 0;
+ }
+
// Returns true if this method could be overridden by a default method.
bool IsOverridableByDefaultMethod() SHARED_REQUIRES(Locks::mutator_lock_);
@@ -351,7 +443,6 @@
// Find the method that this method overrides.
ArtMethod* FindOverriddenMethod(size_t pointer_size)
- REQUIRES(Roles::uninterruptible_)
SHARED_REQUIRES(Locks::mutator_lock_);
// Find the method index for this method within other_dexfile. If this method isn't present then
@@ -417,8 +508,8 @@
return reinterpret_cast<ImtConflictTable*>(GetEntryPointFromJniPtrSize(pointer_size));
}
- ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table) {
- SetEntryPointFromJniPtrSize(table, sizeof(void*));
+ ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table, size_t pointer_size) {
+ SetEntryPointFromJniPtrSize(table, pointer_size);
}
ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) {
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 97280c3..3b5b8b5 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -56,6 +56,7 @@
bool threads;
bool verifier;
bool image;
+ bool systrace_lock_logging; // Enabled with "-verbose:systrace-locks".
};
// Global log verbosity setting, initialized by InitLogging.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index fa0107a..35c40cd 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -687,6 +687,9 @@
self->AssertNoPendingException();
}
+ // Create conflict tables that depend on the class linker.
+ runtime->FixupConflictTables();
+
FinishInit(self);
VLOG(startup) << "ClassLinker::InitFromCompiler exiting";
@@ -773,9 +776,13 @@
bool contains = false;
for (gc::space::ImageSpace* space : spaces) {
auto& header = space->GetImageHeader();
- auto& methods = header.GetMethodsSection();
- auto offset = reinterpret_cast<uint8_t*>(m) - space->Begin();
- contains |= methods.Contains(offset);
+ size_t offset = reinterpret_cast<uint8_t*>(m) - space->Begin();
+
+ const ImageSection& methods = header.GetMethodsSection();
+ contains = contains || methods.Contains(offset);
+
+ const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
+ contains = contains || runtime_methods.Contains(offset);
}
CHECK(contains) << m << " not found";
}
@@ -1438,20 +1445,14 @@
if (*out_forward_dex_cache_array) {
ScopedTrace timing("Fixup ArtMethod dex cache arrays");
FixupArtMethodArrayVisitor visitor(header);
- header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
- &visitor,
- space->Begin(),
- sizeof(void*));
+ header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*));
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
}
if (kVerifyArtMethodDeclaringClasses) {
ScopedTrace timing("Verify declaring classes");
ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_);
VerifyDeclaringClassVisitor visitor;
- header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
- &visitor,
- space->Begin(),
- sizeof(void*));
+ header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*));
}
return true;
}
@@ -1729,9 +1730,8 @@
// Set entry point to interpreter if in InterpretOnly mode.
if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
- const ImageSection& methods = header.GetMethodsSection();
SetInterpreterEntrypointArtMethodVisitor visitor(image_pointer_size_);
- methods.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_);
+ header.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_);
}
ClassTable* class_table = nullptr;
@@ -1800,10 +1800,7 @@
// This verification needs to happen after the classes have been added to the class loader.
// Since it ensures classes are in the class table.
VerifyClassInTableArtMethodVisitor visitor2(class_table);
- header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
- &visitor2,
- space->Begin(),
- sizeof(void*));
+ header.VisitPackedArtMethods(&visitor2, space->Begin(), sizeof(void*));
}
VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time);
return true;
@@ -2035,6 +2032,7 @@
Runtime* const runtime = Runtime::Current();
JavaVMExt* const vm = runtime->GetJavaVM();
vm->DeleteWeakGlobalRef(self, data.weak_root);
+ // Notify the JIT that we need to remove the methods and/or profiling info.
if (runtime->GetJit() != nullptr) {
jit::JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
if (code_cache != nullptr) {
@@ -2752,7 +2750,7 @@
}
if (runtime->IsNativeDebuggable()) {
- DCHECK(runtime->UseJit() && runtime->GetJit()->JitAtFirstUse());
+ DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse());
// If we are doing native debugging, ignore application's AOT code,
// since we want to JIT it with extra stackmaps for native debugging.
// On the other hand, keep all AOT code from the boot image, since the
@@ -5961,16 +5959,49 @@
}
}
-// Sets imt_ref appropriately for LinkInterfaceMethods.
-// If there is no method in the imt location of imt_ref it will store the given method there.
-// Otherwise it will set the conflict method which will figure out which method to use during
-// runtime.
-static void SetIMTRef(ArtMethod* unimplemented_method,
- ArtMethod* imt_conflict_method,
- size_t image_pointer_size,
- ArtMethod* current_method,
- /*out*/ArtMethod** imt_ref)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ArtMethod* ClassLinker::AddMethodToConflictTable(mirror::Class* klass,
+ ArtMethod* conflict_method,
+ ArtMethod* interface_method,
+ ArtMethod* method,
+ bool force_new_conflict_method) {
+ ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
+ Runtime* const runtime = Runtime::Current();
+ LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader());
+ bool new_entry = conflict_method == runtime->GetImtConflictMethod() || force_new_conflict_method;
+
+ // Create a new entry if the existing one is the shared conflict method.
+ ArtMethod* new_conflict_method = new_entry
+ ? runtime->CreateImtConflictMethod(linear_alloc)
+ : conflict_method;
+
+ // Allocate a new table. Note that we will leak this table at the next conflict,
+ // but that's a tradeoff compared to making the table fixed size.
+ void* data = linear_alloc->Alloc(
+ Thread::Current(), ImtConflictTable::ComputeSizeWithOneMoreEntry(current_table,
+ image_pointer_size_));
+ if (data == nullptr) {
+ LOG(ERROR) << "Failed to allocate conflict table";
+ return conflict_method;
+ }
+ ImtConflictTable* new_table = new (data) ImtConflictTable(current_table,
+ interface_method,
+ method,
+ image_pointer_size_);
+
+ // Do a fence to ensure threads see the data in the table before it is assigned
+ // to the conflict method.
+ // Note that there is a race in the presence of multiple threads and we may leak
+ // memory from the LinearAlloc, but that's a tradeoff compared to using
+ // atomic operations.
+ QuasiAtomic::ThreadFenceRelease();
+ new_conflict_method->SetImtConflictTable(new_table, image_pointer_size_);
+ return new_conflict_method;
+}
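For reference, the consumer of this table is the conflict trampoline, which walks the null-terminated pairs looking for the calling interface method and, on a miss, falls back to full resolution (which in turn calls AddMethodToConflictTable to grow the table). A self-contained model of that lookup:

#include <cstddef>
#include <vector>

// Model of the conflict-table walk: flat <interface, implementation> pairs,
// terminated by a null interface slot, as the assembly stubs iterate them.
using MethodRef = const void*;

MethodRef LookupSketch(const std::vector<MethodRef>& pairs, MethodRef interface_method) {
  for (std::size_t i = 0; i + 1 < pairs.size(); i += 2) {
    if (pairs[i] == nullptr) {
      break;  // Null terminator: not cached.
    }
    if (pairs[i] == interface_method) {
      return pairs[i + 1];  // Cached implementation.
    }
  }
  return nullptr;  // Miss: resolve and grow via AddMethodToConflictTable.
}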
+
+void ClassLinker::SetIMTRef(ArtMethod* unimplemented_method,
+ ArtMethod* imt_conflict_method,
+ ArtMethod* current_method,
+ /*out*/ArtMethod** imt_ref) {
// Place method in imt if entry is empty, place conflict otherwise.
if (*imt_ref == unimplemented_method) {
*imt_ref = current_method;
@@ -5980,9 +6011,9 @@
// Note that we have checked IsRuntimeMethod, as there may be multiple different
// conflict methods.
MethodNameAndSignatureComparator imt_comparator(
- (*imt_ref)->GetInterfaceMethodIfProxy(image_pointer_size));
+ (*imt_ref)->GetInterfaceMethodIfProxy(image_pointer_size_));
if (imt_comparator.HasSameNameAndSignature(
- current_method->GetInterfaceMethodIfProxy(image_pointer_size))) {
+ current_method->GetInterfaceMethodIfProxy(image_pointer_size_))) {
*imt_ref = current_method;
} else {
*imt_ref = imt_conflict_method;
@@ -5995,6 +6026,151 @@
}
}
+void ClassLinker::FillIMTAndConflictTables(mirror::Class* klass) {
+ DCHECK(klass->ShouldHaveEmbeddedImtAndVTable()) << PrettyClass(klass);
+ DCHECK(!klass->IsTemp()) << PrettyClass(klass);
+ ArtMethod* imt[mirror::Class::kImtSize];
+ Runtime* const runtime = Runtime::Current();
+ ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod();
+ ArtMethod* const conflict_method = runtime->GetImtConflictMethod();
+ std::fill_n(imt, arraysize(imt), unimplemented_method);
+ if (klass->GetIfTable() != nullptr) {
+ FillIMTFromIfTable(klass->GetIfTable(),
+ unimplemented_method,
+ conflict_method,
+ klass,
+ true,
+ false,
+ &imt[0]);
+ }
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ klass->SetEmbeddedImTableEntry(i, imt[i], image_pointer_size_);
+ }
+}
+
+static inline uint32_t GetIMTIndex(ArtMethod* interface_method)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ return interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+}
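GetIMTIndex hashes the interface method's dex index into the fixed-size IMT, so unrelated methods can collide in a slot; those collisions are exactly what the conflict tables above resolve. A small illustration (kImtSize is build-configured in ART; 64 below is only for the example):

#include <cstdint>
#include <iostream>

constexpr std::uint32_t kImtSizeSketch = 64;  // Illustrative value only.

std::uint32_t GetImtIndexSketch(std::uint32_t dex_method_index) {
  return dex_method_index % kImtSizeSketch;
}

int main() {
  // Dex method indexes 3 and 67 collide in slot 3, which then needs a
  // conflict table instead of a direct implementation pointer.
  std::cout << GetImtIndexSketch(3) << " " << GetImtIndexSketch(67) << "\n";
  return 0;
}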
+
+ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count,
+ LinearAlloc* linear_alloc,
+ size_t image_pointer_size) {
+ void* data = linear_alloc->Alloc(Thread::Current(),
+ ImtConflictTable::ComputeSize(count,
+ image_pointer_size));
+ return (data != nullptr) ? new (data) ImtConflictTable(count, image_pointer_size) : nullptr;
+}
+
+ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count, LinearAlloc* linear_alloc) {
+ return CreateImtConflictTable(count, linear_alloc, image_pointer_size_);
+}
+
+void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
+ ArtMethod* unimplemented_method,
+ ArtMethod* imt_conflict_method,
+ mirror::Class* klass,
+ bool create_conflict_tables,
+ bool ignore_copied_methods,
+ ArtMethod** imt) {
+ uint32_t conflict_counts[mirror::Class::kImtSize] = {};
+ for (size_t i = 0, length = if_table->Count(); i < length; ++i) {
+ mirror::Class* interface = if_table->GetInterface(i);
+ const size_t num_virtuals = interface->NumVirtualMethods();
+ const size_t method_array_count = if_table->GetMethodArrayCount(i);
+ // There can be more virtual methods than if-table method-array entries when there are
+ // default methods.
+ DCHECK_GE(num_virtuals, method_array_count);
+ if (kIsDebugBuild) {
+ if (klass->IsInterface()) {
+ DCHECK_EQ(method_array_count, 0u);
+ } else {
+ DCHECK_EQ(interface->NumDeclaredVirtualMethods(), method_array_count);
+ }
+ }
+ if (method_array_count == 0) {
+ continue;
+ }
+ auto* method_array = if_table->GetMethodArray(i);
+ for (size_t j = 0; j < method_array_count; ++j) {
+ ArtMethod* implementation_method =
+ method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
+ if (ignore_copied_methods && implementation_method->IsCopied()) {
+ continue;
+ }
+ DCHECK(implementation_method != nullptr);
+ // Miranda methods cannot be used to implement an interface method, but they are safe to put
+ // in the IMT since their entrypoint is the interface trampoline. If we put any copied methods
+ // or interface methods in the IMT here they will not create extra conflicts since we compare
+ // names and signatures in SetIMTRef.
+ ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
+ const uint32_t imt_index = GetIMTIndex(interface_method);
+
+ // There is a conflict only if the interface methods for an IMT slot do not all have the
+ // same implementation method; keep track of this to avoid creating a conflict table in
+ // that case.
+
+ // Conflict table size for each IMT slot.
+ ++conflict_counts[imt_index];
+
+ SetIMTRef(unimplemented_method,
+ imt_conflict_method,
+ implementation_method,
+ /*out*/&imt[imt_index]);
+ }
+ }
+
+ if (create_conflict_tables) {
+ // Create the conflict tables.
+ LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader());
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ size_t conflicts = conflict_counts[i];
+ if (imt[i] == imt_conflict_method) {
+ ImtConflictTable* new_table = CreateImtConflictTable(conflicts, linear_alloc);
+ if (new_table != nullptr) {
+ ArtMethod* new_conflict_method =
+ Runtime::Current()->CreateImtConflictMethod(linear_alloc);
+ new_conflict_method->SetImtConflictTable(new_table, image_pointer_size_);
+ imt[i] = new_conflict_method;
+ } else {
+ LOG(ERROR) << "Failed to allocate conflict table";
+ imt[i] = imt_conflict_method;
+ }
+ } else {
+ DCHECK_NE(imt[i], imt_conflict_method);
+ }
+ }
+
+ for (size_t i = 0, length = if_table->Count(); i < length; ++i) {
+ mirror::Class* interface = if_table->GetInterface(i);
+ const size_t method_array_count = if_table->GetMethodArrayCount(i);
+ // There can be more virtual methods than if-table method-array entries when there are
+ // default methods.
+ if (method_array_count == 0) {
+ continue;
+ }
+ auto* method_array = if_table->GetMethodArray(i);
+ for (size_t j = 0; j < method_array_count; ++j) {
+ ArtMethod* implementation_method =
+ method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
+ if (ignore_copied_methods && implementation_method->IsCopied()) {
+ continue;
+ }
+ DCHECK(implementation_method != nullptr);
+ ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
+ const uint32_t imt_index = GetIMTIndex(interface_method);
+ if (!imt[imt_index]->IsRuntimeMethod() ||
+ imt[imt_index] == unimplemented_method ||
+ imt[imt_index] == imt_conflict_method) {
+ continue;
+ }
+ ImtConflictTable* table = imt[imt_index]->GetImtConflictTable(image_pointer_size_);
+ const size_t num_entries = table->NumEntries(image_pointer_size_);
+ table->SetInterfaceMethod(num_entries, image_pointer_size_, interface_method);
+ table->SetImplementationMethod(num_entries, image_pointer_size_, implementation_method);
+ }
+ }
+ }
+}
+
// Simple helper function that checks that no subtypes of 'val' are contained within the 'classes'
// set.
static bool NotSubinterfaceOfAny(const std::unordered_set<mirror::Class*>& classes,
@@ -6230,48 +6406,28 @@
}
}
-static void FillImtFromSuperClass(Handle<mirror::Class> klass,
- Handle<mirror::IfTable> iftable,
- ArtMethod* unimplemented_method,
- ArtMethod* imt_conflict_method,
- ArtMethod** out_imt,
- size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) {
+void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass,
+ ArtMethod* unimplemented_method,
+ ArtMethod* imt_conflict_method,
+ ArtMethod** imt) {
DCHECK(klass->HasSuperClass());
mirror::Class* super_class = klass->GetSuperClass();
if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- out_imt[i] = super_class->GetEmbeddedImTableEntry(i, pointer_size);
+ imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_);
}
} else {
// No imt in the super class, need to reconstruct from the iftable.
mirror::IfTable* if_table = super_class->GetIfTable();
- const size_t length = super_class->GetIfTableCount();
- for (size_t i = 0; i < length; ++i) {
- mirror::Class* interface = iftable->GetInterface(i);
- const size_t num_virtuals = interface->NumDeclaredVirtualMethods();
- const size_t method_array_count = if_table->GetMethodArrayCount(i);
- DCHECK_EQ(num_virtuals, method_array_count);
- if (method_array_count == 0) {
- continue;
- }
- auto* method_array = if_table->GetMethodArray(i);
- for (size_t j = 0; j < num_virtuals; ++j) {
- auto method = method_array->GetElementPtrSize<ArtMethod*>(j, pointer_size);
- DCHECK(method != nullptr) << PrettyClass(super_class);
- // Miranda methods cannot be used to implement an interface method and defaults should be
- // skipped in case we override it.
- if (method->IsDefault() || method->IsMiranda()) {
- continue;
- }
- ArtMethod* interface_method = interface->GetVirtualMethod(j, pointer_size);
- uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
- auto** imt_ref = &out_imt[imt_index];
- if (*imt_ref == unimplemented_method) {
- *imt_ref = method;
- } else if (*imt_ref != imt_conflict_method) {
- *imt_ref = imt_conflict_method;
- }
- }
+ if (if_table != nullptr) {
+ // Ignore copied methods since we will handle these in LinkInterfaceMethods.
+ FillIMTFromIfTable(if_table,
+ unimplemented_method,
+ imt_conflict_method,
+ klass.Get(),
+ /*create_conflict_tables*/false,
+ /*ignore_copied_methods*/true,
+ /*out*/imt);
}
}
}
@@ -6314,13 +6470,10 @@
const bool extend_super_iftable = has_superclass;
if (has_superclass && fill_tables) {
FillImtFromSuperClass(klass,
- iftable,
unimplemented_method,
imt_conflict_method,
- out_imt,
- image_pointer_size_);
+ out_imt);
}
-
// Allocate method arrays before since we don't want miss visiting miranda method roots due to
// thread suspension.
if (fill_tables) {
@@ -6404,7 +6557,7 @@
auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_);
MethodNameAndSignatureComparator interface_name_comparator(
interface_method->GetInterfaceMethodIfProxy(image_pointer_size_));
- uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+ uint32_t imt_index = GetIMTIndex(interface_method);
ArtMethod** imt_ptr = &out_imt[imt_index];
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
@@ -6449,7 +6602,6 @@
// Place method in imt if entry is empty, place conflict otherwise.
SetIMTRef(unimplemented_method,
imt_conflict_method,
- image_pointer_size_,
vtable_method,
/*out*/imt_ptr);
}
@@ -6483,6 +6635,17 @@
// The method is not overridable by a default method (i.e. it is directly implemented
// in some class). Therefore move onto the next interface method.
continue;
+ } else {
+ // If the super-class's method is overridable by a default method, we need to keep
+ // track of it, since being overridable does not guarantee that it is 'overridden'.
+ // If it turns out not to be overridden and we did not keep track of it, we might add it
+ // to the vtable twice, causing corruption (vtable entries having inconsistent and
+ // illegal states, incorrect vtable size, and incorrect or inconsistent iftable entries)
+ // in this class and any subclasses.
+ DCHECK(vtable_impl == nullptr || vtable_impl == supers_method)
+ << "vtable_impl was " << PrettyMethod(vtable_impl) << " and not 'nullptr' or "
+ << PrettyMethod(supers_method) << " as expected. IFTable appears to be corrupt!";
+ vtable_impl = supers_method;
}
}
// If we haven't found it yet we should search through the interfaces for default methods.
@@ -6581,7 +6744,6 @@
method_array->SetElementPtrSize(j, current_method, image_pointer_size_);
SetIMTRef(unimplemented_method,
imt_conflict_method,
- image_pointer_size_,
current_method,
/*out*/imt_ptr);
}
@@ -7835,6 +7997,7 @@
VLOG(class_linker) << "Collecting class profile for dex file " << location
<< " types=" << num_types << " class_defs=" << num_class_defs;
DexCacheResolvedClasses resolved_classes(dex_file->GetLocation(),
+ dex_file->GetBaseLocation(),
dex_file->GetLocationChecksum());
size_t num_resolved = 0;
std::unordered_set<uint16_t> class_set;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 97a1036..ece171c 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -53,6 +53,7 @@
class StackTraceElement;
} // namespace mirror
+class ImtConflictTable;
template<class T> class Handle;
template<class T> class MutableHandle;
class InternTable;
@@ -610,6 +611,26 @@
const std::set<DexCacheResolvedClasses>& classes)
REQUIRES(!dex_lock_);
+ ArtMethod* AddMethodToConflictTable(mirror::Class* klass,
+ ArtMethod* conflict_method,
+ ArtMethod* interface_method,
+ ArtMethod* method,
+ bool force_new_conflict_method)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Create a conflict table with a specified capacity.
+ ImtConflictTable* CreateImtConflictTable(size_t count, LinearAlloc* linear_alloc);
+
+ // Static version for when the class linker is not yet created.
+ static ImtConflictTable* CreateImtConflictTable(size_t count,
+ LinearAlloc* linear_alloc,
+ size_t pointer_size);
+
+
+ // Create the IMT and conflict tables for a class.
+ void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+
+
struct DexCacheData {
// Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
// not work properly.
@@ -1057,6 +1078,28 @@
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Sets imt_ref appropriately for LinkInterfaceMethods.
+ // If there is no method in the imt location of imt_ref it will store the given method there.
+ // Otherwise it will set the conflict method which will figure out which method to use during
+ // runtime.
+ void SetIMTRef(ArtMethod* unimplemented_method,
+ ArtMethod* imt_conflict_method,
+ ArtMethod* current_method,
+ /*out*/ArtMethod** imt_ref) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void FillIMTFromIfTable(mirror::IfTable* if_table,
+ ArtMethod* unimplemented_method,
+ ArtMethod* imt_conflict_method,
+ mirror::Class* klass,
+ bool create_conflict_tables,
+ bool ignore_copied_methods,
+ ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void FillImtFromSuperClass(Handle<mirror::Class> klass,
+ ArtMethod* unimplemented_method,
+ ArtMethod* imt_conflict_method,
+ ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
+
std::vector<const DexFile*> boot_class_path_;
std::vector<std::unique_ptr<const DexFile>> boot_dex_files_;
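These declarations centralize IMT handling that was previously inlined at the call sites. The documented contract of SetIMTRef can be sketched as follows (a minimal illustration derived from the comment above; the real implementation lives in class_linker.cc and may handle additional cases):

    // Sketch only: assumes `*imt_ref` starts out as the unimplemented-method
    // marker (an assumption for illustration).
    void SetIMTRefSketch(ArtMethod* unimplemented_method,
                         ArtMethod* imt_conflict_method,
                         ArtMethod* current_method,
                         /*out*/ ArtMethod** imt_ref) {
      if (*imt_ref == unimplemented_method) {
        // Free IMT slot: store the implementation directly.
        *imt_ref = current_method;
      } else if (*imt_ref != current_method) {
        // Two different interface methods hash to the same slot: install the
        // conflict method, which resolves the actual target at runtime.
        *imt_ref = imt_conflict_method;
      }
    }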
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index 6289d8a..e8d74dd 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -44,6 +44,8 @@
kEverything, // Compile everything capable of being compiled.
};
+ static const Filter kDefaultCompilerFilter = kSpeed;
+
// Returns true if an oat file with this compiler filter contains
// compiled executable code.
static bool IsCompilationEnabled(Filter filter);
diff --git a/runtime/dex_cache_resolved_classes.h b/runtime/dex_cache_resolved_classes.h
index 80c12cb..0febbed 100644
--- a/runtime/dex_cache_resolved_classes.h
+++ b/runtime/dex_cache_resolved_classes.h
@@ -26,8 +26,11 @@
// Data structure for passing around which classes belonging to a dex cache / dex file are resolved.
class DexCacheResolvedClasses {
public:
- DexCacheResolvedClasses(const std::string& dex_location, uint32_t location_checksum)
+ DexCacheResolvedClasses(const std::string& dex_location,
+ const std::string& base_location,
+ uint32_t location_checksum)
: dex_location_(dex_location),
+ base_location_(base_location),
location_checksum_(location_checksum) {}
// Only compare the key elements, ignore the resolved classes.
@@ -35,6 +38,7 @@
if (location_checksum_ != other.location_checksum_) {
return static_cast<int>(location_checksum_ - other.location_checksum_);
}
+ // Don't need to compare base_location_ since dex_location_ has more info.
return dex_location_.compare(other.dex_location_);
}
@@ -47,6 +51,10 @@
return dex_location_;
}
+ const std::string& GetBaseLocation() const {
+ return base_location_;
+ }
+
uint32_t GetLocationChecksum() const {
return location_checksum_;
}
@@ -57,6 +65,7 @@
private:
const std::string dex_location_;
+ const std::string base_location_;
const uint32_t location_checksum_;
// Array of resolved class def indexes.
mutable std::unordered_set<uint16_t> classes_;
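As an illustration of the new field (the multidex location syntax shown is an assumption for illustration; ART derives both strings internally): a secondary multidex file shares its base location with the primary file, which is why comparing dex_location_ alone suffices in the comparison above.

    DexCacheResolvedClasses primary(
        /* dex_location */ "/data/app/foo/base.apk",
        /* base_location */ "/data/app/foo/base.apk",
        /* location_checksum */ 0x12345678u);
    DexCacheResolvedClasses secondary(
        /* dex_location */ "/data/app/foo/base.apk:classes2.dex",  // assumed syntax
        /* base_location */ "/data/app/foo/base.apk",
        /* location_checksum */ 0x9abcdef0u);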
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 3df4e98..bbffbbb 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -251,6 +251,14 @@
return true;
}
+bool DexFileVerifier::CheckSizeLimit(uint32_t size, uint32_t limit, const char* label) {
+ if (size > limit) {
+ ErrorStringPrintf("Size(%u) should not exceed limit(%u) for %s.", size, limit, label);
+ return false;
+ }
+ return true;
+}
+
bool DexFileVerifier::CheckHeader() {
// Check file size from the header.
uint32_t expected_size = header_->file_size_;
@@ -298,10 +306,12 @@
header_->type_ids_size_,
4,
"type-ids") &&
+ CheckSizeLimit(header_->type_ids_size_, DexFile::kDexNoIndex16, "type-ids") &&
CheckValidOffsetAndSize(header_->proto_ids_off_,
header_->proto_ids_size_,
4,
"proto-ids") &&
+ CheckSizeLimit(header_->proto_ids_size_, DexFile::kDexNoIndex16, "proto-ids") &&
CheckValidOffsetAndSize(header_->field_ids_off_,
header_->field_ids_size_,
4,
@@ -1786,13 +1796,8 @@
while (curr_it.HasNext() && prev_it.HasNext()) {
uint16_t prev_idx = prev_it.GetTypeIdx();
uint16_t curr_idx = curr_it.GetTypeIdx();
- if (prev_idx == DexFile::kDexNoIndex16) {
- break;
- }
- if (UNLIKELY(curr_idx == DexFile::kDexNoIndex16)) {
- ErrorStringPrintf("Out-of-order proto_id arguments");
- return false;
- }
+ DCHECK_NE(prev_idx, DexFile::kDexNoIndex16);
+ DCHECK_NE(curr_idx, DexFile::kDexNoIndex16);
if (prev_idx < curr_idx) {
break;
@@ -1804,6 +1809,12 @@
prev_it.Next();
curr_it.Next();
}
+ if (!curr_it.HasNext()) {
+ // Either a duplicate ProtoId or a ProtoId with a shorter argument list follows
+ // a ProtoId with a longer one. Both cases are forbidden by the specification.
+ ErrorStringPrintf("Out-of-order proto_id arguments");
+ return false;
+ }
}
}
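For concreteness, the ordering the rewritten loop enforces can be sketched with a small helper (illustrative, not the verifier's code): among proto_ids sharing a return type, argument type lists must be in strictly increasing lexicographic order, with a strict prefix sorting before any list that extends it, so both a duplicate and a shorter list following a longer one fail the check.

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Returns true iff `prev` may legally precede `curr` among proto_ids that
    // share a return type: strict lexicographic order over argument type indices.
    bool ProtoArgsStrictlyOrdered(const std::vector<uint16_t>& prev,
                                  const std::vector<uint16_t>& curr) {
      return std::lexicographical_compare(prev.begin(), prev.end(),
                                          curr.begin(), curr.end());
    }
    // Example: {} (foo()V) precedes {int_type_idx} (foo(I)V), matching the
    // ProtoOrdering test added further below.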
@@ -2358,7 +2369,8 @@
static std::string GetStringOrError(const uint8_t* const begin,
const DexFile::Header* const header,
uint32_t string_idx) {
- if (header->string_ids_size_ < string_idx) {
+ // The `string_idx` is not guaranteed to be valid yet.
+ if (header->string_ids_size_ <= string_idx) {
return "(error)";
}
@@ -2375,9 +2387,11 @@
static std::string GetClassOrError(const uint8_t* const begin,
const DexFile::Header* const header,
uint32_t class_idx) {
- if (header->type_ids_size_ < class_idx) {
- return "(error)";
- }
+ // The `class_idx` is either `FieldId::class_idx_` or `MethodId::class_idx_` and
+ // it has already been checked in `DexFileVerifier::CheckClassDataItemField()`
+ // or `DexFileVerifier::CheckClassDataItemMethod()`, respectively, to match
+ // a valid defining class.
+ CHECK_LT(class_idx, header->type_ids_size_);
const DexFile::TypeId* type_id =
reinterpret_cast<const DexFile::TypeId*>(begin + header->type_ids_off_) + class_idx;
@@ -2390,9 +2404,8 @@
static std::string GetFieldDescriptionOrError(const uint8_t* const begin,
const DexFile::Header* const header,
uint32_t idx) {
- if (header->field_ids_size_ < idx) {
- return "(error)";
- }
+ // The `idx` has already been checked in `DexFileVerifier::CheckClassDataItemField()`.
+ CHECK_LT(idx, header->field_ids_size_);
const DexFile::FieldId* field_id =
reinterpret_cast<const DexFile::FieldId*>(begin + header->field_ids_off_) + idx;
@@ -2408,9 +2421,8 @@
static std::string GetMethodDescriptionOrError(const uint8_t* const begin,
const DexFile::Header* const header,
uint32_t idx) {
- if (header->method_ids_size_ < idx) {
- return "(error)";
- }
+ // The `idx` has already been checked in `DexFileVerifier::CheckClassDataItemMethod()`.
+ CHECK_LT(idx, header->method_ids_size_);
const DexFile::MethodId* method_id =
reinterpret_cast<const DexFile::MethodId*>(begin + header->method_ids_off_) + idx;
@@ -2608,7 +2620,13 @@
*error_msg = StringPrintf("Constructor %" PRIu32 "(%s) is not flagged correctly wrt/ static.",
method_index,
GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
- return false;
+ if (header_->GetVersion() >= DexFile::kDefaultMethodsVersion) {
+ return false;
+ } else {
+ // Allow in older versions, but warn.
+ LOG(WARNING) << "This dex file is invalid and will be rejected in the future. Error is: "
+ << *error_msg;
+ }
}
}
// Check that static and private methods, as well as constructors, are in the direct methods list,
@@ -2662,7 +2680,13 @@
*error_msg = StringPrintf("Constructor %u(%s) must not be abstract or native",
method_index,
GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
- return false;
+ if (header_->GetVersion() >= DexFile::kDefaultMethodsVersion) {
+ return false;
+ } else {
+ // Allow in older versions, but warn.
+ LOG(WARNING) << "This dex file is invalid and will be rejected in the future. Error is: "
+ << *error_msg;
+ }
}
if ((method_access_flags & kAccAbstract) != 0) {
// Abstract methods are not allowed to have the following flags.
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index be0e6d8..90409db 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -49,6 +49,8 @@
// Checks whether the offset is zero (when size is zero) or that the offset falls within the area
// claimed by the file.
bool CheckValidOffsetAndSize(uint32_t offset, uint32_t size, size_t alignment, const char* label);
+ // Checks whether the size is less than the limit.
+ bool CheckSizeLimit(uint32_t size, uint32_t limit, const char* label);
bool CheckIndex(uint32_t field, uint32_t limit, const char* label);
bool CheckHeader();
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 344d186..3741c1e 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -57,7 +57,14 @@
255, 255, 255, 255
};
-static inline uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
+// Make the Dex file version 37.
+static void MakeDexVersion37(DexFile* dex_file) {
+ size_t offset = OFFSETOF_MEMBER(DexFile::Header, magic_) + 6;
+ CHECK_EQ(*(dex_file->Begin() + offset), '5');
+ *(const_cast<uint8_t*>(dex_file->Begin()) + offset) = '7';
+}
+
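For reference, the 8-byte dex header magic is "dex\n" followed by the version string "035\0"; the helper above rewrites the ASCII '5' at byte offset 6 to '7', turning the file into version 037, for which the stricter constructor-flag checks exercised below become hard errors rather than warnings.

    // Magic layout the helper relies on:
    // bytes:   'd' 'e' 'x' '\n' '0' '3' '5' '\0'
    // offsets:  0   1   2   3    4   5   6   7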
+static inline std::unique_ptr<uint8_t[]> DecodeBase64(const char* src, size_t* dst_size) {
std::vector<uint8_t> tmp;
uint32_t t = 0, y = 0;
int g = 3;
@@ -100,7 +107,7 @@
*dst_size = 0;
}
std::copy(tmp.begin(), tmp.end(), dst.get());
- return dst.release();
+ return dst;
}
static void FixUpChecksum(uint8_t* dex_file) {
@@ -113,25 +120,18 @@
header->checksum_ = adler_checksum;
}
-// Custom deleter. Necessary to clean up the memory we use (to be able to mutate).
-struct DexFileDeleter {
- void operator()(DexFile* in) {
- if (in != nullptr) {
- delete[] in->Begin();
- delete in;
- }
- }
-};
-
-using DexFileUniquePtr = std::unique_ptr<DexFile, DexFileDeleter>;
-
class DexFileVerifierTest : public CommonRuntimeTest {
protected:
void VerifyModification(const char* dex_file_base64_content,
const char* location,
std::function<void(DexFile*)> f,
const char* expected_error) {
- DexFileUniquePtr dex_file(WrapAsDexFile(dex_file_base64_content));
+ size_t length;
+ std::unique_ptr<uint8_t[]> dex_bytes = DecodeBase64(dex_file_base64_content, &length);
+ CHECK(dex_bytes != nullptr);
+ // Note: `dex_file` will be destroyed before `dex_bytes`.
+ std::unique_ptr<DexFile> dex_file(
+ new DexFile(dex_bytes.get(), length, "tmp", 0, nullptr, nullptr));
f(dex_file.get());
FixUpChecksum(const_cast<uint8_t*>(dex_file->Begin()));
@@ -150,15 +150,6 @@
}
}
}
-
- private:
- static DexFile* WrapAsDexFile(const char* dex_file_content_in_base_64) {
- // Decode base64.
- size_t length;
- uint8_t* dex_bytes = DecodeBase64(dex_file_content_in_base_64, &length);
- CHECK(dex_bytes != nullptr);
- return new DexFile(dex_bytes, length, "tmp", 0, nullptr, nullptr);
- }
};
static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
@@ -290,7 +281,9 @@
// Find the method data for the first method with the given name (from class 0). Note: the pointer
// is to the access flags, so that the caller doesn't have to handle the leb128-encoded method-index
// delta.
-static const uint8_t* FindMethodData(const DexFile* dex_file, const char* name) {
+static const uint8_t* FindMethodData(const DexFile* dex_file,
+ const char* name,
+ /*out*/ uint32_t* method_idx = nullptr) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(0);
const uint8_t* class_data = dex_file->GetClassData(class_def);
@@ -316,6 +309,9 @@
const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
const char* str = dex_file->GetStringData(string_id);
if (strcmp(name, str) == 0) {
+ if (method_idx != nullptr) {
+ *method_idx = method_index;
+ }
DecodeUnsignedLeb128(&trailing);
return trailing;
}
@@ -449,6 +445,7 @@
kMethodFlagsTestDex,
"method_flags_constructor_native_nocode",
[&](DexFile* dex_file) {
+ MakeDexVersion37(dex_file);
ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
@@ -461,6 +458,7 @@
kMethodFlagsTestDex,
"method_flags_constructor_abstract_nocode",
[&](DexFile* dex_file) {
+ MakeDexVersion37(dex_file);
ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
@@ -521,6 +519,7 @@
kMethodFlagsTestDex,
"init_not_allowed_flags",
[&](DexFile* dex_file) {
+ MakeDexVersion37(dex_file);
ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized);
ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized);
@@ -683,6 +682,22 @@
}
}
+TEST_F(DexFileVerifierTest, B28552165) {
+ // Regression test for bad error string retrieval in different situations.
+ // Using invalid access flags to trigger the error.
+ VerifyModification(
+ kMethodFlagsTestDex,
+ "b28552165",
+ [](DexFile* dex_file) {
+ OrMaskToMethodFlags(dex_file, "foo", kAccPublic | kAccProtected);
+ uint32_t method_idx;
+ FindMethodData(dex_file, "foo", &method_idx);
+ auto* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(method_idx));
+ method_id->name_idx_ = dex_file->NumStringIds();
+ },
+ "Method may have only one of public/protected/private, LMethodFlags;.(error)");
+}
+
// Set of dex files for interface method tests. As it's not as easy to mutate method names, it's
// just easier to break up bad cases.
@@ -725,13 +740,6 @@
return result;
}
-// Make the Dex file version 37.
-static void MakeDexVersion37(DexFile* dex_file) {
- size_t offset = OFFSETOF_MEMBER(DexFile::Header, magic_) + 6;
- CHECK_EQ(*(dex_file->Begin() + offset), '5');
- *(const_cast<uint8_t*>(dex_file->Begin()) + offset) = '7';
-}
-
TEST_F(DexFileVerifierTest, MethodAccessFlagsInterfaces) {
VerifyModification(
kMethodFlagsInterface,
@@ -1436,4 +1444,81 @@
}
}
+// Generated from
+//
+// .class LOverloading;
+//
+// .super Ljava/lang/Object;
+//
+// .method public static foo()V
+// .registers 1
+// return-void
+// .end method
+//
+// .method public static foo(I)V
+// .registers 1
+// return-void
+// .end method
+static const char kProtoOrderingTestDex[] =
+ "ZGV4CjAzNQA1L+ABE6voQ9Lr4Ci//efB53oGnDr5PinsAQAAcAAAAHhWNBIAAAAAAAAAAFgBAAAG"
+ "AAAAcAAAAAQAAACIAAAAAgAAAJgAAAAAAAAAAAAAAAIAAACwAAAAAQAAAMAAAAAMAQAA4AAAAOAA"
+ "AADjAAAA8gAAAAYBAAAJAQAADQEAAAAAAAABAAAAAgAAAAMAAAADAAAAAwAAAAAAAAAEAAAAAwAA"
+ "ABQBAAABAAAABQAAAAEAAQAFAAAAAQAAAAAAAAACAAAAAAAAAP////8AAAAASgEAAAAAAAABSQAN"
+ "TE92ZXJsb2FkaW5nOwASTGphdmEvbGFuZy9PYmplY3Q7AAFWAAJWSQADZm9vAAAAAQAAAAAAAAAA"
+ "AAAAAAAAAAEAAAAAAAAAAAAAAAEAAAAOAAAAAQABAAAAAAAAAAAAAQAAAA4AAAACAAAJpAIBCbgC"
+ "AAAMAAAAAAAAAAEAAAAAAAAAAQAAAAYAAABwAAAAAgAAAAQAAACIAAAAAwAAAAIAAACYAAAABQAA"
+ "AAIAAACwAAAABgAAAAEAAADAAAAAAiAAAAYAAADgAAAAARAAAAEAAAAUAQAAAxAAAAIAAAAcAQAA"
+ "ASAAAAIAAAAkAQAAACAAAAEAAABKAQAAABAAAAEAAABYAQAA";
+
+TEST_F(DexFileVerifierTest, ProtoOrdering) {
+ {
+ // The input dex file should be good before modification.
+ ScratchFile tmp;
+ std::string error_msg;
+ std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kProtoOrderingTestDex,
+ tmp.GetFilename().c_str(),
+ &error_msg));
+ ASSERT_TRUE(raw.get() != nullptr) << error_msg;
+ }
+
+ // Modify the order of the ProtoIds for two overloads of "foo" with the
+ // same return type and one having longer parameter list than the other.
+ for (size_t i = 0; i != 2; ++i) {
+ VerifyModification(
+ kProtoOrderingTestDex,
+ "proto_ordering",
+ [i](DexFile* dex_file) {
+ uint32_t method_idx;
+ const uint8_t* data = FindMethodData(dex_file, "foo", &method_idx);
+ CHECK(data != nullptr);
+ // There should be 2 methods called "foo".
+ CHECK_LT(method_idx + 1u, dex_file->NumMethodIds());
+ CHECK_EQ(dex_file->GetMethodId(method_idx).name_idx_,
+ dex_file->GetMethodId(method_idx + 1).name_idx_);
+ CHECK_EQ(dex_file->GetMethodId(method_idx).proto_idx_ + 1u,
+ dex_file->GetMethodId(method_idx + 1).proto_idx_);
+ // Their return types should be the same.
+ uint32_t proto1_idx = dex_file->GetMethodId(method_idx).proto_idx_;
+ const DexFile::ProtoId& proto1 = dex_file->GetProtoId(proto1_idx);
+ const DexFile::ProtoId& proto2 = dex_file->GetProtoId(proto1_idx + 1u);
+ CHECK_EQ(proto1.return_type_idx_, proto2.return_type_idx_);
+ // And the first should not have any parameters while the second should have some.
+ CHECK(!DexFileParameterIterator(*dex_file, proto1).HasNext());
+ CHECK(DexFileParameterIterator(*dex_file, proto2).HasNext());
+ if (i == 0) {
+ // Swap the proto parameters and shorties to break the ordering.
+ std::swap(const_cast<uint32_t&>(proto1.parameters_off_),
+ const_cast<uint32_t&>(proto2.parameters_off_));
+ std::swap(const_cast<uint32_t&>(proto1.shorty_idx_),
+ const_cast<uint32_t&>(proto2.shorty_idx_));
+ } else {
+ // Copy the proto parameters and shorty to create duplicate proto id.
+ const_cast<uint32_t&>(proto1.parameters_off_) = proto2.parameters_off_;
+ const_cast<uint32_t&>(proto1.shorty_idx_) = proto2.shorty_idx_;
+ }
+ },
+ "Out-of-order proto_id arguments");
+ }
+}
+
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index 4e01d80..f3a0d2f 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -77,6 +77,10 @@
extern "C" void art_quick_lock_object(art::mirror::Object*);
extern "C" void art_quick_unlock_object(art::mirror::Object*);
+// Lock entrypoints that do not inline any behavior (e.g., thin-locks).
+extern "C" void art_quick_lock_object_no_inline(art::mirror::Object*);
+extern "C" void art_quick_unlock_object_no_inline(art::mirror::Object*);
+
// Math entrypoints.
extern "C" int64_t art_quick_d2l(double);
extern "C" int64_t art_quick_f2l(float);
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
new file mode 100644
index 0000000..5dafa8b
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_INIT_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_INIT_ENTRYPOINTS_H_
+
+#include "base/logging.h"
+#include "entrypoints/jni/jni_entrypoints.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "quick_alloc_entrypoints.h"
+#include "quick_default_externs.h"
+#include "quick_entrypoints.h"
+
+namespace art {
+
+void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
+ // JNI
+ jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
+
+ // Alloc
+ ResetQuickAllocEntryPoints(qpoints);
+
+ // DexCache
+ qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
+ qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
+ qpoints->pInitializeType = art_quick_initialize_type;
+ qpoints->pResolveString = art_quick_resolve_string;
+
+ // Field
+ qpoints->pSet8Instance = art_quick_set8_instance;
+ qpoints->pSet8Static = art_quick_set8_static;
+ qpoints->pSet16Instance = art_quick_set16_instance;
+ qpoints->pSet16Static = art_quick_set16_static;
+ qpoints->pSet32Instance = art_quick_set32_instance;
+ qpoints->pSet32Static = art_quick_set32_static;
+ qpoints->pSet64Instance = art_quick_set64_instance;
+ qpoints->pSet64Static = art_quick_set64_static;
+ qpoints->pSetObjInstance = art_quick_set_obj_instance;
+ qpoints->pSetObjStatic = art_quick_set_obj_static;
+ qpoints->pGetByteInstance = art_quick_get_byte_instance;
+ qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
+ qpoints->pGetShortInstance = art_quick_get_short_instance;
+ qpoints->pGetCharInstance = art_quick_get_char_instance;
+ qpoints->pGet32Instance = art_quick_get32_instance;
+ qpoints->pGet64Instance = art_quick_get64_instance;
+ qpoints->pGetObjInstance = art_quick_get_obj_instance;
+ qpoints->pGetByteStatic = art_quick_get_byte_static;
+ qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
+ qpoints->pGetShortStatic = art_quick_get_short_static;
+ qpoints->pGetCharStatic = art_quick_get_char_static;
+ qpoints->pGet32Static = art_quick_get32_static;
+ qpoints->pGet64Static = art_quick_get64_static;
+ qpoints->pGetObjStatic = art_quick_get_obj_static;
+
+ // Array
+ qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check;
+ qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check;
+ qpoints->pAputObject = art_quick_aput_obj;
+ qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
+
+ // JNI
+ qpoints->pJniMethodStart = JniMethodStart;
+ qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ qpoints->pJniMethodEnd = JniMethodEnd;
+ qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
+ qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+ qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
+
+ // Locks
+ if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) {
+ qpoints->pLockObject = art_quick_lock_object_no_inline;
+ qpoints->pUnlockObject = art_quick_unlock_object_no_inline;
+ } else {
+ qpoints->pLockObject = art_quick_lock_object;
+ qpoints->pUnlockObject = art_quick_unlock_object;
+ }
+
+ // Invocation
+ qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
+ qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
+ qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
+
+ // Thread
+ qpoints->pTestSuspend = art_quick_test_suspend;
+
+ // Throws
+ qpoints->pDeliverException = art_quick_deliver_exception;
+ qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
+ qpoints->pThrowDivZero = art_quick_throw_div_zero;
+ qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
+ qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
+ qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
+
+ // Deoptimize
+ qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_INIT_ENTRYPOINTS_H_
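Per-architecture entrypoint initializers can now delegate to this helper and override only what differs; a minimal sketch (the override shown is hypothetical, not part of this change):

    void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
      DefaultInitEntryPoints(jpoints, qpoints);
      // Architecture-specific overrides follow, e.g. (hypothetical):
      // qpoints->pMemcpy = art_quick_memcpy;
    }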
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index da6af72..e9cdbb7 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2174,7 +2174,8 @@
imt_index % mirror::Class::kImtSize, sizeof(void*));
if (LIKELY(conflict_method->IsRuntimeMethod())) {
ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
- method = current_table->Lookup(interface_method);
+ DCHECK(current_table != nullptr);
+ method = current_table->Lookup(interface_method, sizeof(void*));
} else {
// It seems we aren't really a conflict method!
method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*));
@@ -2225,34 +2226,13 @@
ArtMethod* conflict_method = cls->GetEmbeddedImTableEntry(
imt_index % mirror::Class::kImtSize, sizeof(void*));
if (conflict_method->IsRuntimeMethod()) {
- ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
- Runtime* runtime = Runtime::Current();
- LinearAlloc* linear_alloc = (cls->GetClassLoader() == nullptr)
- ? runtime->GetLinearAlloc()
- : cls->GetClassLoader()->GetAllocator();
- bool is_new_entry = (conflict_method == runtime->GetImtConflictMethod());
-
- // Create a new entry if the existing one is the shared conflict method.
- ArtMethod* new_conflict_method = is_new_entry
- ? runtime->CreateImtConflictMethod(linear_alloc)
- : conflict_method;
-
- // Allocate a new table. Note that we will leak this table at the next conflict,
- // but that's a tradeoff compared to making the table fixed size.
- void* data = linear_alloc->Alloc(
- self, ImtConflictTable::ComputeSizeWithOneMoreEntry(current_table));
- CHECK(data != nullptr) << "Out of memory";
- ImtConflictTable* new_table = new (data) ImtConflictTable(
- current_table, interface_method, method);
-
- // Do a fence to ensure threads see the data in the table before it is assigned
- // to the conlict method.
- // Note that there is a race in the presence of multiple threads and we may leak
- // memory from the LinearAlloc, but that's a tradeoff compared to using
- // atomic operations.
- QuasiAtomic::ThreadFenceRelease();
- new_conflict_method->SetImtConflictTable(new_table);
- if (is_new_entry) {
+ ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
+ cls.Get(),
+ conflict_method,
+ interface_method,
+ method,
+ /*force_new_conflict_method*/false);
+ if (new_conflict_method != conflict_method) {
// Update the IMT if we create a new conflict method. No fence needed here, as the
// data is consistent.
cls->SetEmbeddedImTableEntry(imt_index % mirror::Class::kImtSize,
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index c2f772f..df5aa0a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2687,8 +2687,8 @@
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
- if ((gc_type == collector::kGcTypeFull) && runtime->UseJit()) {
- // It's time to clear all inline caches, in case some classes can be unloaded.
+ // It's time to clear all inline caches, in case some classes can be unloaded.
+ if ((gc_type == collector::kGcTypeFull) && (runtime->GetJit() != nullptr)) {
runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self);
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index d386c74..78c570f 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -880,7 +880,7 @@
class ForwardObjectAdapter {
public:
- ALWAYS_INLINE ForwardObjectAdapter(const FixupVisitor* visitor) : visitor_(visitor) {}
+ ALWAYS_INLINE explicit ForwardObjectAdapter(const FixupVisitor* visitor) : visitor_(visitor) {}
template <typename T>
ALWAYS_INLINE T* operator()(T* src) const {
@@ -893,7 +893,7 @@
class ForwardCodeAdapter {
public:
- ALWAYS_INLINE ForwardCodeAdapter(const FixupVisitor* visitor)
+ ALWAYS_INLINE explicit ForwardCodeAdapter(const FixupVisitor* visitor)
: visitor_(visitor) {}
template <typename T>
@@ -914,10 +914,26 @@
pointer_size_(pointer_size) {}
virtual void Visit(ArtMethod* method) NO_THREAD_SAFETY_ANALYSIS {
- if (fixup_heap_objects_) {
- method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this), pointer_size_);
+ // TODO: Separate visitor for runtime vs normal methods.
+ if (UNLIKELY(method->IsRuntimeMethod())) {
+ ImtConflictTable* table = method->GetImtConflictTable(pointer_size_);
+ if (table != nullptr) {
+ ImtConflictTable* new_table = ForwardObject(table);
+ if (table != new_table) {
+ method->SetImtConflictTable(new_table, pointer_size_);
+ }
+ }
+ const void* old_code = method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_);
+ const void* new_code = ForwardCode(old_code);
+ if (old_code != new_code) {
+ method->SetEntryPointFromQuickCompiledCodePtrSize(new_code, pointer_size_);
+ }
+ } else {
+ if (fixup_heap_objects_) {
+ method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this), pointer_size_);
+ }
+ method->UpdateEntrypoints<kWithoutReadBarrier>(ForwardCodeAdapter(this), pointer_size_);
}
- method->UpdateEntrypoints<kWithoutReadBarrier>(ForwardCodeAdapter(this), pointer_size_);
}
private:
@@ -1018,6 +1034,7 @@
const ImageSection& objects_section = image_header.GetImageSection(ImageHeader::kSectionObjects);
uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
+ FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat);
if (fixup_image) {
// Two pass approach, fix up all classes first, then fix up non class-objects.
// The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
@@ -1037,7 +1054,6 @@
ScopedObjectAccess soa(Thread::Current());
timing.NewTiming("Fixup objects");
bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
- FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat);
// Fixup image roots.
CHECK(app_image.InSource(reinterpret_cast<uintptr_t>(
image_header.GetImageRoots<kWithoutReadBarrier>())));
@@ -1104,19 +1120,18 @@
boot_oat,
app_image,
app_oat);
- image_header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
- &method_visitor,
- target_base,
- pointer_size);
+ image_header.VisitPackedArtMethods(&method_visitor, target_base, pointer_size);
}
if (fixup_image) {
{
// Only touches objects in the app image, no need for mutator lock.
TimingLogger::ScopedTiming timing("Fixup fields", &logger);
FixupArtFieldVisitor field_visitor(boot_image, boot_oat, app_image, app_oat);
- image_header.GetImageSection(ImageHeader::kSectionArtFields).VisitPackedArtFields(
- &field_visitor,
- target_base);
+ image_header.VisitPackedArtFields(&field_visitor, target_base);
+ }
+ {
+ TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
+ image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size);
}
// In the app image case, the image methods are actually in the boot image.
image_header.RelocateImageMethods(boot_image.Delta());
diff --git a/runtime/globals.h b/runtime/globals.h
index e7ea6f3..477cbdf 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -51,11 +51,31 @@
static constexpr bool kIsDebugBuild = true;
#endif
-// Whether or not this is a target (vs host) build. Useful in conditionals where ART_TARGET isn't.
+// ART_TARGET - Defined for target builds of ART.
+// ART_TARGET_LINUX - Defined for target Linux builds of ART.
+// ART_TARGET_ANDROID - Defined for target Android builds of ART.
+// Note: Either ART_TARGET_LINUX or ART_TARGET_ANDROID needs to be set when ART_TARGET is set.
+// Note: When ART_TARGET_LINUX is defined mem_map.h will not be using Ashmem for memory mappings
+// (usually only available on Android kernels).
#if defined(ART_TARGET)
+// Useful in conditionals where ART_TARGET isn't.
static constexpr bool kIsTargetBuild = true;
+#if defined(ART_TARGET_LINUX)
+static constexpr bool kIsTargetLinux = true;
+#elif defined(ART_TARGET_ANDROID)
+static constexpr bool kIsTargetLinux = false;
+#else
+#error "Either ART_TARGET_LINUX or ART_TARGET_ANDROID needs to be defined for target builds."
+#endif
#else
static constexpr bool kIsTargetBuild = false;
+#if defined(ART_TARGET_LINUX)
+#error "ART_TARGET_LINUX defined for host build."
+#elif defined(ART_TARGET_ANDROID)
+#error "ART_TARGET_ANDROID defined for host build."
+#else
+static constexpr bool kIsTargetLinux = false;
+#endif
#endif
// Garbage collector constants.
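Code that must differ between the two target flavors can branch either at preprocessing time on the macros or at compile time on the constant; a short hedged sketch:

    #if defined(ART_TARGET_ANDROID)
    // Android-only facilities (e.g. ashmem-backed mappings) are available.
    #elif defined(ART_TARGET_LINUX)
    // Plain target-Linux fallback path.
    #endif

    // Or, where both branches compile everywhere:
    if (kIsTargetLinux) {
      // Avoid Android-kernel-only features, per the mem_map.h note above.
    }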
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 3885c60..9895395 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -505,6 +505,7 @@
// Walk the roots and the heap.
output_->StartNewRecord(HPROF_TAG_HEAP_DUMP_SEGMENT, kHprofTime);
+ simple_roots_.clear();
runtime->VisitRoots(this);
runtime->VisitImageRoots(this);
runtime->GetHeap()->VisitObjectsPaused(VisitObjectCallback, this);
@@ -884,6 +885,14 @@
gc::EqAllocRecordTypesPtr<gc::AllocRecordStackTraceElement>> frames_;
std::unordered_map<const mirror::Object*, const gc::AllocRecordStackTrace*> allocation_records_;
+ // Set used to keep track of what simple root records we have already
+ // emitted, to avoid emitting duplicate entries. The simple root records are
+ // those that contain no other information than the root type and the object
+ // id. A pair of root type and object id is packed into a uint64_t, with
+ // the root type in the upper 32 bits and the object id in the lower 32
+ // bits.
+ std::unordered_set<uint64_t> simple_roots_;
+
friend class GcRootVisitor;
DISALLOW_COPY_AND_ASSIGN(Hprof);
};
@@ -962,10 +971,14 @@
case HPROF_ROOT_MONITOR_USED:
case HPROF_ROOT_INTERNED_STRING:
case HPROF_ROOT_DEBUGGER:
- case HPROF_ROOT_VM_INTERNAL:
- __ AddU1(heap_tag);
- __ AddObjectId(obj);
+ case HPROF_ROOT_VM_INTERNAL: {
+ uint64_t key = (static_cast<uint64_t>(heap_tag) << 32) | PointerToLowMemUInt32(obj);
+ if (simple_roots_.insert(key).second) {
+ __ AddU1(heap_tag);
+ __ AddObjectId(obj);
+ }
break;
+ }
// ID: object ID
// ID: JNI global ref ID
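To make the packing concrete (the values are illustrative, not real tags): for heap_tag 0x8E and object id 0x12345678, the dedup key is

    uint64_t key = (static_cast<uint64_t>(0x8E) << 32) | 0x12345678u;
    // == 0x0000008E12345678

so two simple roots are deduplicated only when both the root type and the object id match.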
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index e3307d8..ea75a62 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -19,6 +19,8 @@
#include "image.h"
+#include "art_method.h"
+
namespace art {
template <ReadBarrierOption kReadBarrierOption>
@@ -42,6 +44,20 @@
return image_roots;
}
+template <typename Visitor>
+inline void ImageHeader::VisitPackedImtConflictTables(const Visitor& visitor,
+ uint8_t* base,
+ size_t pointer_size) const {
+ const ImageSection& section = GetImageSection(kSectionIMTConflictTables);
+ for (size_t pos = 0; pos < section.Size(); ) {
+ auto* table = reinterpret_cast<ImtConflictTable*>(base + section.Offset() + pos);
+ table->Visit([&visitor](const std::pair<ArtMethod*, ArtMethod*>& methods) {
+ return std::make_pair(visitor(methods.first), visitor(methods.second));
+ }, pointer_size);
+ pos += table->ComputeSize(pointer_size);
+ }
+}
+
} // namespace art
#endif // ART_RUNTIME_IMAGE_INL_H_
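Any callable mapping an ArtMethod* to its (possibly forwarded) address can serve as the visitor; a minimal sketch that merely counts entries without relocating anything (illustrative; `image_begin` is an assumed local):

    size_t count = 0;
    image_header.VisitPackedImtConflictTables(
        [&count](ArtMethod* method) { ++count; return method; },
        /* base */ image_begin,
        /* pointer_size */ sizeof(void*));

The image_space.cc hunk earlier uses the same shape with its FixupObjectAdapter.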
diff --git a/runtime/image.cc b/runtime/image.cc
index 1f54e3e..a9552c2 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '7', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '9', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
@@ -147,9 +147,10 @@
return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End();
}
-void ImageSection::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
- for (size_t pos = 0; pos < Size(); ) {
- auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + Offset() + pos);
+void ImageHeader::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
+ const ImageSection& fields = GetFieldsSection();
+ for (size_t pos = 0; pos < fields.Size(); ) {
+ auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + fields.Offset() + pos);
for (size_t i = 0; i < array->size(); ++i) {
visitor->Visit(&array->At(i, sizeof(ArtField)));
}
@@ -157,18 +158,25 @@
}
}
-void ImageSection::VisitPackedArtMethods(ArtMethodVisitor* visitor,
- uint8_t* base,
- size_t pointer_size) const {
+void ImageHeader::VisitPackedArtMethods(ArtMethodVisitor* visitor,
+ uint8_t* base,
+ size_t pointer_size) const {
const size_t method_alignment = ArtMethod::Alignment(pointer_size);
const size_t method_size = ArtMethod::Size(pointer_size);
- for (size_t pos = 0; pos < Size(); ) {
- auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + Offset() + pos);
+ const ImageSection& methods = GetMethodsSection();
+ for (size_t pos = 0; pos < methods.Size(); ) {
+ auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + methods.Offset() + pos);
for (size_t i = 0; i < array->size(); ++i) {
visitor->Visit(&array->At(i, method_size, method_alignment));
}
pos += array->ComputeSize(array->size(), method_size, method_alignment);
}
+ const ImageSection& runtime_methods = GetRuntimeMethodsSection();
+ for (size_t pos = 0; pos < runtime_methods.Size(); ) {
+ auto* method = reinterpret_cast<ArtMethod*>(base + runtime_methods.Offset() + pos);
+ visitor->Visit(method);
+ pos += method_size;
+ }
}
} // namespace art
diff --git a/runtime/image.h b/runtime/image.h
index 8e5dbad..2ea9af7 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -64,12 +64,6 @@
return offset - offset_ < size_;
}
- // Visit ArtMethods in the section starting at base.
- void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const;
-
- // Visit ArtMethods in the section starting at base.
- void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const;
-
private:
uint32_t offset_;
uint32_t size_;
@@ -200,6 +194,8 @@
kSectionObjects,
kSectionArtFields,
kSectionArtMethods,
+ kSectionRuntimeMethods,
+ kSectionIMTConflictTables,
kSectionDexCacheArrays,
kSectionInternedStrings,
kSectionClassTable,
@@ -211,10 +207,19 @@
void SetImageMethod(ImageMethod index, ArtMethod* method);
const ImageSection& GetImageSection(ImageSections index) const;
+
const ImageSection& GetMethodsSection() const {
return GetImageSection(kSectionArtMethods);
}
+ const ImageSection& GetRuntimeMethodsSection() const {
+ return GetImageSection(kSectionRuntimeMethods);
+ }
+
+ const ImageSection& GetFieldsSection() const {
+ return GetImageSection(ImageHeader::kSectionArtFields);
+ }
+
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::Object* GetImageRoot(ImageRoot image_root) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -265,6 +270,19 @@
return boot_image_size_ != 0u;
}
+ // Visit ArtMethods in the section starting at base. Includes runtime methods.
+ // TODO: Delete base parameter if it is always equal to GetImageBegin.
+ void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const;
+
+ // Visit ArtMethods in the section starting at base.
+ // TODO: Delete base parameter if it is always equal to GetImageBegin.
+ void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const;
+
+ template <typename Visitor>
+ void VisitPackedImtConflictTables(const Visitor& visitor,
+ uint8_t* base,
+ size_t pointer_size) const;
+
private:
static const uint8_t kImageMagic[4];
static const uint8_t kImageVersion[4];
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 97dbe5d..6c630cc 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -303,8 +303,13 @@
shadow_frame.GetMethod()->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
+ // Lock counting is a special version of accessibility checks, and for simplicity and
+ // reduction of template parameters, we gate it behind access-checks mode.
+ ArtMethod* method = shadow_frame.GetMethod();
+ DCHECK(!method->SkipAccessChecks() || !method->MustCountLocks());
+
bool transaction_active = Runtime::Current()->IsActiveTransaction();
- if (LIKELY(shadow_frame.GetMethod()->SkipAccessChecks())) {
+ if (LIKELY(method->SkipAccessChecks())) {
// Enter the "without access check" interpreter.
if (kInterpreterImplKind == kMterpImplKind) {
if (transaction_active) {
@@ -487,6 +492,10 @@
// Are we executing the first shadow frame?
bool first = true;
while (shadow_frame != nullptr) {
+ // We do not want to recover lock state for lock counting when deoptimizing. Currently,
+ // the compiler should not have compiled a method that failed structured-locking checks.
+ DCHECK(!shadow_frame->GetMethod()->MustCountLocks());
+
self->SetTopOfShadowStack(shadow_frame);
const DexFile::CodeItem* code_item = shadow_frame->GetMethod()->GetCodeItem();
const uint32_t dex_pc = shadow_frame->GetDexPC();
@@ -506,8 +515,24 @@
// instruction, as it already executed.
// TODO: should be tested more once b/17586779 is fixed.
const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]);
- DCHECK(instr->IsInvoke());
- new_dex_pc = dex_pc + instr->SizeInCodeUnits();
+ if (instr->IsInvoke()) {
+ new_dex_pc = dex_pc + instr->SizeInCodeUnits();
+ } else if (instr->Opcode() == Instruction::NEW_INSTANCE) {
+        // It's possible to deoptimize at a NEW_INSTANCE dex instruction that allocates a
+        // java.lang.String, which is turned into a call to StringFactory.newEmptyString().
+ if (kIsDebugBuild) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ mirror::Class* klass = class_linker->ResolveType(
+ instr->VRegB_21c(), shadow_frame->GetMethod());
+ DCHECK(klass->IsStringClass());
+ }
+ // Skip the dex instruction since we essentially come back from an invocation.
+ new_dex_pc = dex_pc + instr->SizeInCodeUnits();
+ } else {
+ DCHECK(false) << "Unexpected instruction opcode " << instr->Opcode()
+ << " at dex_pc " << dex_pc
+ << " of method: " << PrettyMethod(shadow_frame->GetMethod(), false);
+ }
} else {
// Nothing to do, the dex_pc is the one at which the code requested
// the deoptimization.
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index e5b89e2..69376fd 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -95,7 +95,9 @@
StackHandleScope<1> hs(self);
Handle<Object> h_ref(hs.NewHandle(ref));
h_ref->MonitorEnter(self);
- frame->GetLockCountData().AddMonitor<kMonitorCounting>(self, h_ref.Get());
+ if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
+ frame->GetLockCountData().AddMonitor(self, h_ref.Get());
+ }
}
template <bool kMonitorCounting>
@@ -107,7 +109,19 @@
StackHandleScope<1> hs(self);
Handle<Object> h_ref(hs.NewHandle(ref));
h_ref->MonitorExit(self);
- frame->GetLockCountData().RemoveMonitorOrThrow<kMonitorCounting>(self, h_ref.Get());
+ if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
+ frame->GetLockCountData().RemoveMonitorOrThrow(self, h_ref.Get());
+ }
+}
+
+template <bool kMonitorCounting>
+static inline bool DoMonitorCheckOnExit(Thread* self, ShadowFrame* frame)
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_) {
+ if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
+ return frame->GetLockCountData().CheckAllMonitorsReleasedOrThrow(self);
+ }
+ return true;
}
void AbortTransactionF(Thread* self, const char* fmt, ...)
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 13cfb98..f03036b 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -104,8 +104,7 @@
} HANDLE_INSTRUCTION_END();
#define HANDLE_MONITOR_CHECKS() \
- if (!shadow_frame.GetLockCountData(). \
- CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self)) { \
+ if (!DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame)) { \
HANDLE_PENDING_EXCEPTION(); \
}
@@ -2584,7 +2583,7 @@
instrumentation);
if (found_dex_pc == DexFile::kDexNoIndex) {
// Structured locking is to be enforced for abnormal termination, too.
- shadow_frame.GetLockCountData().CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self);
+ DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame);
return JValue(); /* Handled in caller. */
} else {
int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 4323d4f..18330ba 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -34,8 +34,7 @@
instrumentation); \
if (found_dex_pc == DexFile::kDexNoIndex) { \
/* Structured locking is to be enforced for abnormal termination, too. */ \
- shadow_frame.GetLockCountData(). \
- CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self); \
+ DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame); \
if (interpret_one_instruction) { \
/* Signal mterp to return to caller */ \
shadow_frame.SetDexPC(DexFile::kDexNoIndex); \
@@ -57,8 +56,7 @@
} while (false)
#define HANDLE_MONITOR_CHECKS() \
- if (!shadow_frame.GetLockCountData(). \
- CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self)) { \
+ if (!DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame)) { \
HANDLE_PENDING_EXCEPTION(); \
}
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index e46f9cd..f78e1bc 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -12985,6 +12985,7 @@
* not-taken path. All Dalvik not-taken conditional branch offsets are 2.
*/
.L_check_not_taken_osr:
+ EXPORT_PC
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %ecx
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index 62dce6e..031cec8 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -11961,6 +11961,7 @@
* not-taken path. All Dalvik not-taken conditional branch offsets are 2.
*/
.L_check_not_taken_osr:
+ EXPORT_PC
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
movl $2, OUT_32_ARG2
diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S
index fa03e78..e8c8ca8 100644
--- a/runtime/interpreter/mterp/x86/footer.S
+++ b/runtime/interpreter/mterp/x86/footer.S
@@ -234,6 +234,7 @@
* not-taken path. All Dalvik not-taken conditional branch offsets are 2.
*/
.L_check_not_taken_osr:
+ EXPORT_PC
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %ecx
diff --git a/runtime/interpreter/mterp/x86_64/footer.S b/runtime/interpreter/mterp/x86_64/footer.S
index 54d0cb1..f78f163 100644
--- a/runtime/interpreter/mterp/x86_64/footer.S
+++ b/runtime/interpreter/mterp/x86_64/footer.S
@@ -213,6 +213,7 @@
* not-taken path. All Dalvik not-taken conditional branch offsets are 2.
*/
.L_check_not_taken_osr:
+ EXPORT_PC
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
movl $$2, OUT_32_ARG2
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index a41fd45..d983a9f 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -318,6 +318,7 @@
}
JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
delete raw_vm->GetRuntime();
+ android::ResetNativeLoader();
return JNI_OK;
}
@@ -942,6 +943,11 @@
if (!Runtime::Create(options, ignore_unrecognized)) {
return JNI_ERR;
}
+
+ // Initialize native loader. This step makes sure we have
+ // everything set up before we start using JNI.
+ android::InitializeNativeLoader();
+
Runtime* runtime = Runtime::Current();
bool started = runtime->Start();
if (!started) {
@@ -950,6 +956,7 @@
LOG(WARNING) << "CreateJavaVM failed";
return JNI_ERR;
}
+
*p_env = Thread::Current()->GetJniEnv();
*p_vm = runtime->GetJavaVM();
return JNI_OK;
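The native loader now brackets the VM's lifetime; a sketch of the resulting pairing (simplified control flow, not the actual function bodies):

    // JNI_CreateJavaVM path:
    //   Runtime::Create(options, ignore_unrecognized);
    //   android::InitializeNativeLoader();   // before any JNI use
    // DestroyJavaVM path:
    //   delete raw_vm->GetRuntime();
    //   android::ResetNativeLoader();        // after the runtime is gone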
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index c36543f..e9317a5 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -51,7 +51,7 @@
JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
auto* jit_options = new JitOptions;
- jit_options->use_jit_ = options.GetOrDefault(RuntimeArgumentMap::UseJIT);
+ jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);
jit_options->code_cache_initial_capacity_ =
options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
@@ -102,14 +102,26 @@
static_cast<size_t>(1));
}
+  if (options.Exists(RuntimeArgumentMap::JITInvokeTransitionWeight)) {
+    jit_options->invoke_transition_weight_ =
+        *options.Get(RuntimeArgumentMap::JITInvokeTransitionWeight);
+    if (jit_options->invoke_transition_weight_ > jit_options->warmup_threshold_) {
+      LOG(FATAL) << "Invoke transition weight is above the warmup threshold.";
+    } else if (jit_options->invoke_transition_weight_ == 0) {
+      LOG(FATAL) << "Invoke transition weight cannot be 0.";
+    }
+  } else {
+    jit_options->invoke_transition_weight_ = std::max(
+        jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio,
+        static_cast<size_t>(1));
+  }
+
return jit_options;
}
bool Jit::ShouldUsePriorityThreadWeight() {
- // TODO(calin): verify that IsSensitiveThread covers only the cases we are interested on.
- // In particular if apps can set StrictMode policies for any of their threads, case in which
- // we need to find another way to track sensitive threads.
- return Runtime::Current()->InJankPerceptibleProcessState() && Thread::IsSensitiveThread();
+ return Runtime::Current()->InJankPerceptibleProcessState()
+ && Thread::Current()->IsJitSensitiveThread();
}
void Jit::DumpInfo(std::ostream& os) {
@@ -132,9 +144,11 @@
cumulative_timings_("JIT timings"),
memory_use_("Memory used for compilation", 16),
lock_("JIT memory use lock"),
+ use_jit_compilation_(true),
save_profiling_info_(false) {}
Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
+ DCHECK(options->UseJitCompilation() || options->GetSaveProfilingInfo());
std::unique_ptr<Jit> jit(new Jit);
jit->dump_info_on_shutdown_ = options->DumpJitInfoOnShutdown();
if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) {
@@ -148,6 +162,7 @@
if (jit->GetCodeCache() == nullptr) {
return nullptr;
}
+ jit->use_jit_compilation_ = options->UseJitCompilation();
jit->save_profiling_info_ = options->GetSaveProfilingInfo();
VLOG(jit) << "JIT created with initial_capacity="
<< PrettySize(options->GetCodeCacheInitialCapacity())
@@ -160,8 +175,7 @@
jit->warm_method_threshold_ = options->GetWarmupThreshold();
jit->osr_method_threshold_ = options->GetOsrThreshold();
jit->priority_thread_weight_ = options->GetPriorityThreadWeight();
- jit->transition_weight_ = std::max(
- jit->warm_method_threshold_ / kDefaultTransitionRatio, static_cast<size_t>(1));
+ jit->invoke_transition_weight_ = options->GetInvokeTransitionWeight();
jit->CreateThreadPool();
@@ -227,6 +241,7 @@
}
bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
+ DCHECK(Runtime::Current()->UseJitCompilation());
DCHECK(!method->IsRuntimeMethod());
// Don't compile the method if it has breakpoints.
@@ -331,8 +346,12 @@
}
void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
+ if (!Runtime::Current()->UseJitCompilation()) {
+ // No need to notify if we only use the JIT to save profiles.
+ return;
+ }
jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr && jit->generate_debug_info_) {
+ if (jit->generate_debug_info_) {
DCHECK(jit->jit_types_loaded_ != nullptr);
jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1);
}
@@ -606,22 +625,24 @@
}
// Avoid jumping more than one state at a time.
new_count = std::min(new_count, hot_method_threshold_ - 1);
- } else if (starting_count < hot_method_threshold_) {
- if ((new_count >= hot_method_threshold_) &&
- !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
- DCHECK(thread_pool_ != nullptr);
- thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
- }
- // Avoid jumping more than one state at a time.
- new_count = std::min(new_count, osr_method_threshold_ - 1);
- } else if (starting_count < osr_method_threshold_) {
- if (!with_backedges) {
- // If the samples don't contain any back edge, we don't increment the hotness.
- return;
- }
- if ((new_count >= osr_method_threshold_) && !code_cache_->IsOsrCompiled(method)) {
- DCHECK(thread_pool_ != nullptr);
- thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
+ } else if (use_jit_compilation_) {
+ if (starting_count < hot_method_threshold_) {
+ if ((new_count >= hot_method_threshold_) &&
+ !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+ DCHECK(thread_pool_ != nullptr);
+ thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
+ }
+ // Avoid jumping more than one state at a time.
+ new_count = std::min(new_count, osr_method_threshold_ - 1);
+ } else if (starting_count < osr_method_threshold_) {
+ if (!with_backedges) {
+ // If the samples don't contain any back edge, we don't increment the hotness.
+ return;
+ }
+ if ((new_count >= osr_method_threshold_) && !code_cache_->IsOsrCompiled(method)) {
+ DCHECK(thread_pool_ != nullptr);
+ thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
+ }
}
}
// Update hotness counter
@@ -629,7 +650,8 @@
}
void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
- if (UNLIKELY(Runtime::Current()->GetJit()->JitAtFirstUse())) {
+ Runtime* runtime = Runtime::Current();
+ if (UNLIKELY(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse())) {
// The compiler requires a ProfilingInfo object.
ProfilingInfo::Create(thread, method, /* retry_allocation */ true);
JitCompileTask compile_task(method, JitCompileTask::kCompile);
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 8198c18..f3a6240 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -44,7 +44,7 @@
static constexpr bool kStressMode = kIsDebugBuild;
static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 10000;
static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000;
- static constexpr size_t kDefaultTransitionRatio = 100;
+ static constexpr size_t kDefaultInvokeTransitionWeightRatio = 500;
virtual ~Jit();
static Jit* Create(JitOptions* options, std::string* error_msg);
@@ -87,6 +87,15 @@
return priority_thread_weight_;
}
+ // Returns false if we only need to save profile information and not compile methods.
+ bool UseJitCompilation() const {
+ return use_jit_compilation_;
+ }
+
+ bool SaveProfilingInfo() const {
+ return save_profiling_info_;
+ }
+
// Wait until there is no more pending compilation tasks.
void WaitForCompilationToFinish(Thread* self);
@@ -106,12 +115,12 @@
void NotifyInterpreterToCompiledCodeTransition(Thread* self, ArtMethod* caller)
SHARED_REQUIRES(Locks::mutator_lock_) {
- AddSamples(self, caller, transition_weight_, false);
+ AddSamples(self, caller, invoke_transition_weight_, false);
}
void NotifyCompiledCodeToInterpreterTransition(Thread* self, ArtMethod* callee)
SHARED_REQUIRES(Locks::mutator_lock_) {
- AddSamples(self, callee, transition_weight_, false);
+ AddSamples(self, callee, invoke_transition_weight_, false);
}
// Starts the profile saver if the config options allow profile recording.
@@ -179,13 +188,14 @@
std::unique_ptr<jit::JitCodeCache> code_cache_;
+ bool use_jit_compilation_;
bool save_profiling_info_;
static bool generate_debug_info_;
uint16_t hot_method_threshold_;
uint16_t warm_method_threshold_;
uint16_t osr_method_threshold_;
uint16_t priority_thread_weight_;
- uint16_t transition_weight_;
+ uint16_t invoke_transition_weight_;
std::unique_ptr<ThreadPool> thread_pool_;
DISALLOW_COPY_AND_ASSIGN(Jit);
@@ -206,6 +216,9 @@
uint16_t GetPriorityThreadWeight() const {
return priority_thread_weight_;
}
+ size_t GetInvokeTransitionWeight() const {
+ return invoke_transition_weight_;
+ }
size_t GetCodeCacheInitialCapacity() const {
return code_cache_initial_capacity_;
}
@@ -218,33 +231,34 @@
bool GetSaveProfilingInfo() const {
return save_profiling_info_;
}
- bool UseJIT() const {
- return use_jit_;
+ bool UseJitCompilation() const {
+ return use_jit_compilation_;
}
- void SetUseJIT(bool b) {
- use_jit_ = b;
+ void SetUseJitCompilation(bool b) {
+ use_jit_compilation_ = b;
}
void SetSaveProfilingInfo(bool b) {
save_profiling_info_ = b;
}
void SetJitAtFirstUse() {
- use_jit_ = true;
+ use_jit_compilation_ = true;
compile_threshold_ = 0;
}
private:
- bool use_jit_;
+ bool use_jit_compilation_;
size_t code_cache_initial_capacity_;
size_t code_cache_max_capacity_;
size_t compile_threshold_;
size_t warmup_threshold_;
size_t osr_threshold_;
uint16_t priority_thread_weight_;
+ size_t invoke_transition_weight_;
bool dump_info_on_shutdown_;
bool save_profiling_info_;
JitOptions()
- : use_jit_(false),
+ : use_jit_compilation_(false),
code_cache_initial_capacity_(0),
code_cache_max_capacity_(0),
compile_threshold_(0),
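The rename makes the JIT's two roles explicit: compiling methods and merely recording profile data, and Jit::Create now asserts that at least one of them is requested. A hedged sketch of the resulting option combinations (the profile flag spelling is an assumption for illustration):

    // -Xusejit:true                          -> use_jit_compilation_ = true
    // -Xusejit:false -Xjitsaveprofilinginfo  -> no compilation; the JIT exists
    //                                           only to save profiling info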
diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc
index a3078bc..c99d363 100644
--- a/runtime/jit/offline_profiling_info.cc
+++ b/runtime/jit/offline_profiling_info.cc
@@ -645,7 +645,8 @@
for (auto&& pair : info_) {
const std::string& profile_key = pair.first;
const DexFileData& data = pair.second;
- DexCacheResolvedClasses classes(profile_key, data.checksum);
+ // TODO: Is it OK to use the same location for both base and dex location here?
+ DexCacheResolvedClasses classes(profile_key, profile_key, data.checksum);
classes.AddClasses(data.class_set.begin(), data.class_set.end());
ret.insert(classes);
}
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 7a9d250..e8462a1 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -145,20 +145,25 @@
void ProfileSaver::FetchAndCacheResolvedClasses() {
ScopedTrace trace(__PRETTY_FUNCTION__);
-
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
std::set<DexCacheResolvedClasses> resolved_classes =
class_linker->GetResolvedClasses(/*ignore boot classes*/ true);
MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
uint64_t total_number_of_profile_entries_cached = 0;
+
for (const auto& it : tracked_dex_base_locations_) {
- std::set<DexCacheResolvedClasses> resolved_classes_for_location;
+ std::set<DexCacheResolvedClasses> resolved_classes_for_location;
const std::string& filename = it.first;
const std::set<std::string>& locations = it.second;
for (const DexCacheResolvedClasses& classes : resolved_classes) {
- if (locations.find(classes.GetDexLocation()) != locations.end()) {
+ if (locations.find(classes.GetBaseLocation()) != locations.end()) {
+ VLOG(profiler) << "Added classes for location " << classes.GetBaseLocation()
+ << " (" << classes.GetDexLocation() << ")";
resolved_classes_for_location.insert(classes);
+ } else {
+ VLOG(profiler) << "Location not found " << classes.GetBaseLocation()
+ << " (" << classes.GetDexLocation() << ")";
}
}
ProfileCompilationInfo* info = GetCachedProfiledInfo(filename);
@@ -247,12 +252,17 @@
void* ProfileSaver::RunProfileSaverThread(void* arg) {
Runtime* runtime = Runtime::Current();
- ProfileSaver* profile_saver = reinterpret_cast<ProfileSaver*>(arg);
- CHECK(runtime->AttachCurrentThread("Profile Saver",
- /*as_daemon*/true,
- runtime->GetSystemThreadGroup(),
- /*create_peer*/true));
+ bool attached = runtime->AttachCurrentThread("Profile Saver",
+ /*as_daemon*/true,
+ runtime->GetSystemThreadGroup(),
+ /*create_peer*/true);
+ if (!attached) {
+ CHECK(runtime->IsShuttingDown(Thread::Current()));
+ return nullptr;
+ }
+
+ ProfileSaver* profile_saver = reinterpret_cast<ProfileSaver*>(arg);
profile_saver->Run();
runtime->DetachCurrentThread();
@@ -285,7 +295,7 @@
const std::vector<std::string>& code_paths,
const std::string& foreign_dex_profile_path,
const std::string& app_data_dir) {
- DCHECK(Runtime::Current()->UseJit());
+ DCHECK(Runtime::Current()->SaveProfileInfo());
DCHECK(!output_filename.empty());
DCHECK(jit_code_cache != nullptr);
@@ -520,4 +530,32 @@
<< max_number_of_profile_entries_cached_ << '\n';
}
+
+void ProfileSaver::ForceProcessProfiles() {
+ ProfileSaver* saver = nullptr;
+ {
+ MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
+ saver = instance_;
+ }
+ // TODO(calin): this is not actually thread safe, as the instance_ may have been deleted,
+ // but we only use this in testing, when we know this won't happen.
+ // Refactor the way we handle the instance so that we don't end up in this situation.
+ if (saver != nullptr) {
+ saver->ProcessProfilingInfo();
+ }
+}
+
+bool ProfileSaver::HasSeenMethod(const std::string& profile,
+ const DexFile* dex_file,
+ uint16_t method_idx) {
+ MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
+ if (instance_ != nullptr) {
+ ProfileCompilationInfo* info = instance_->GetCachedProfiledInfo(profile);
+ if (info != nullptr) {
+ return info->ContainsMethod(MethodReference(dex_file, method_idx));
+ }
+ }
+ return false;
+}
+
} // namespace art
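Note on the two test-only hooks added above: ForceProcessProfiles() synchronously flushes the cached profiling info to disk (with the acknowledged instance_ lifetime caveat), and HasSeenMethod() queries the cached profile for a method. A hypothetical gtest sketch of how a test might drive them; the fixture name, scratch path, and the warmed-up dex_file/method_idx are stand-ins, not part of this change:

    // Hypothetical test sketch; ProfileSaverTest, dex_file and method_idx are stand-ins.
    TEST_F(ProfileSaverTest, SeesMethodAfterForcedProcessing) {
      std::string profile = GetScratchDir() + "/primary.prof";
      // ... start the runtime with -Xjitsaveprofilinginfo and execute the method ...
      ProfileSaver::ForceProcessProfiles();  // Flush pending profiling info now.
      EXPECT_TRUE(ProfileSaver::HasSeenMethod(profile, dex_file, method_idx));
    }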
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 0a222bf..4f3cdc2 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -49,6 +49,12 @@
// If the profile saver is running, dumps statistics to the `os`. Otherwise it does nothing.
static void DumpInstanceInfo(std::ostream& os);
+ // Just for testing purposes.
+ static void ForceProcessProfiles();
+ static bool HasSeenMethod(const std::string& profile,
+ const DexFile* dex_file,
+ uint16_t method_idx);
+
private:
ProfileSaver(const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
@@ -65,7 +71,10 @@
void Run() REQUIRES(!Locks::profiler_lock_, !wait_lock_);
// Processes the existing profiling info from the jit code cache and returns
// true if it needed to be saved to disk.
- bool ProcessProfilingInfo();
+ bool ProcessProfilingInfo()
+ REQUIRES(!Locks::profiler_lock_)
+ REQUIRES(!Locks::mutator_lock_);
+
// Returns true if the saver is shutting down (ProfileSaver::Stop() has been called).
bool ShuttingDown(Thread* self) REQUIRES(!Locks::profiler_lock_);
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 5d89c21..771f8ed 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -302,8 +302,9 @@
if (use_ashmem) {
if (!kIsTargetBuild) {
- // When not on Android ashmem is faked using files in /tmp. Ensure that such files won't
- // fail due to ulimit restrictions. If they will then use a regular mmap.
+ // When not on Android (either on the host, or when targeting plain linux) ashmem is faked
+ // using files in /tmp. Ensure that such files won't fail due to ulimit restrictions; if
+ // they would, fall back to a regular mmap.
struct rlimit rlimit_fsize;
CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 3eaf576..597f0d4 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -68,7 +68,7 @@
bool low_4gb,
bool reuse,
std::string* error_msg,
- bool use_ashmem = true);
+ bool use_ashmem = !kIsTargetLinux);
// Create placeholder for a region allocated by direct call to mmap.
// This is useful when we do not have control over the code calling mmap,
@@ -172,7 +172,7 @@
const char* tail_name,
int tail_prot,
std::string* error_msg,
- bool use_ashmem = true);
+ bool use_ashmem = !kIsTargetLinux);
static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
REQUIRES(!Locks::mem_maps_lock_);
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 6dd182a..fd7a125 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -60,10 +60,13 @@
// This is set by the class linker during LinkInterfaceMethods. Prior to that point we do not know
// if any particular method needs to be a default conflict. Used to figure out at runtime if
// invoking this method will throw an exception.
-static constexpr uint32_t kAccDefaultConflict = 0x00800000; // method (runtime)
+static constexpr uint32_t kAccDefaultConflict = 0x00800000; // method (runtime)
// Set by the verifier for a method we do not want the compiler to compile.
-static constexpr uint32_t kAccCompileDontBother = 0x01000000; // method (runtime)
+static constexpr uint32_t kAccCompileDontBother = 0x01000000; // method (runtime)
+
+// Set by the verifier for a method that could not be verified to follow structured locking.
+static constexpr uint32_t kAccMustCountLocks = 0x02000000; // method (runtime)
// Special runtime-only flags.
// Interface and all its super-interfaces with default methods have been recursively initialized.
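The new kAccMustCountLocks flag pairs with the lock-counting changes further down in this patch (the de-templated LockCountData methods in runtime/stack.{h,cc}): rather than compiling lock counting in or out globally via a template parameter, the verifier can now mark individual methods whose locking it could not prove structured. A minimal sketch of the kind of per-method gate this enables; the helper name is hypothetical and the actual call sites are outside this excerpt:

    #include <cstdint>

    // Runtime-only access flag introduced by this change.
    constexpr uint32_t kAccMustCountLocks = 0x02000000;

    // Hypothetical per-method gate: count locks only where the verifier demanded it.
    inline bool MustCountLocks(uint32_t access_flags) {
      return (access_flags & kAccMustCountLocks) != 0;
    }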
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 3680c78..f4bc222 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -215,6 +215,85 @@
obj_ = GcRoot<mirror::Object>(object);
}
+// Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
+
+struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
+ explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFramesNoResolve),
+ method_(nullptr),
+ dex_pc_(0),
+ current_frame_number_(0),
+ wanted_frame_number_(frame) {}
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* m = GetMethod();
+ if (m == nullptr || m->IsRuntimeMethod()) {
+ // Runtime method, upcall, or resolution issue. Skip.
+ return true;
+ }
+
+ // Is this the requested frame?
+ if (current_frame_number_ == wanted_frame_number_) {
+ method_ = m;
+ dex_pc_ = GetDexPc(false /* abort_on_error */);
+ return false;
+ }
+
+ // Look for more.
+ current_frame_number_++;
+ return true;
+ }
+
+ ArtMethod* method_;
+ uint32_t dex_pc_;
+
+ private:
+ size_t current_frame_number_;
+ const size_t wanted_frame_number_;
+};
+
+// This function is inlined so that the VLOG and ATRACE checks are not repeated at every
+// potential tracing point.
+void Monitor::AtraceMonitorLock(Thread* self, mirror::Object* obj, bool is_wait) {
+ if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging) && ATRACE_ENABLED())) {
+ AtraceMonitorLockImpl(self, obj, is_wait);
+ }
+}
+
+void Monitor::AtraceMonitorLockImpl(Thread* self, mirror::Object* obj, bool is_wait) {
+ // Wait() requires a deeper call stack to be useful. Otherwise you'll see "Waiting at
+ // Object.java". Assume that we'll wait a nontrivial amount, so it's OK to do a longer
+ // stack walk than if !is_wait.
+ NthCallerWithDexPcVisitor visitor(self, is_wait ? 1U : 0U);
+ visitor.WalkStack(false);
+ const char* prefix = is_wait ? "Waiting on " : "Locking ";
+
+ const char* filename;
+ int32_t line_number;
+ TranslateLocation(visitor.method_, visitor.dex_pc_, &filename, &line_number);
+
+ // It would be nice to have a stable "ID" for the object here. However, the only stable thing
+ // would be the identity hashcode. But we cannot use IdentityHashcode here: For one, there are
+ // times when it is unsafe to make that call (see stack dumping for an explanation). More
+ // importantly, we would have to give up on thin-locking when adding systrace locks, as the
+ // identity hashcode is stored in the lockword normally (so can't be used with thin-locks).
+ //
+ // Because of thin-locks we also cannot use the monitor id (as there is no monitor). Monitor ids
+ // also do not have to be stable, as the monitor may be deflated.
+ std::string tmp = StringPrintf("%s %d at %s:%d",
+ prefix,
+ (obj == nullptr ? -1 : static_cast<int32_t>(reinterpret_cast<uintptr_t>(obj))),
+ (filename != nullptr ? filename : "null"),
+ line_number);
+ ATRACE_BEGIN(tmp.c_str());
+}
+
+void Monitor::AtraceMonitorUnlock() {
+ if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) {
+ ATRACE_END();
+ }
+}
+
std::string Monitor::PrettyContentionInfo(const std::string& owner_name,
pid_t owner_tid,
ArtMethod* owners_method,
@@ -228,8 +307,8 @@
std::ostringstream oss;
oss << "monitor contention with owner " << owner_name << " (" << owner_tid << ")";
if (owners_method != nullptr) {
- oss << " owner method=" << PrettyMethod(owners_method);
- oss << " from " << owners_filename << ":" << owners_line_number;
+ oss << " at " << PrettyMethod(owners_method);
+ oss << "(" << owners_filename << ":" << owners_line_number << ")";
}
oss << " waiters=" << num_waiters;
return oss.str();
@@ -246,10 +325,10 @@
if (lock_profiling_threshold_ != 0) {
locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
}
- return;
+ break;
} else if (owner_ == self) { // Recursive.
lock_count_++;
- return;
+ break;
}
// Contended.
const bool log_contention = (lock_profiling_threshold_ != 0);
@@ -284,8 +363,9 @@
const char* filename;
int32_t line_number;
TranslateLocation(m, pc, &filename, &line_number);
- oss << " blocking from " << (filename != nullptr ? filename : "null")
- << ":" << line_number;
+ oss << " blocking from "
+ << PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null") << ":"
+ << line_number << ")";
ATRACE_BEGIN(oss.str().c_str());
}
monitor_contenders_.Wait(self); // Still contended so wait.
@@ -318,6 +398,8 @@
}
if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
if (wait_ms > kLongWaitMs && owners_method != nullptr) {
+ uint32_t pc;
+ ArtMethod* m = self->GetCurrentMethod(&pc);
// TODO: We should maybe check that original_owner is still a live thread.
LOG(WARNING) << "Long "
<< PrettyContentionInfo(original_owner_name,
@@ -325,7 +407,7 @@
owners_method,
owners_dex_pc,
num_waiters)
- << " for " << PrettyDuration(MsToNs(wait_ms));
+ << " in " << PrettyMethod(m) << " for " << PrettyDuration(MsToNs(wait_ms));
}
const char* owners_filename;
int32_t owners_line_number;
@@ -348,6 +430,8 @@
monitor_lock_.Lock(self); // Reacquire locks in order.
--num_waiters_;
}
+
+ AtraceMonitorLock(self, GetObject(), false /* is_wait */);
}
static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
@@ -457,6 +541,7 @@
}
if (owner == self) {
// We own the monitor, so nobody else can be in here.
+ AtraceMonitorUnlock();
if (lock_count_ == 0) {
owner_ = nullptr;
locking_method_ = nullptr;
@@ -523,6 +608,11 @@
uintptr_t saved_dex_pc = locking_dex_pc_;
locking_dex_pc_ = 0;
+ AtraceMonitorUnlock(); // For the implicit Unlock() just above. This will only end the deepest
+ // nesting, but that is enough for the visualization, and corresponds to
+ // the single Lock() we do afterwards.
+ AtraceMonitorLock(self, GetObject(), true /* is_wait */);
+
bool was_interrupted = false;
{
// Update thread state. If the GC wakes up, it'll ignore us, knowing
@@ -586,6 +676,8 @@
self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
}
+ AtraceMonitorUnlock(); // End Wait().
+
// Re-acquire the monitor and lock.
Lock(self);
monitor_lock_.Lock(self);
@@ -775,6 +867,7 @@
case LockWord::kUnlocked: {
LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.ReadBarrierState()));
if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, thin_locked)) {
+ AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
// CasLockWord enforces more than the acquire ordering we need here.
return h_obj.Get(); // Success!
}
@@ -790,10 +883,12 @@
lock_word.ReadBarrierState()));
if (!kUseReadBarrier) {
h_obj->SetLockWord(thin_locked, true);
+ AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
return h_obj.Get(); // Success!
} else {
// Use CAS to preserve the read barrier state.
if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, thin_locked)) {
+ AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
return h_obj.Get(); // Success!
}
}
@@ -830,7 +925,7 @@
continue; // Start from the beginning.
default: {
LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
- return h_obj.Get();
+ UNREACHABLE();
}
}
}
@@ -869,11 +964,17 @@
if (!kUseReadBarrier) {
DCHECK_EQ(new_lw.ReadBarrierState(), 0U);
h_obj->SetLockWord(new_lw, true);
+ if (ATRACE_ENABLED()) {
+ ATRACE_END();
+ }
// Success!
return true;
} else {
// Use CAS to preserve the read barrier state.
if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, new_lw)) {
+ if (ATRACE_ENABLED()) {
+ ATRACE_END();
+ }
// Success!
return true;
}
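The AtraceMonitorLock()/AtraceMonitorLockImpl() split above is the usual cheap-guard/slow-path idiom: the ALWAYS_INLINE wrapper costs one well-predicted branch when systrace lock logging is off, while the string formatting and stack walk stay out of line. A self-contained sketch of the same idiom outside ART's macros (names and the atomic flag are illustrative; ART tests VLOG_IS_ON and ATRACE_ENABLED instead):

    #include <atomic>
    #include <cstdio>

    std::atomic<bool> g_lock_tracing_enabled{false};

    // Out-of-line slow path: label construction stays off the fast path.
    void TraceLockSlowPath(const void* obj) {
      std::printf("Locking %p\n", obj);
    }

    // Inlined guard: a single predictable branch when tracing is disabled.
    inline void TraceLock(const void* obj) {
      if (__builtin_expect(g_lock_tracing_enabled.load(std::memory_order_relaxed), false)) {
        TraceLockSlowPath(obj);
      }
    }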
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 8c7496b..7b4b8f9 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -250,6 +250,17 @@
uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);
+ // Support for systrace output of monitor operations.
+ ALWAYS_INLINE static void AtraceMonitorLock(Thread* self,
+ mirror::Object* obj,
+ bool is_wait)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ static void AtraceMonitorLockImpl(Thread* self,
+ mirror::Object* obj,
+ bool is_wait)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE static void AtraceMonitorUnlock();
+
static uint32_t lock_profiling_threshold_;
Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc
index ce38e4f..a47a4b2 100644
--- a/runtime/monitor_pool.cc
+++ b/runtime/monitor_pool.cc
@@ -28,7 +28,11 @@
} // namespace mirror
MonitorPool::MonitorPool()
- : num_chunks_(0), capacity_(0), first_free_(nullptr) {
+ : current_chunk_list_index_(0), num_chunks_(0), current_chunk_list_capacity_(0),
+ first_free_(nullptr) {
+ for (size_t i = 0; i < kMaxChunkLists; ++i) {
+ monitor_chunks_[i] = nullptr; // Not absolutely required, but ...
+ }
AllocateChunk(); // Get our first chunk.
}
@@ -37,24 +41,19 @@
void MonitorPool::AllocateChunk() {
DCHECK(first_free_ == nullptr);
- // Do we need to resize?
- if (num_chunks_ == capacity_) {
- if (capacity_ == 0U) {
- // Initialization.
- capacity_ = kInitialChunkStorage;
- uintptr_t* new_backing = new uintptr_t[capacity_]();
- DCHECK(monitor_chunks_.LoadRelaxed() == nullptr);
- monitor_chunks_.StoreRelaxed(new_backing);
- } else {
- size_t new_capacity = 2 * capacity_;
- uintptr_t* new_backing = new uintptr_t[new_capacity]();
- uintptr_t* old_backing = monitor_chunks_.LoadRelaxed();
- memcpy(new_backing, old_backing, sizeof(uintptr_t) * capacity_);
- monitor_chunks_.StoreRelaxed(new_backing);
- capacity_ = new_capacity;
- old_chunk_arrays_.push_back(std::unique_ptr<uintptr_t[]>(old_backing));
- VLOG(monitor) << "Resizing to capacity " << capacity_;
- }
+ // Do we need to allocate another chunk list?
+ if (num_chunks_ == current_chunk_list_capacity_) {
+ if (current_chunk_list_capacity_ != 0U) {
+ ++current_chunk_list_index_;
+ CHECK_LT(current_chunk_list_index_, kMaxChunkLists) << "Out of space for inflated monitors";
+ VLOG(monitor) << "Expanding to capacity "
+ << 2 * ChunkListCapacity(current_chunk_list_index_) - kInitialChunkStorage;
+ } // else we're initializing
+ current_chunk_list_capacity_ = ChunkListCapacity(current_chunk_list_index_);
+ uintptr_t* new_list = new uintptr_t[current_chunk_list_capacity_]();
+ DCHECK(monitor_chunks_[current_chunk_list_index_] == nullptr);
+ monitor_chunks_[current_chunk_list_index_] = new_list;
+ num_chunks_ = 0;
}
// Allocate the chunk.
@@ -65,7 +64,7 @@
CHECK_EQ(0U, reinterpret_cast<uintptr_t>(chunk) % kMonitorAlignment);
// Add the chunk.
- *(monitor_chunks_.LoadRelaxed() + num_chunks_) = reinterpret_cast<uintptr_t>(chunk);
+ monitor_chunks_[current_chunk_list_index_][num_chunks_] = reinterpret_cast<uintptr_t>(chunk);
num_chunks_++;
// Set up the free list
@@ -73,8 +72,8 @@
(kChunkCapacity - 1) * kAlignedMonitorSize);
last->next_free_ = nullptr;
// Eagerly compute id.
- last->monitor_id_ = OffsetToMonitorId((num_chunks_ - 1) * kChunkSize +
- (kChunkCapacity - 1) * kAlignedMonitorSize);
+ last->monitor_id_ = OffsetToMonitorId(current_chunk_list_index_ * (kMaxListSize * kChunkSize)
+ + (num_chunks_ - 1) * kChunkSize + (kChunkCapacity - 1) * kAlignedMonitorSize);
for (size_t i = 0; i < kChunkCapacity - 1; ++i) {
Monitor* before = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(last) -
kAlignedMonitorSize);
@@ -91,21 +90,19 @@
void MonitorPool::FreeInternal() {
// This is on shutdown with NO_THREAD_SAFETY_ANALYSIS, can't/don't need to lock.
- uintptr_t* backing = monitor_chunks_.LoadRelaxed();
- DCHECK(backing != nullptr);
- DCHECK_GT(capacity_, 0U);
- DCHECK_GT(num_chunks_, 0U);
-
- for (size_t i = 0; i < capacity_; ++i) {
- if (i < num_chunks_) {
- DCHECK_NE(backing[i], 0U);
- allocator_.deallocate(reinterpret_cast<uint8_t*>(backing[i]), kChunkSize);
- } else {
- DCHECK_EQ(backing[i], 0U);
+ DCHECK_NE(current_chunk_list_capacity_, 0UL);
+ for (size_t i = 0; i <= current_chunk_list_index_; ++i) {
+ DCHECK_NE(monitor_chunks_[i], static_cast<uintptr_t*>(nullptr));
+ for (size_t j = 0; j < ChunkListCapacity(i); ++j) {
+ if (i < current_chunk_list_index_ || j < num_chunks_) {
+ DCHECK_NE(monitor_chunks_[i][j], 0U);
+ allocator_.deallocate(reinterpret_cast<uint8_t*>(monitor_chunks_[i][j]), kChunkSize);
+ } else {
+ DCHECK_EQ(monitor_chunks_[i][j], 0U);
+ }
}
+ delete[] monitor_chunks_[i];
}
-
- delete[] backing;
}
Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj,
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 875b3fe..99810e0 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -128,12 +128,17 @@
void ReleaseMonitorToPool(Thread* self, Monitor* monitor);
void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors);
- // Note: This is safe as we do not ever move chunks.
+ // Note: This is safe as we do not ever move chunks. All needed entries in the monitor_chunks_
+ // data structure are read-only once we get here. Updates happen-before this call because
+ // the lock word was stored with release semantics and we read it with acquire semantics to
+ // retrieve the id.
Monitor* LookupMonitor(MonitorId mon_id) {
size_t offset = MonitorIdToOffset(mon_id);
size_t index = offset / kChunkSize;
+ size_t top_index = index / kMaxListSize;
+ size_t list_index = index % kMaxListSize;
size_t offset_in_chunk = offset % kChunkSize;
- uintptr_t base = *(monitor_chunks_.LoadRelaxed()+index);
+ uintptr_t base = monitor_chunks_[top_index][list_index];
return reinterpret_cast<Monitor*>(base + offset_in_chunk);
}
@@ -142,28 +147,37 @@
return base_addr <= mon_ptr && (mon_ptr - base_addr < kChunkSize);
}
- // Note: This is safe as we do not ever move chunks.
MonitorId ComputeMonitorIdInPool(Monitor* mon, Thread* self) {
MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
- for (size_t index = 0; index < num_chunks_; ++index) {
- uintptr_t chunk_addr = *(monitor_chunks_.LoadRelaxed() + index);
- if (IsInChunk(chunk_addr, mon)) {
- return OffsetToMonitorId(
- reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);
+ for (size_t i = 0; i <= current_chunk_list_index_; ++i) {
+ for (size_t j = 0; j < ChunkListCapacity(i); ++j) {
+ if (j >= num_chunks_ && i == current_chunk_list_index_) {
+ break;
+ }
+ uintptr_t chunk_addr = monitor_chunks_[i][j];
+ if (IsInChunk(chunk_addr, mon)) {
+ return OffsetToMonitorId(
+ reinterpret_cast<uintptr_t>(mon) - chunk_addr
+ + i * (kMaxListSize * kChunkSize) + j * kChunkSize);
+ }
}
}
LOG(FATAL) << "Did not find chunk that contains monitor.";
return 0;
}
- static size_t MonitorIdToOffset(MonitorId id) {
+ static constexpr size_t MonitorIdToOffset(MonitorId id) {
return id << 3;
}
- static MonitorId OffsetToMonitorId(size_t offset) {
+ static constexpr MonitorId OffsetToMonitorId(size_t offset) {
return static_cast<MonitorId>(offset >> 3);
}
+ static constexpr size_t ChunkListCapacity(size_t index) {
+ return kInitialChunkStorage << index;
+ }
+
// TODO: There are assumptions in the code that monitor addresses are 8B aligned (>>3).
static constexpr size_t kMonitorAlignment = 8;
// Size of a monitor, rounded up to a multiple of alignment.
@@ -174,20 +188,47 @@
// Chunk size that is referenced in the id. We can collapse this to the actually used storage
// in a chunk, i.e., kChunkCapacity * kAlignedMonitorSize, but this will mean proper divisions.
static constexpr size_t kChunkSize = kPageSize;
- // The number of initial chunks storable in monitor_chunks_. The number is large enough to make
- // resizing unlikely, but small enough to not waste too much memory.
- static constexpr size_t kInitialChunkStorage = 8U;
+ static_assert(IsPowerOfTwo(kChunkSize), "kChunkSize must be power of 2");
+ // The number of chunks of storage that can be referenced by the initial chunk list.
+ // The total number of usable monitor chunks is typically 255 times this number, so it
+ // should be large enough that we don't run out. We run out of address bits if it's > 512.
+ // Currently we set it a bit smaller, to save half a page per process. We make it tiny in
+ // debug builds to catch growth errors. This is the only value we really expect to tune.
+ static constexpr size_t kInitialChunkStorage = kIsDebugBuild ? 1U : 256U;
+ static_assert(IsPowerOfTwo(kInitialChunkStorage), "kInitialChunkStorage must be power of 2");
+ // The number of lists, each containing pointers to storage chunks.
+ static constexpr size_t kMaxChunkLists = 8; // Dictated by 3 bit index. Don't increase above 8.
+ static_assert(IsPowerOfTwo(kMaxChunkLists), "kMaxChunkLists must be power of 2");
+ static constexpr size_t kMaxListSize = kInitialChunkStorage << (kMaxChunkLists - 1);
+ // We lose 3 bits of monitor id to the 3-bit monitor_chunks_ index, and gain them back from
+ // the 3-bit alignment constraint on monitors:
+ static_assert(kMaxListSize * kChunkSize < (1 << LockWord::kMonitorIdSize),
+ "Monitor id bits don't fit");
+ static_assert(IsPowerOfTwo(kMaxListSize), "kMaxListSize must be power of 2");
- // List of memory chunks. Each chunk is kChunkSize.
- Atomic<uintptr_t*> monitor_chunks_;
- // Number of chunks stored.
+ // Array of pointers to lists (again arrays) of pointers to chunks containing monitors.
+ // Zeroth entry points to a list (array) of kInitialChunkStorage pointers to chunks.
+ // Each subsequent list is twice as large as the preceding one.
+ // Monitor Ids are interpreted as follows:
+ // Top 3 bits (of 28): index into monitor_chunks_.
+ // Next 16 bits: index into the chunk list, i.e. monitor_chunks_[i].
+ // Last 9 bits: offset within chunk, expressed as multiple of kMonitorAlignment.
+ // If we set kInitialChunkStorage to 512, this would allow us to use roughly 128K chunks of
+ // monitors, which is 0.5GB of monitors. With this maximum setting, the largest chunk list
+ // contains 64K entries, and we make full use of the available index space. With a
+ // kInitialChunkStorage value of 256, this is proportionately reduced to 0.25GB of monitors.
+ // Updates to monitor_chunks_ are guarded by allocated_monitor_ids_lock_ .
+ // No field in this entire data structure is ever updated once a monitor id whose lookup
+ // requires it has been made visible to another thread. Thus readers never race with
+ // updates, in spite of the fact that they acquire no locks.
+ uintptr_t* monitor_chunks_[kMaxChunkLists]; // uintptr_t is really a Monitor* .
+ // Highest currently used index in monitor_chunks_ . Used for newly allocated chunks.
+ size_t current_chunk_list_index_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
+ // Number of chunk pointers stored in monitor_chunks_[current_chunk_list_index_] so far.
size_t num_chunks_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
- // Number of chunks storable.
- size_t capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
-
- // To avoid race issues when resizing, we keep all the previous arrays.
- std::vector<std::unique_ptr<uintptr_t[]>> old_chunk_arrays_
- GUARDED_BY(Locks::allocated_monitor_ids_lock_);
+ // After the initial allocation, this is always equal to
+ // ChunkListCapacity(current_chunk_list_index_).
+ size_t current_chunk_list_capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
typedef TrackingAllocator<uint8_t, kAllocatorTagMonitorPool> Allocator;
Allocator allocator_;
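To make the id layout above concrete: decoding mirrors LookupMonitor(), and the capacity arithmetic matches the comments. A standalone sketch using the release-build constants from this change (kInitialChunkStorage = 256, kMaxChunkLists = 8; kChunkSize = kPageSize = 4096 is assumed, and the monitor id value is made up):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr size_t kChunkSize = 4096;  // kPageSize assumed.
    constexpr size_t kInitialChunkStorage = 256;
    constexpr size_t kMaxChunkLists = 8;
    constexpr size_t kMaxListSize = kInitialChunkStorage << (kMaxChunkLists - 1);  // 32768

    int main() {
      uint32_t mon_id = 0x123456;                        // Hypothetical monitor id.
      size_t offset = static_cast<size_t>(mon_id) << 3;  // MonitorIdToOffset: 8B alignment.
      size_t index = offset / kChunkSize;                // Global chunk index.
      size_t top_index = index / kMaxListSize;           // Which list in monitor_chunks_.
      size_t list_index = index % kMaxListSize;          // Which chunk within that list.
      size_t offset_in_chunk = offset % kChunkSize;      // Where the monitor lives in the chunk.
      std::printf("list=%zu chunk=%zu offset=%zu\n", top_index, list_index, offset_in_chunk);
      // Usable chunks across all lists: 256 * (2^8 - 1) = 65280, i.e. 255x the initial list,
      // or about 0.25GB of monitors at 4KiB per chunk, matching the comment above.
      return 0;
    }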
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index f355c2a..5ba8df7 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -212,6 +212,10 @@
Runtime::Current()->GetHeap()->RegisterNativeAllocation(env, static_cast<size_t>(bytes));
}
+static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) {
+ Runtime::Current()->RegisterSensitiveThread();
+}
+
static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) {
if (UNLIKELY(bytes < 0)) {
ScopedObjectAccess soa(env);
@@ -643,6 +647,7 @@
NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"),
NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"),
+ NATIVE_METHOD(VMRuntime, registerSensitiveThread, "()V"),
NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"),
NATIVE_METHOD(VMRuntime, requestConcurrentGC, "()V"),
NATIVE_METHOD(VMRuntime, requestHeapTrim, "()V"),
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 6b7ca40..0624da3 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -238,12 +238,13 @@
DCHECK(name != nullptr);
DCHECK(self != nullptr);
- StackHandleScope<1> hs(self);
+ StackHandleScope<2> hs(self);
MutableHandle<mirror::Class> h_clazz(hs.NewHandle(clazz));
+ Handle<mirror::String> h_name(hs.NewHandle(name));
// We search the current class, its direct interfaces then its superclass.
while (h_clazz.Get() != nullptr) {
- mirror::Field* result = GetDeclaredField(self, h_clazz.Get(), name);
+ mirror::Field* result = GetDeclaredField(self, h_clazz.Get(), h_name.Get());
if ((result != nullptr) && (result->GetAccessFlags() & kAccPublic)) {
return result;
} else if (UNLIKELY(self->IsExceptionPending())) {
@@ -258,7 +259,7 @@
self->AssertPendingException();
return nullptr;
}
- result = GetPublicFieldRecursive(self, iface, name);
+ result = GetPublicFieldRecursive(self, iface, h_name.Get());
if (result != nullptr) {
DCHECK(result->GetAccessFlags() & kAccPublic);
return result;
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 11a9d76..9470624 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -370,6 +370,10 @@
return lookup_table_data_;
}
+ const uint8_t* GetDexFilePointer() const {
+ return dex_file_pointer_;
+ }
+
~OatDexFile();
private:
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 3f95772..a508e87 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -179,11 +179,38 @@
return HasOriginalDexFiles() ? kDex2OatNeeded : kNoDexOptNeeded;
}
+// Figure out the compiler filter option currently specified in the runtime.
+// Returns true on success, false if the compiler filter is invalid, in which
+// case error_msg describes the problem.
+static bool GetRuntimeCompilerFilterOption(CompilerFilter::Filter* filter,
+ std::string* error_msg) {
+ CHECK(filter != nullptr);
+ CHECK(error_msg != nullptr);
+
+ *filter = CompilerFilter::kDefaultCompilerFilter;
+ for (StringPiece option : Runtime::Current()->GetCompilerOptions()) {
+ if (option.starts_with("--compiler-filter=")) {
+ const char* compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
+ if (!CompilerFilter::ParseCompilerFilter(compiler_filter_string, filter)) {
+ *error_msg = std::string("Unknown --compiler-filter value: ")
+ + std::string(compiler_filter_string);
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
OatFileAssistant::ResultOfAttemptToUpdate
-OatFileAssistant::MakeUpToDate(CompilerFilter::Filter target, std::string* error_msg) {
+OatFileAssistant::MakeUpToDate(std::string* error_msg) {
+ CompilerFilter::Filter target;
+ if (!GetRuntimeCompilerFilterOption(&target, error_msg)) {
+ return kUpdateNotAttempted;
+ }
+
switch (GetDexOptNeeded(target)) {
case kNoDexOptNeeded: return kUpdateSucceeded;
- case kDex2OatNeeded: return GenerateOatFile(target, error_msg);
+ case kDex2OatNeeded: return GenerateOatFile(error_msg);
case kPatchOatNeeded: return RelocateOatFile(OdexFileName(), error_msg);
case kSelfPatchOatNeeded: return RelocateOatFile(OatFileName(), error_msg);
}
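The helper above scans every runtime compiler option in order and overwrites *filter on each --compiler-filter= match, so when the flag is given more than once the last occurrence wins; the new RuntimeCompilerFilterOptionUsed test below depends on exactly that. A freestanding sketch of the same last-one-wins scan, with a stand-in parser since CompilerFilter's internals aren't part of this excerpt:

    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for CompilerFilter::ParseCompilerFilter (maps a name to an enum in ART).
    bool ParseFilter(const std::string& name, std::string* filter) {
      if (name == "interpret-only" || name == "speed") {
        *filter = name;
        return true;
      }
      return false;  // e.g. "bogus" -> the caller reports kUpdateNotAttempted.
    }

    int main() {
      std::vector<std::string> options = {"--compiler-filter=interpret-only",
                                          "--compiler-filter=speed"};
      std::string filter = "default";
      const std::string prefix = "--compiler-filter=";
      for (const std::string& option : options) {
        if (option.compare(0, prefix.size(), prefix) == 0 &&
            !ParseFilter(option.substr(prefix.size()), &filter)) {
          return 1;
        }
      }
      std::cout << filter << "\n";  // Prints "speed": the last occurrence wins.
      return 0;
    }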
@@ -634,7 +661,7 @@
}
OatFileAssistant::ResultOfAttemptToUpdate
-OatFileAssistant::GenerateOatFile(CompilerFilter::Filter target, std::string* error_msg) {
+OatFileAssistant::GenerateOatFile(std::string* error_msg) {
CHECK(error_msg != nullptr);
Runtime* runtime = Runtime::Current();
@@ -678,7 +705,6 @@
args.push_back("--dex-file=" + dex_location_);
args.push_back("--oat-fd=" + std::to_string(oat_file->Fd()));
args.push_back("--oat-location=" + oat_file_name);
- args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(target));
if (!Dex2Oat(args, error_msg)) {
// Manually delete the file. This ensures there is no garbage left over if
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index d3228de..85f4a47 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -159,15 +159,12 @@
};
// Attempts to generate or relocate the oat file as needed to make it up to
- // date with in a way that is at least as good as an oat file generated with
- // the given compiler filter.
- // Returns the result of attempting to update the code.
+ // date based on the current runtime and compiler options.
//
// If the result is not kUpdateSucceeded, the value of error_msg will be set
// to a string describing why there was a failure or the update was not
// attempted. error_msg must not be null.
- ResultOfAttemptToUpdate MakeUpToDate(CompilerFilter::Filter target_compiler_filter,
- std::string* error_msg);
+ ResultOfAttemptToUpdate MakeUpToDate(std::string* error_msg);
// Returns an oat file that can be used for loading dex files.
// Returns null if no suitable oat file was found.
@@ -250,14 +247,15 @@
// attempted. error_msg must not be null.
ResultOfAttemptToUpdate RelocateOatFile(const std::string* input_file, std::string* error_msg);
- // Generate the oat file from the dex file using the given compiler filter.
+ // Generate the oat file from the dex file using the current runtime
+ // compiler options.
// This does not check the current status before attempting to generate the
// oat file.
//
// If the result is not kUpdateSucceeded, the value of error_msg will be set
// to a string describing why there was a failure or the update was not
// attempted. error_msg must not be null.
- ResultOfAttemptToUpdate GenerateOatFile(CompilerFilter::Filter filter, std::string* error_msg);
+ ResultOfAttemptToUpdate GenerateOatFile(std::string* error_msg);
// Executes dex2oat using the current runtime configuration overridden with
// the given arguments. This does not check to see if dex2oat is enabled in
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index f50d1cb..764b969 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -453,8 +453,7 @@
// Trying to make the oat file up to date should not fail or crash.
std::string error_msg;
- EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg));
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, oat_file_assistant.MakeUpToDate(&error_msg));
// Trying to get the best oat file should fail, but not crash.
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
@@ -705,8 +704,9 @@
// Make the oat file up to date.
std::string error_msg;
+ Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
ASSERT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
+ oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -768,8 +768,9 @@
// Make the oat file up to date.
std::string error_msg;
+ Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
ASSERT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
+ oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -825,8 +826,9 @@
// Make the oat file up to date. This should have no effect.
std::string error_msg;
+ Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
+ oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -876,8 +878,9 @@
// Make the oat file up to date.
std::string error_msg;
+ Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
ASSERT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
+ oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -920,8 +923,9 @@
// Make the oat file up to date.
std::string error_msg;
+ Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
ASSERT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
+ oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -1100,8 +1104,9 @@
OatFileAssistant oat_file_assistant(
dex_location.c_str(), oat_location.c_str(), kRuntimeISA, false, true);
std::string error_msg;
+ Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
ASSERT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
+ oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
@@ -1131,8 +1136,9 @@
OatFileAssistant oat_file_assistant(
dex_location.c_str(), oat_location.c_str(), kRuntimeISA, false, true);
std::string error_msg;
+ Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
ASSERT_EQ(OatFileAssistant::kUpdateNotAttempted,
- oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg));
+ oat_file_assistant.MakeUpToDate(&error_msg));
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() == nullptr);
@@ -1147,8 +1153,9 @@
OatFileAssistant oat_file_assistant(
dex_location.c_str(), oat_location.c_str(), kRuntimeISA, false, true);
std::string error_msg;
+ Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
EXPECT_EQ(OatFileAssistant::kUpdateNotAttempted,
- oat_file_assistant.GenerateOatFile(CompilerFilter::kSpeed, &error_msg));
+ oat_file_assistant.GenerateOatFile(&error_msg));
}
// Turn an absolute path into a path relative to the current working
@@ -1227,8 +1234,9 @@
// Trying to make it up to date should have no effect.
std::string error_msg;
+ Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
- oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg));
+ oat_file_assistant.MakeUpToDate(&error_msg));
EXPECT_TRUE(error_msg.empty());
}
@@ -1368,6 +1376,34 @@
EXPECT_EQ(2u, dex_files.size());
}
+TEST_F(OatFileAssistantTest, RuntimeCompilerFilterOptionUsed) {
+ std::string dex_location = GetScratchDir() + "/RuntimeCompilerFilterOptionUsed.jar";
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false, false);
+
+ std::string error_msg;
+ Runtime::Current()->AddCompilerOption("--compiler-filter=interpret-only");
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
+ oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+ EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly));
+ EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+
+ Runtime::Current()->AddCompilerOption("--compiler-filter=speed");
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
+ oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+ EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly));
+ EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+
+ Runtime::Current()->AddCompilerOption("--compiler-filter=bogus");
+ EXPECT_EQ(OatFileAssistant::kUpdateNotAttempted,
+ oat_file_assistant.MakeUpToDate(&error_msg));
+}
+
TEST(OatFileAssistantUtilsTest, DexFilenameToOdexFilename) {
std::string error_msg;
std::string odex_file;
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 9ab0072..bc01da4 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -44,8 +44,6 @@
// If true, then we attempt to load the application image if it exists.
static constexpr bool kEnableAppImage = true;
-CompilerFilter::Filter OatFileManager::filter_ = CompilerFilter::Filter::kSpeed;
-
const OatFile* OatFileManager::RegisterOatFile(std::unique_ptr<const OatFile> oat_file) {
WriterMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
DCHECK(oat_file != nullptr);
@@ -341,9 +339,10 @@
const OatFile* source_oat_file = nullptr;
- // Update the oat file on disk if we can. This may fail, but that's okay.
- // Best effort is all that matters here.
- switch (oat_file_assistant.MakeUpToDate(filter_, /*out*/ &error_msg)) {
+ // Update the oat file on disk if we can, based on the --compiler-filter
+ // option derived from the current runtime options.
+ // This may fail, but that's okay. Best effort is all that matters here.
+ switch (oat_file_assistant.MakeUpToDate(/*out*/ &error_msg)) {
case OatFileAssistant::kUpdateFailed:
LOG(WARNING) << error_msg;
break;
diff --git a/runtime/oat_file_manager.h b/runtime/oat_file_manager.h
index f98102e..7017dfc 100644
--- a/runtime/oat_file_manager.h
+++ b/runtime/oat_file_manager.h
@@ -25,7 +25,6 @@
#include "base/macros.h"
#include "base/mutex.h"
-#include "compiler_filter.h"
#include "jni.h"
namespace art {
@@ -116,10 +115,6 @@
void DumpForSigQuit(std::ostream& os);
- static void SetCompilerFilter(CompilerFilter::Filter filter) {
- filter_ = filter;
- }
-
private:
// Check for duplicate class definitions of the given oat file against all open oat files.
// Return true if there are any class definition collisions in the oat_file.
@@ -133,9 +128,6 @@
std::unordered_map<std::string, size_t> oat_file_count_ GUARDED_BY(Locks::oat_file_count_lock_);
bool have_non_pic_oat_file_;
- // The compiler filter used for oat files loaded by the oat file manager.
- static CompilerFilter::Filter filter_;
-
DISALLOW_COPY_AND_ASSIGN(OatFileManager);
};
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index c8d4291..eac5b43 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -153,7 +153,7 @@
.Define("-Xusejit:_")
.WithType<bool>()
.WithValueMap({{"false", false}, {"true", true}})
- .IntoKey(M::UseJIT)
+ .IntoKey(M::UseJitCompilation)
.Define("-Xjitinitialsize:_")
.WithType<MemoryKiB>()
.IntoKey(M::JITCodeCacheInitialCapacity)
@@ -172,6 +172,9 @@
.Define("-Xjitprithreadweight:_")
.WithType<unsigned int>()
.IntoKey(M::JITPriorityThreadWeight)
+ .Define("-Xjittransitionweight:_")
+ .WithType<unsigned int>()
+ .IntoKey(M::JITInvokeTransitionWeight)
.Define("-Xjitsaveprofilinginfo")
.WithValue(true)
.IntoKey(M::JITSaveProfilingInfo)
@@ -289,9 +292,6 @@
.IntoKey(M::Experimental)
.Define("-Xforce-nb-testing")
.IntoKey(M::ForceNativeBridge)
- .Define("-XOatFileManagerCompilerFilter:_")
- .WithType<std::string>()
- .IntoKey(M::OatFileManagerCompilerFilter)
.Ignore({
"-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
@@ -471,6 +471,11 @@
LOG(INFO) << "setting boot class path to " << *args.Get(M::BootClassPath);
}
+ if (args.GetOrDefault(M::UseJitCompilation) && args.GetOrDefault(M::Interpret)) {
+ Usage("-Xusejit:true and -Xint cannot be specified together");
+ Exit(0);
+ }
+
// Set a default boot class path if we didn't get an explicit one via command line.
if (getenv("BOOTCLASSPATH") != nullptr) {
args.SetIfMissing(M::BootClassPath, std::string(getenv("BOOTCLASSPATH")));
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index c7ccee2..1dea562 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -434,7 +434,7 @@
bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
InlineMethod* result) {
DCHECK(verifier != nullptr);
- if (!Runtime::Current()->UseJit()) {
+ if (!Runtime::Current()->UseJitCompilation()) {
DCHECK_EQ(verifier->CanLoadClasses(), result != nullptr);
}
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index a785ecb..a3e1f00 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -509,7 +509,7 @@
// Compiled code made an explicit deoptimization.
ArtMethod* deopt_method = visitor.GetSingleFrameDeoptMethod();
DCHECK(deopt_method != nullptr);
- if (Runtime::Current()->UseJit()) {
+ if (Runtime::Current()->UseJitCompilation()) {
Runtime::Current()->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor(
deopt_method, visitor.GetSingleFrameDeoptQuickMethodHeader());
} else {
@@ -611,7 +611,7 @@
// Prints out methods with their type of frame.
class DumpFramesWithTypeStackVisitor FINAL : public StackVisitor {
public:
- DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false)
+ explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false)
SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
show_details_(show_details) {}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a4d31ef..45ba7d0 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -60,7 +60,6 @@
#include "base/unix_file/fd_file.h"
#include "class_linker-inl.h"
#include "compiler_callbacks.h"
-#include "compiler_filter.h"
#include "debugger.h"
#include "elf_file.h"
#include "entrypoints/runtime_asm_entrypoints.h"
@@ -558,15 +557,21 @@
started_ = true;
- if (jit_options_->UseJIT()) {
+ // Create the JIT if we need JIT compilation or if we need to save profiling info.
+ // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
+ // recording profiles. Maybe we should consider renaming it to make it clearer that it's
+ // not only about compiling. b/28295073.
+ if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
std::string error_msg;
if (!IsZygote()) {
// If we are the zygote then we need to wait until after forking to create the code cache
// due to SELinux restrictions on r/w/x memory regions.
CreateJit();
- } else if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
- // Try to load compiler pre zygote to reduce PSS. b/27744947
- LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
+ } else if (jit_options_->UseJitCompilation()) {
+ if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
+ // Try to load compiler pre zygote to reduce PSS. b/27744947
+ LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
+ }
}
}
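With this hunk the JIT object is created when either JIT compilation or profile saving is requested, and the compiler library is only preloaded in the zygote when compilation is actually on. The accessors added at the end of runtime.cc therefore combine the jit_ pointer with the per-feature flag. A small sketch of the resulting decision table (the struct is a stand-in for JitOptions):

    #include <cstdio>

    struct Options { bool use_jit_compilation; bool save_profiling_info; };

    int main() {
      for (int c = 0; c < 2; ++c) {
        for (int s = 0; s < 2; ++s) {
          Options o{c != 0, s != 0};
          bool jit_created = o.use_jit_compilation || o.save_profiling_info;
          // Mirrors Runtime::UseJitCompilation() / Runtime::SaveProfileInfo().
          std::printf("compile=%d save=%d -> jit=%d use_jit=%d save_info=%d\n",
                      c, s, jit_created,
                      jit_created && o.use_jit_compilation,
                      jit_created && o.save_profiling_info);
        }
      }
      return 0;
    }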
@@ -713,7 +718,11 @@
// before fork aren't attributed to an app.
heap_->ResetGcPerformanceInfo();
- if (!is_system_server && !safe_mode_ && jit_options_->UseJIT() && jit_.get() == nullptr) {
+
+ if (!is_system_server &&
+ !safe_mode_ &&
+ (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) &&
+ jit_.get() == nullptr) {
// Note that when running ART standalone (not zygote, nor zygote fork),
// the jit may have already been created.
CreateJit();
@@ -957,16 +966,6 @@
experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental);
is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode);
- {
- CompilerFilter::Filter filter;
- std::string filter_str = runtime_options.GetOrDefault(Opt::OatFileManagerCompilerFilter);
- if (!CompilerFilter::ParseCompilerFilter(filter_str.c_str(), &filter)) {
- LOG(ERROR) << "Cannot parse compiler filter " << filter_str;
- return false;
- }
- OatFileManager::SetCompilerFilter(filter);
- }
-
XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
@@ -1016,7 +1015,8 @@
// this case.
// If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
// null and we don't create the jit.
- jit_options_->SetUseJIT(false);
+ jit_options_->SetUseJitCompilation(false);
+ jit_options_->SetSaveProfilingInfo(false);
}
// Allocate a global table of boxed lambda objects <-> closures.
@@ -1613,18 +1613,19 @@
}
}
-static ImtConflictTable::Entry empty_entry = { nullptr, nullptr };
-
ArtMethod* Runtime::CreateImtConflictMethod(LinearAlloc* linear_alloc) {
- auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod(linear_alloc);
+ ClassLinker* const class_linker = GetClassLinker();
+ ArtMethod* method = class_linker->CreateRuntimeMethod(linear_alloc);
// When compiling, the code pointer will get set later when the image is loaded.
+ const size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
if (IsAotCompiler()) {
- size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
- method->SetImtConflictTable(reinterpret_cast<ImtConflictTable*>(&empty_entry));
}
+ // Create empty conflict table.
+ method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc),
+ pointer_size);
return method;
}
@@ -1632,9 +1633,6 @@
CHECK(method != nullptr);
CHECK(method->IsRuntimeMethod());
imt_conflict_method_ = method;
- if (!IsAotCompiler()) {
- method->SetImtConflictTable(reinterpret_cast<ImtConflictTable*>(&empty_entry));
- }
}
ArtMethod* Runtime::CreateResolutionMethod() {
@@ -1915,9 +1913,8 @@
void Runtime::CreateJit() {
CHECK(!IsAotCompiler());
- if (GetInstrumentation()->IsForcedInterpretOnly()) {
- // Don't create JIT if forced interpret only.
- return;
+ if (kIsDebugBuild && GetInstrumentation()->IsForcedInterpretOnly()) {
+ DCHECK(!jit_options_->UseJitCompilation());
}
std::string error_msg;
jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
@@ -1944,8 +1941,20 @@
CHECK(method != nullptr);
CHECK(method->IsRuntimeMethod());
imt_unimplemented_method_ = method;
- if (!IsAotCompiler()) {
- method->SetImtConflictTable(reinterpret_cast<ImtConflictTable*>(&empty_entry));
+}
+
+void Runtime::FixupConflictTables() {
+ // We can only do this after the class linker is created.
+ const size_t pointer_size = GetClassLinker()->GetImagePointerSize();
+ if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) {
+ imt_unimplemented_method_->SetImtConflictTable(
+ ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
+ pointer_size);
+ }
+ if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) {
+ imt_conflict_method_->SetImtConflictTable(
+ ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
+ pointer_size);
}
}
@@ -1981,4 +1990,18 @@
GetHeap()->UpdateProcessState(old_process_state, process_state);
}
+void Runtime::RegisterSensitiveThread() const {
+ Thread::SetJitSensitiveThread();
+}
+
+// Returns true if JIT compilation is enabled. GetJit() will not be null in this case.
+bool Runtime::UseJitCompilation() const {
+ return (jit_ != nullptr) && jit_->UseJitCompilation();
+}
+
+// Returns true if profile saving is enabled. GetJit() will not be null in this case.
+bool Runtime::SaveProfileInfo() const {
+ return (jit_ != nullptr) && jit_->SaveProfilingInfo();
+}
+
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index ae25dd1..1394462 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -127,7 +127,7 @@
// IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
bool IsAotCompiler() const {
- return !UseJit() && IsCompiler();
+ return !UseJitCompilation() && IsCompiler();
}
// IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
@@ -383,6 +383,7 @@
return imt_conflict_method_ != nullptr;
}
+ void FixupConflictTables();
void SetImtConflictMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
void SetImtUnimplementedMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -451,9 +452,11 @@
jit::Jit* GetJit() {
return jit_.get();
}
- bool UseJit() const {
- return jit_.get() != nullptr;
- }
+
+ // Returns true if JIT compilation is enabled. GetJit() will not be null in this case.
+ bool UseJitCompilation() const;
+ // Returns true if profile saving is enabled. GetJit() will not be null in this case.
+ bool SaveProfileInfo() const;
void PreZygoteFork();
bool InitZygote();
@@ -635,6 +638,8 @@
return process_state_ == kProcessStateJankPerceptible;
}
+ void RegisterSensitiveThread() const;
+
void SetZygoteNoThreadSection(bool val) {
zygote_no_threads_ = val;
}
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 6433c33..6db0cb2 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -66,12 +66,13 @@
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseTlab || kUseReadBarrier))
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
-RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
+RUNTIME_OPTIONS_KEY (bool, UseJitCompilation, false)
RUNTIME_OPTIONS_KEY (bool, DumpNativeStackOnSigQuit, true)
RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, JITOsrThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, JITPriorityThreadWeight)
+RUNTIME_OPTIONS_KEY (unsigned int, JITInvokeTransitionWeight)
RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheInitialCapacity, jit::JitCodeCache::kInitialCapacity)
RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheMaxCapacity, jit::JitCodeCache::kMaxCapacity)
RUNTIME_OPTIONS_KEY (bool, JITSaveProfilingInfo, false)
@@ -133,6 +134,5 @@
// We don't call abort(3) by default; see
// Runtime::Abort.
RUNTIME_OPTIONS_KEY (void (*)(), HookAbort, nullptr)
-RUNTIME_OPTIONS_KEY (std::string, OatFileManagerCompilerFilter, "speed")
#undef RUNTIME_OPTIONS_KEY
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 56ef5aa..a5ca527 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -637,8 +637,8 @@
// If we are the JIT then we may have just compiled the method after the
// IsQuickToInterpreterBridge check.
- jit::Jit* const jit = Runtime::Current()->GetJit();
- if (jit != nullptr && jit->GetCodeCache()->ContainsPc(code)) {
+ Runtime* runtime = Runtime::Current();
+ if (runtime->UseJitCompilation() && runtime->GetJit()->GetCodeCache()->ContainsPc(code)) {
return;
}
@@ -678,8 +678,10 @@
if (space->IsImageSpace()) {
auto* image_space = space->AsImageSpace();
const auto& header = image_space->GetImageHeader();
- const auto* methods = &header.GetMethodsSection();
- if (methods->Contains(reinterpret_cast<const uint8_t*>(method) - image_space->Begin())) {
+ const ImageSection& methods = header.GetMethodsSection();
+ const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
+ const size_t offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
+ if (methods.Contains(offset) || runtime_methods.Contains(offset)) {
in_image = true;
break;
}
@@ -948,7 +950,7 @@
}
}
-void LockCountData::AddMonitorInternal(Thread* self, mirror::Object* obj) {
+void LockCountData::AddMonitor(Thread* self, mirror::Object* obj) {
if (obj == nullptr) {
return;
}
@@ -965,7 +967,7 @@
monitors_->push_back(obj);
}
-void LockCountData::RemoveMonitorInternal(Thread* self, const mirror::Object* obj) {
+void LockCountData::RemoveMonitorOrThrow(Thread* self, const mirror::Object* obj) {
if (obj == nullptr) {
return;
}
@@ -998,7 +1000,7 @@
obj->MonitorExit(self);
}
-bool LockCountData::CheckAllMonitorsReleasedInternal(Thread* self) {
+bool LockCountData::CheckAllMonitorsReleasedOrThrow(Thread* self) {
DCHECK(self != nullptr);
if (monitors_ != nullptr) {
if (!monitors_->empty()) {
diff --git a/runtime/stack.h b/runtime/stack.h
index 7301184..e77ab46 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -80,39 +80,18 @@
public:
// Add the given object to the list of monitors, that is, objects that have been locked. This
// will not throw (but be skipped if there is an exception pending on entry).
- template <bool kLockCounting>
- void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(self != nullptr);
- if (!kLockCounting) {
- return;
- }
- AddMonitorInternal(self, obj);
- }
+ void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
// Try to remove the given object from the monitor list, indicating an unlock operation.
// This will throw an IllegalMonitorStateException (clearing any already pending exception), in
// case that there wasn't a lock recorded for the object.
- template <bool kLockCounting>
void RemoveMonitorOrThrow(Thread* self,
- const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(self != nullptr);
- if (!kLockCounting) {
- return;
- }
- RemoveMonitorInternal(self, obj);
- }
+ const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
// Check whether all acquired monitors have been released. This will potentially throw an
// IllegalMonitorStateException, clearing any already pending exception. Returns true if the
// check shows that everything is OK wrt/ lock counting, false otherwise.
- template <bool kLockCounting>
- bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(self != nullptr);
- if (!kLockCounting) {
- return true;
- }
- return CheckAllMonitorsReleasedInternal(self);
- }
+ bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
template <typename T, typename... Args>
void VisitMonitors(T visitor, Args&&... args) SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -125,12 +104,6 @@
}
private:
- // Internal implementations.
- void AddMonitorInternal(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
- void RemoveMonitorInternal(Thread* self, const mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool CheckAllMonitorsReleasedInternal(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
-
// Stores references to the locked-on objects. As noted, this should be visited during thread
// marking.
std::unique_ptr<std::vector<mirror::Object*>> monitors_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7922b60..fb24828 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -89,6 +89,7 @@
ConditionVariable* Thread::resume_cond_ = nullptr;
const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
+Thread* Thread::jit_sensitive_thread_ = nullptr;
static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;
@@ -739,7 +740,7 @@
{
MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
if (runtime->IsShuttingDownLocked()) {
- LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
+ LOG(WARNING) << "Thread attaching while runtime is shutting down: " << thread_name;
return nullptr;
} else {
Runtime::Current()->StartThreadBirth();
@@ -3010,7 +3011,6 @@
return count;
}
-
void Thread::DeoptimizeWithDeoptimizationException(JValue* result) {
DCHECK_EQ(GetException(), Thread::GetDeoptimizationException());
ClearException();
@@ -3031,4 +3031,11 @@
interpreter::EnterInterpreterFromDeoptimize(this, shadow_frame, from_code, result);
}
+void Thread::SetException(mirror::Throwable* new_exception) {
+ CHECK(new_exception != nullptr);
+ // TODO: DCHECK(!IsExceptionPending());
+ tlsPtr_.exception = new_exception;
+}
+
} // namespace art
diff --git a/runtime/thread.h b/runtime/thread.h
index ed42e46..582a0cd 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -363,12 +363,7 @@
void AssertNoPendingException() const;
void AssertNoPendingExceptionForNewException(const char* msg) const;
- void SetException(mirror::Throwable* new_exception)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- CHECK(new_exception != nullptr);
- // TODO: DCHECK(!IsExceptionPending());
- tlsPtr_.exception = new_exception;
- }
+ void SetException(mirror::Throwable* new_exception) SHARED_REQUIRES(Locks::mutator_lock_);
void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) {
tlsPtr_.exception = nullptr;
@@ -1098,6 +1093,12 @@
return debug_disallow_read_barrier_;
}
+ // Returns true if the current thread is the jit sensitive thread.
+ bool IsJitSensitiveThread() const {
+ return this == jit_sensitive_thread_;
+ }
+
+ // Returns true if StrictMode events are traced for the current thread.
static bool IsSensitiveThread() {
if (is_sensitive_thread_hook_ != nullptr) {
return (*is_sensitive_thread_hook_)();
@@ -1180,6 +1181,16 @@
ALWAYS_INLINE void PassActiveSuspendBarriers()
REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
+ // Registers the current thread as the jit sensitive thread. Should be called just once.
+ static void SetJitSensitiveThread() {
+ if (jit_sensitive_thread_ == nullptr) {
+ jit_sensitive_thread_ = Thread::Current();
+ } else {
+ LOG(WARNING) << "Attempt to set the JIT sensitive thread twice. Tid:"
+ << Thread::Current()->GetTid();
+ }
+ }
+
static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) {
is_sensitive_thread_hook_ = is_sensitive_thread_hook;
}
@@ -1229,6 +1240,8 @@
// Hook passed by framework which returns true
// when StrictMode events are traced for the current thread.
static bool (*is_sensitive_thread_hook_)();
+ // Stores the jit sensitive thread (which for now is the UI thread).
+ static Thread* jit_sensitive_thread_;
/***********************************************************************************************/
// Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index b629118..2b96328 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -401,8 +401,13 @@
method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother);
}
}
- if (method != nullptr && verifier.HasInstructionThatWillThrow()) {
- method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother);
+ if (method != nullptr) {
+ if (verifier.HasInstructionThatWillThrow()) {
+ method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother);
+ }
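+ // Structured-locking failures make the interpreter count locks for this method;
+ // see kAccMustCountLocks and LockCountData in stack.h.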
+ if ((verifier.encountered_failure_types_ & VerifyError::VERIFY_ERROR_LOCKING) != 0) {
+ method->SetAccessFlags(method->GetAccessFlags() | kAccMustCountLocks);
+ }
}
} else {
// Bad method data.
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 2bdf8d1..8619ff7 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -14,23 +14,22 @@
* limitations under the License.
*/
-#include <assert.h>
#include <iostream>
#include <pthread.h>
#include <stdio.h>
#include <vector>
+#include "art_method-inl.h"
+#include "base/logging.h"
#include "jni.h"
-#if defined(NDEBUG)
-#error test code compiled without NDEBUG
-#endif
+namespace art {
static JavaVM* jvm = nullptr;
extern "C" JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void*) {
- assert(vm != nullptr);
- assert(jvm == nullptr);
+ CHECK(vm != nullptr);
+ CHECK(jvm == nullptr);
jvm = vm;
std::cout << "JNI_OnLoad called" << std::endl;
return JNI_VERSION_1_6;
@@ -39,24 +38,24 @@
extern "C" JNIEXPORT void JNI_OnUnload(JavaVM*, void*) {
// std::cout since LOG(INFO) adds extra stuff like pid.
std::cout << "JNI_OnUnload called" << std::endl;
- // Clear jvm for assert in test 004-JniTest.
+ // Clear jvm for CHECK in test 004-JniTest.
jvm = nullptr;
}
static void* AttachHelper(void* arg) {
- assert(jvm != nullptr);
+ CHECK(jvm != nullptr);
JNIEnv* env = nullptr;
JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, nullptr };
int attach_result = jvm->AttachCurrentThread(&env, &args);
- assert(attach_result == 0);
+ CHECK_EQ(attach_result, 0);
typedef void (*Fn)(JNIEnv*);
Fn fn = reinterpret_cast<Fn>(arg);
fn(env);
int detach_result = jvm->DetachCurrentThread();
- assert(detach_result == 0);
+ CHECK_EQ(detach_result, 0);
return nullptr;
}
@@ -64,19 +63,19 @@
pthread_t pthread;
int pthread_create_result = pthread_create(&pthread, nullptr, AttachHelper,
reinterpret_cast<void*>(fn));
- assert(pthread_create_result == 0);
+ CHECK_EQ(pthread_create_result, 0);
int pthread_join_result = pthread_join(pthread, nullptr);
- assert(pthread_join_result == 0);
+ CHECK_EQ(pthread_join_result, 0);
}
static void testFindClassOnAttachedNativeThread(JNIEnv* env) {
jclass clazz = env->FindClass("Main");
- assert(clazz != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(clazz != nullptr);
+ CHECK(!env->ExceptionCheck());
jobjectArray array = env->NewObjectArray(0, clazz, nullptr);
- assert(array != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(array != nullptr);
+ CHECK(!env->ExceptionCheck());
}
// http://b/10994325
@@ -86,12 +85,12 @@
static void testFindFieldOnAttachedNativeThread(JNIEnv* env) {
jclass clazz = env->FindClass("Main");
- assert(clazz != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(clazz != nullptr);
+ CHECK(!env->ExceptionCheck());
jfieldID field = env->GetStaticFieldID(clazz, "testFindFieldOnAttachedNativeThreadField", "Z");
- assert(field != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(field != nullptr);
+ CHECK(!env->ExceptionCheck());
env->SetStaticBooleanField(clazz, field, JNI_TRUE);
}
@@ -103,38 +102,38 @@
static void testReflectFieldGetFromAttachedNativeThread(JNIEnv* env) {
jclass clazz = env->FindClass("Main");
- assert(clazz != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(clazz != nullptr);
+ CHECK(!env->ExceptionCheck());
jclass class_clazz = env->FindClass("java/lang/Class");
- assert(class_clazz != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(class_clazz != nullptr);
+ CHECK(!env->ExceptionCheck());
jmethodID getFieldMetodId = env->GetMethodID(class_clazz, "getField",
"(Ljava/lang/String;)Ljava/lang/reflect/Field;");
- assert(getFieldMetodId != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(getFieldMetodId != nullptr);
+ CHECK(!env->ExceptionCheck());
jstring field_name = env->NewStringUTF("testReflectFieldGetFromAttachedNativeThreadField");
- assert(field_name != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(field_name != nullptr);
+ CHECK(!env->ExceptionCheck());
jobject field = env->CallObjectMethod(clazz, getFieldMetodId, field_name);
- assert(field != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(field != nullptr);
+ CHECK(!env->ExceptionCheck());
jclass field_clazz = env->FindClass("java/lang/reflect/Field");
- assert(field_clazz != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(field_clazz != nullptr);
+ CHECK(!env->ExceptionCheck());
jmethodID getBooleanMetodId = env->GetMethodID(field_clazz, "getBoolean",
"(Ljava/lang/Object;)Z");
- assert(getBooleanMetodId != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(getBooleanMetodId != nullptr);
+ CHECK(!env->ExceptionCheck());
jboolean value = env->CallBooleanMethod(field, getBooleanMetodId, /* ignored */ clazz);
- assert(value == false);
- assert(!env->ExceptionCheck());
+ CHECK(value == false);
+ CHECK(!env->ExceptionCheck());
}
// http://b/15539150
@@ -148,22 +147,22 @@
extern "C" JNIEXPORT void JNICALL Java_Main_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env,
jclass) {
jclass super_class = env->FindClass("Main$testCallStaticVoidMethodOnSubClass_SuperClass");
- assert(super_class != nullptr);
+ CHECK(super_class != nullptr);
jmethodID execute = env->GetStaticMethodID(super_class, "execute", "()V");
- assert(execute != nullptr);
+ CHECK(execute != nullptr);
jclass sub_class = env->FindClass("Main$testCallStaticVoidMethodOnSubClass_SubClass");
- assert(sub_class != nullptr);
+ CHECK(sub_class != nullptr);
env->CallStaticVoidMethod(sub_class, execute);
}
extern "C" JNIEXPORT jobject JNICALL Java_Main_testGetMirandaMethodNative(JNIEnv* env, jclass) {
jclass abstract_class = env->FindClass("Main$testGetMirandaMethod_MirandaAbstract");
- assert(abstract_class != nullptr);
+ CHECK(abstract_class != nullptr);
jmethodID miranda_method = env->GetMethodID(abstract_class, "inInterface", "()Z");
- assert(miranda_method != nullptr);
+ CHECK(miranda_method != nullptr);
return env->ToReflectedMethod(abstract_class, miranda_method, JNI_FALSE);
}
@@ -171,11 +170,11 @@
extern "C" void JNICALL Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass) {
std::vector<uint8_t> buffer(1);
jobject byte_buffer = env->NewDirectByteBuffer(&buffer[0], 0);
- assert(byte_buffer != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(byte_buffer != nullptr);
+ CHECK(!env->ExceptionCheck());
- assert(env->GetDirectBufferAddress(byte_buffer) == &buffer[0]);
- assert(env->GetDirectBufferCapacity(byte_buffer) == 0);
+ CHECK_EQ(env->GetDirectBufferAddress(byte_buffer), &buffer[0]);
+ CHECK_EQ(env->GetDirectBufferCapacity(byte_buffer), 0);
}
constexpr size_t kByteReturnSize = 7;
@@ -185,18 +184,18 @@
jbyte b3, jbyte b4, jbyte b5, jbyte b6,
jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
// We use b1 to drive the output.
- assert(b2 == 2);
- assert(b3 == -3);
- assert(b4 == 4);
- assert(b5 == -5);
- assert(b6 == 6);
- assert(b7 == -7);
- assert(b8 == 8);
- assert(b9 == -9);
- assert(b10 == 10);
+ CHECK_EQ(b2, 2);
+ CHECK_EQ(b3, -3);
+ CHECK_EQ(b4, 4);
+ CHECK_EQ(b5, -5);
+ CHECK_EQ(b6, 6);
+ CHECK_EQ(b7, -7);
+ CHECK_EQ(b8, 8);
+ CHECK_EQ(b9, -9);
+ CHECK_EQ(b10, 10);
- assert(0 <= b1);
- assert(b1 < static_cast<jbyte>(kByteReturnSize));
+ CHECK_LE(0, b1);
+ CHECK_LT(b1, static_cast<jbyte>(kByteReturnSize));
return byte_returns[b1];
}
@@ -210,18 +209,18 @@
jshort s3, jshort s4, jshort s5, jshort s6,
jshort s7, jshort s8, jshort s9, jshort s10) {
// We use s1 to drive the output.
- assert(s2 == 2);
- assert(s3 == -3);
- assert(s4 == 4);
- assert(s5 == -5);
- assert(s6 == 6);
- assert(s7 == -7);
- assert(s8 == 8);
- assert(s9 == -9);
- assert(s10 == 10);
+ CHECK_EQ(s2, 2);
+ CHECK_EQ(s3, -3);
+ CHECK_EQ(s4, 4);
+ CHECK_EQ(s5, -5);
+ CHECK_EQ(s6, 6);
+ CHECK_EQ(s7, -7);
+ CHECK_EQ(s8, 8);
+ CHECK_EQ(s9, -9);
+ CHECK_EQ(s10, 10);
- assert(0 <= s1);
- assert(s1 < static_cast<jshort>(kShortReturnSize));
+ CHECK_LE(0, s1);
+ CHECK_LT(s1, static_cast<jshort>(kShortReturnSize));
return short_returns[s1];
}
@@ -231,17 +230,17 @@
jboolean b5, jboolean b6, jboolean b7,
jboolean b8, jboolean b9, jboolean b10) {
// We use b1 to drive the output.
- assert(b2 == JNI_TRUE);
- assert(b3 == JNI_FALSE);
- assert(b4 == JNI_TRUE);
- assert(b5 == JNI_FALSE);
- assert(b6 == JNI_TRUE);
- assert(b7 == JNI_FALSE);
- assert(b8 == JNI_TRUE);
- assert(b9 == JNI_FALSE);
- assert(b10 == JNI_TRUE);
+ CHECK_EQ(b2, JNI_TRUE);
+ CHECK_EQ(b3, JNI_FALSE);
+ CHECK_EQ(b4, JNI_TRUE);
+ CHECK_EQ(b5, JNI_FALSE);
+ CHECK_EQ(b6, JNI_TRUE);
+ CHECK_EQ(b7, JNI_FALSE);
+ CHECK_EQ(b8, JNI_TRUE);
+ CHECK_EQ(b9, JNI_FALSE);
+ CHECK_EQ(b10, JNI_TRUE);
- assert(b1 == JNI_TRUE || b1 == JNI_FALSE);
+ CHECK(b1 == JNI_TRUE || b1 == JNI_FALSE);
return b1;
}
@@ -252,17 +251,17 @@
jchar c3, jchar c4, jchar c5, jchar c6, jchar c7,
jchar c8, jchar c9, jchar c10) {
// We use c1 to drive the output.
- assert(c2 == 'a');
- assert(c3 == 'b');
- assert(c4 == 'c');
- assert(c5 == '0');
- assert(c6 == '1');
- assert(c7 == '2');
- assert(c8 == 1234);
- assert(c9 == 2345);
- assert(c10 == 3456);
+ CHECK_EQ(c2, 'a');
+ CHECK_EQ(c3, 'b');
+ CHECK_EQ(c4, 'c');
+ CHECK_EQ(c5, '0');
+ CHECK_EQ(c6, '1');
+ CHECK_EQ(c7, '2');
+ CHECK_EQ(c8, 1234);
+ CHECK_EQ(c9, 2345);
+ CHECK_EQ(c10, 3456);
- assert(c1 < static_cast<jchar>(kCharReturnSize));
+ CHECK_LT(c1, static_cast<jchar>(kCharReturnSize));
return char_returns[c1];
}
@@ -281,39 +280,39 @@
// Test direct call.
{
jclass vmstack_clazz = env->FindClass("dalvik/system/VMStack");
- assert(vmstack_clazz != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(vmstack_clazz != nullptr);
+ CHECK(!env->ExceptionCheck());
jmethodID getCallingClassLoaderMethodId = env->GetStaticMethodID(vmstack_clazz,
"getCallingClassLoader",
"()Ljava/lang/ClassLoader;");
- assert(getCallingClassLoaderMethodId != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(getCallingClassLoaderMethodId != nullptr);
+ CHECK(!env->ExceptionCheck());
jobject class_loader = env->CallStaticObjectMethod(vmstack_clazz,
getCallingClassLoaderMethodId);
- assert(class_loader == nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(class_loader == nullptr);
+ CHECK(!env->ExceptionCheck());
}
// Test one-level call. Use System.loadLibrary().
{
jclass system_clazz = env->FindClass("java/lang/System");
- assert(system_clazz != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(system_clazz != nullptr);
+ CHECK(!env->ExceptionCheck());
jmethodID loadLibraryMethodId = env->GetStaticMethodID(system_clazz, "loadLibrary",
"(Ljava/lang/String;)V");
- assert(loadLibraryMethodId != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(loadLibraryMethodId != nullptr);
+ CHECK(!env->ExceptionCheck());
// Create a string object.
jobject library_string = env->NewStringUTF("non_existing_library");
- assert(library_string != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(library_string != nullptr);
+ CHECK(!env->ExceptionCheck());
env->CallStaticVoidMethod(system_clazz, loadLibraryMethodId, library_string);
- assert(env->ExceptionCheck());
+ CHECK(env->ExceptionCheck());
// We expect UnsatisfiedLinkError.
jthrowable thrown = env->ExceptionOccurred();
@@ -321,7 +320,7 @@
jclass unsatisfied_link_error_clazz = env->FindClass("java/lang/UnsatisfiedLinkError");
jclass thrown_class = env->GetObjectClass(thrown);
- assert(env->IsSameObject(unsatisfied_link_error_clazz, thrown_class));
+ CHECK(env->IsSameObject(unsatisfied_link_error_clazz, thrown_class));
}
}
@@ -333,31 +332,31 @@
static void testShallowGetStackClass2(JNIEnv* env) {
jclass vmstack_clazz = env->FindClass("dalvik/system/VMStack");
- assert(vmstack_clazz != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(vmstack_clazz != nullptr);
+ CHECK(!env->ExceptionCheck());
// Test direct call.
{
jmethodID getStackClass2MethodId = env->GetStaticMethodID(vmstack_clazz, "getStackClass2",
"()Ljava/lang/Class;");
- assert(getStackClass2MethodId != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(getStackClass2MethodId != nullptr);
+ CHECK(!env->ExceptionCheck());
jobject caller_class = env->CallStaticObjectMethod(vmstack_clazz, getStackClass2MethodId);
- assert(caller_class == nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(caller_class == nullptr);
+ CHECK(!env->ExceptionCheck());
}
// Test one-level call. Use VMStack.getStackClass1().
{
jmethodID getStackClass1MethodId = env->GetStaticMethodID(vmstack_clazz, "getStackClass1",
"()Ljava/lang/Class;");
- assert(getStackClass1MethodId != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(getStackClass1MethodId != nullptr);
+ CHECK(!env->ExceptionCheck());
jobject caller_class = env->CallStaticObjectMethod(vmstack_clazz, getStackClass1MethodId);
- assert(caller_class == nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(caller_class == nullptr);
+ CHECK(!env->ExceptionCheck());
}
// For better testing we would need to compile against libcore and have a two-deep stack
@@ -416,8 +415,8 @@
env_->ExceptionDescribe();
env_->FatalError(__FUNCTION__);
}
- assert(!env_->ExceptionCheck());
- assert(c != nullptr);
+ CHECK(!env_->ExceptionCheck());
+ CHECK(c != nullptr);
return c;
}
@@ -429,7 +428,7 @@
env_->ExceptionDescribe();
env_->FatalError(__FUNCTION__);
}
- assert(m != nullptr);
+ CHECK(m != nullptr);
return m;
}
@@ -439,7 +438,7 @@
env_->ExceptionDescribe();
env_->FatalError(__FUNCTION__);
}
- assert(o != nullptr);
+ CHECK(o != nullptr);
return o;
}
@@ -467,7 +466,7 @@
env_->ExceptionDescribe();
env_->FatalError(__FUNCTION__);
}
- assert(m != nullptr);
+ CHECK(m != nullptr);
return m;
}
@@ -508,21 +507,21 @@
jobject sub_super = CallConstructor(sub_, super_constructor_);
jobject sub_sub = CallConstructor(sub_, sub_constructor_);
- assert(env_->IsInstanceOf(super_super, super_));
- assert(!env_->IsInstanceOf(super_super, sub_));
+ CHECK(env_->IsInstanceOf(super_super, super_));
+ CHECK(!env_->IsInstanceOf(super_super, sub_));
// Note that even though we called (and ran) the subclass
// constructor, we are not the subclass.
- assert(env_->IsInstanceOf(super_sub, super_));
- assert(!env_->IsInstanceOf(super_sub, sub_));
+ CHECK(env_->IsInstanceOf(super_sub, super_));
+ CHECK(!env_->IsInstanceOf(super_sub, sub_));
// Note that even though we called the superclass constructor, we
// are still the subclass.
- assert(env_->IsInstanceOf(sub_super, super_));
- assert(env_->IsInstanceOf(sub_super, sub_));
+ CHECK(env_->IsInstanceOf(sub_super, super_));
+ CHECK(env_->IsInstanceOf(sub_super, sub_));
- assert(env_->IsInstanceOf(sub_sub, super_));
- assert(env_->IsInstanceOf(sub_sub, sub_));
+ CHECK(env_->IsInstanceOf(sub_sub, super_));
+ CHECK(env_->IsInstanceOf(sub_sub, sub_));
}
void TestnonstaticCallNonvirtualMethod(bool super_object, bool super_class, bool super_method, const char* test_case) {
@@ -542,8 +541,8 @@
CallMethod(o, c, m, true, test_case);
jboolean super_field = GetBooleanField(o, super_field_);
jboolean sub_field = GetBooleanField(o, sub_field_);
- assert(super_field == super_method);
- assert(sub_field != super_method);
+ CHECK_EQ(super_field, super_method);
+ CHECK_NE(sub_field, super_method);
}
void TestnonstaticCallNonvirtualMethod() {
@@ -565,20 +564,20 @@
extern "C" JNIEXPORT void JNICALL Java_Main_testNewStringObject(JNIEnv* env, jclass) {
jclass c = env->FindClass("java/lang/String");
- assert(c != nullptr);
+ CHECK(c != nullptr);
jmethodID mid1 = env->GetMethodID(c, "<init>", "()V");
- assert(mid1 != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(mid1 != nullptr);
+ CHECK(!env->ExceptionCheck());
jmethodID mid2 = env->GetMethodID(c, "<init>", "([B)V");
- assert(mid2 != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(mid2 != nullptr);
+ CHECK(!env->ExceptionCheck());
jmethodID mid3 = env->GetMethodID(c, "<init>", "([C)V");
- assert(mid3 != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(mid3 != nullptr);
+ CHECK(!env->ExceptionCheck());
jmethodID mid4 = env->GetMethodID(c, "<init>", "(Ljava/lang/String;)V");
- assert(mid4 != nullptr);
- assert(!env->ExceptionCheck());
+ CHECK(mid4 != nullptr);
+ CHECK(!env->ExceptionCheck());
const char* test_array = "Test";
int byte_array_length = strlen(test_array);
@@ -587,22 +586,22 @@
// Test NewObject
jstring s = reinterpret_cast<jstring>(env->NewObject(c, mid2, byte_array));
- assert(s != nullptr);
- assert(env->GetStringLength(s) == byte_array_length);
- assert(env->GetStringUTFLength(s) == byte_array_length);
+ CHECK(s != nullptr);
+ CHECK_EQ(env->GetStringLength(s), byte_array_length);
+ CHECK_EQ(env->GetStringUTFLength(s), byte_array_length);
const char* chars = env->GetStringUTFChars(s, nullptr);
- assert(strcmp(test_array, chars) == 0);
+ CHECK_EQ(strcmp(test_array, chars), 0);
env->ReleaseStringUTFChars(s, chars);
// Test AllocObject and Call(Nonvirtual)VoidMethod
jstring s1 = reinterpret_cast<jstring>(env->AllocObject(c));
- assert(s1 != nullptr);
+ CHECK(s1 != nullptr);
jstring s2 = reinterpret_cast<jstring>(env->AllocObject(c));
- assert(s2 != nullptr);
+ CHECK(s2 != nullptr);
jstring s3 = reinterpret_cast<jstring>(env->AllocObject(c));
- assert(s3 != nullptr);
+ CHECK(s3 != nullptr);
jstring s4 = reinterpret_cast<jstring>(env->AllocObject(c));
- assert(s4 != nullptr);
+ CHECK(s4 != nullptr);
jcharArray char_array = env->NewCharArray(5);
jstring string_arg = env->NewStringUTF("helloworld");
@@ -621,18 +620,18 @@
// Test with global and weak global references
jstring s5 = reinterpret_cast<jstring>(env->AllocObject(c));
- assert(s5 != nullptr);
+ CHECK(s5 != nullptr);
s5 = reinterpret_cast<jstring>(env->NewGlobalRef(s5));
jstring s6 = reinterpret_cast<jstring>(env->AllocObject(c));
- assert(s6 != nullptr);
+ CHECK(s6 != nullptr);
s6 = reinterpret_cast<jstring>(env->NewWeakGlobalRef(s6));
env->CallVoidMethod(s5, mid1);
env->CallNonvirtualVoidMethod(s6, c, mid2, byte_array);
- assert(env->GetStringLength(s5) == 0);
- assert(env->GetStringLength(s6) == byte_array_length);
+ CHECK_EQ(env->GetStringLength(s5), 0);
+ CHECK_EQ(env->GetStringLength(s6), byte_array_length);
const char* chars6 = env->GetStringUTFChars(s6, nullptr);
- assert(strcmp(test_array, chars6) == 0);
+ CHECK_EQ(strcmp(test_array, chars6), 0);
env->ReleaseStringUTFChars(s6, chars6);
}
@@ -664,8 +663,8 @@
public:
explicit JniCallDefaultMethodsTest(JNIEnv* env)
: env_(env), concrete_class_(env_->FindClass("ConcreteClass")) {
- assert(!env_->ExceptionCheck());
- assert(concrete_class_ != nullptr);
+ CHECK(!env_->ExceptionCheck());
+ CHECK(concrete_class_ != nullptr);
}
void Test() {
@@ -688,14 +687,14 @@
void TestCalls(const char* declaring_class, std::vector<const char*> methods) {
jmethodID new_method = env_->GetMethodID(concrete_class_, "<init>", "()V");
jobject obj = env_->NewObject(concrete_class_, new_method);
- assert(!env_->ExceptionCheck());
- assert(obj != nullptr);
+ CHECK(!env_->ExceptionCheck());
+ CHECK(obj != nullptr);
jclass decl_class = env_->FindClass(declaring_class);
- assert(!env_->ExceptionCheck());
- assert(decl_class != nullptr);
+ CHECK(!env_->ExceptionCheck());
+ CHECK(decl_class != nullptr);
for (const char* method : methods) {
jmethodID method_id = env_->GetMethodID(decl_class, method, "()V");
- assert(!env_->ExceptionCheck());
+ CHECK(!env_->ExceptionCheck());
printf("Calling method %s->%s on object of type ConcreteClass\n", declaring_class, method);
env_->CallVoidMethod(obj, method_id);
if (env_->ExceptionCheck()) {
@@ -704,10 +703,10 @@
jmethodID to_string = env_->GetMethodID(
env_->FindClass("java/lang/Object"), "toString", "()Ljava/lang/String;");
jstring exception_string = (jstring) env_->CallObjectMethod(thrown, to_string);
- assert(!env_->ExceptionCheck());
+ CHECK(!env_->ExceptionCheck());
const char* exception_string_utf8 = env_->GetStringUTFChars(exception_string, nullptr);
- assert(!env_->ExceptionCheck());
- assert(exception_string_utf8 != nullptr);
+ CHECK(!env_->ExceptionCheck());
+ CHECK(exception_string_utf8 != nullptr);
printf("EXCEPTION OCCURRED: %s\n", exception_string_utf8);
env_->ReleaseStringUTFChars(exception_string, exception_string_utf8);
}
@@ -724,12 +723,12 @@
static void InvokeSpecificMethod(JNIEnv* env, jobject obj, const char* method) {
jclass lambda_class = env->FindClass("LambdaInterface");
- assert(!env->ExceptionCheck());
- assert(lambda_class != nullptr);
+ CHECK(!env->ExceptionCheck());
+ CHECK(lambda_class != nullptr);
jmethodID method_id = env->GetMethodID(lambda_class, method, "()V");
- assert(!env->ExceptionCheck());
+ CHECK(!env->ExceptionCheck());
env->CallVoidMethod(obj, method_id);
- assert(!env->ExceptionCheck());
+ CHECK(!env->ExceptionCheck());
}
extern "C" JNIEXPORT void JNICALL Java_Main_testInvokeLambdaDefaultMethod(
@@ -740,3 +739,6 @@
extern "C" JNIEXPORT void JNICALL Java_Main_testInvokeLambdaMethod(JNIEnv* e, jclass, jobject l) {
InvokeSpecificMethod(e, l, "sayHi");
}
+
+} // namespace art
+
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 284e554..5304590 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "art_method-inl.h"
#include "check_reference_map_visitor.h"
#include "jni.h"
diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc
index 51bb68f..420224d 100644
--- a/test/004-StackWalk/stack_walk_jni.cc
+++ b/test/004-StackWalk/stack_walk_jni.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "art_method-inl.h"
#include "check_reference_map_visitor.h"
#include "jni.h"
diff --git a/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc b/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
index 54879fb..c9110a9 100644
--- a/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
+++ b/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <dlfcn.h>
#include <iostream>
#include "base/casts.h"
@@ -45,6 +46,10 @@
self->SetTopOfShadowStack(nullptr);
JavaVM* vm = down_cast<JNIEnvExt*>(env)->vm;
vm->DetachCurrentThread();
+ // Re-open our own library to make sure it does not get unloaded out from
+ // under us by DestroyJavaVM. b/28406866
+ void* handle = dlopen(kIsDebugBuild ? "libarttestd.so" : "libarttest.so", RTLD_NOW);
+ CHECK(handle != nullptr);
vm->DestroyJavaVM();
vm_was_shutdown.store(true);
// Give threads some time to get stuck in ExceptionCheck.
diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java
index 15683b0..17a6049 100644
--- a/test/141-class-unload/src/Main.java
+++ b/test/141-class-unload/src/Main.java
@@ -23,6 +23,7 @@
public class Main {
static final String DEX_FILE = System.getenv("DEX_LOCATION") + "/141-class-unload-ex.jar";
+ static final String LIBRARY_SEARCH_PATH = System.getProperty("java.library.path");
static String nativeLibraryName;
public static void main(String[] args) throws Exception {
@@ -32,7 +33,7 @@
throw new AssertionError("Couldn't find path class loader class");
}
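+ // PathClassLoader's three-argument constructor is
+ // (String dexPath, String librarySearchPath, ClassLoader parent).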
Constructor constructor =
- pathClassLoader.getDeclaredConstructor(String.class, ClassLoader.class);
+ pathClassLoader.getDeclaredConstructor(String.class, String.class, ClassLoader.class);
try {
testUnloadClass(constructor);
testUnloadLoader(constructor);
@@ -49,7 +50,7 @@
// Test that the oat files are unloaded.
testOatFilesUnloaded(getPid());
} catch (Exception e) {
- System.out.println(e);
+ e.printStackTrace();
}
}
@@ -118,7 +119,7 @@
private static void testNoUnloadInvoke(Constructor constructor) throws Exception {
WeakReference<ClassLoader> loader =
new WeakReference((ClassLoader) constructor.newInstance(
- DEX_FILE, ClassLoader.getSystemClassLoader()));
+ DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader()));
WeakReference<Class> intHolder = new WeakReference(loader.get().loadClass("IntHolder"));
intHolder.get().getDeclaredMethod("runGC").invoke(intHolder.get());
boolean isNull = loader.get() == null;
@@ -128,7 +129,7 @@
private static void testNoUnloadInstance(Constructor constructor) throws Exception {
WeakReference<ClassLoader> loader =
new WeakReference((ClassLoader) constructor.newInstance(
- DEX_FILE, ClassLoader.getSystemClassLoader()));
+ DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader()));
WeakReference<Class> intHolder = new WeakReference(loader.get().loadClass("IntHolder"));
Object o = intHolder.get().newInstance();
Runtime.getRuntime().gc();
@@ -138,7 +139,7 @@
private static WeakReference<Class> setUpUnloadClass(Constructor constructor) throws Exception {
ClassLoader loader = (ClassLoader) constructor.newInstance(
- DEX_FILE, ClassLoader.getSystemClassLoader());
+ DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader());
Class intHolder = loader.loadClass("IntHolder");
Method getValue = intHolder.getDeclaredMethod("getValue");
Method setValue = intHolder.getDeclaredMethod("setValue", Integer.TYPE);
@@ -155,7 +156,7 @@
boolean waitForCompilation)
throws Exception {
ClassLoader loader = (ClassLoader) constructor.newInstance(
- DEX_FILE, ClassLoader.getSystemClassLoader());
+ DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader());
Class intHolder = loader.loadClass("IntHolder");
Method setValue = intHolder.getDeclaredMethod("setValue", Integer.TYPE);
setValue.invoke(intHolder, 2);
@@ -177,7 +178,7 @@
private static WeakReference<ClassLoader> setUpLoadLibrary(Constructor constructor)
throws Exception {
ClassLoader loader = (ClassLoader) constructor.newInstance(
- DEX_FILE, ClassLoader.getSystemClassLoader());
+ DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader());
Class intHolder = loader.loadClass("IntHolder");
Method loadLibrary = intHolder.getDeclaredMethod("loadLibrary", String.class);
loadLibrary.invoke(intHolder, nativeLibraryName);
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index 66e1d92..41771b5 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -927,6 +927,32 @@
}
}
+ /// CHECK-START: void Main.nonzeroLength(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ //
+ /// CHECK-START: void Main.nonzeroLength(int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ public static void nonzeroLength(int[] a) {
+ if (a.length != 0) {
+ a[0] = 112;
+ }
+ }
+
+ /// CHECK-START: void Main.knownLength(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ //
+ /// CHECK-START: void Main.knownLength(int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ public static void knownLength(int[] a) {
+ if (a.length == 2) {
+ a[0] = -1;
+ a[1] = -2;
+ }
+ }
+
static int[][] mA;
/// CHECK-START: void Main.dynamicBCEAndIntrinsic(int) BCE (before)
@@ -1586,6 +1612,26 @@
}
}
+ nonzeroLength(array);
+ if (array[0] != 112) {
+ System.out.println("nonzero length failed!");
+ }
+
+ knownLength(array);
+ if (array[0] != 112 || array[1] != 1) {
+ System.out.println("known length failed!");
+ }
+ array = new int[2];
+ knownLength(array);
+ if (array[0] != -1 || array[1] != -2) {
+ System.out.println("known length failed!");
+ }
+
+ // Zero length array does not break.
+ array = new int[0];
+ nonzeroLength(array);
+ knownLength(array);
+
mA = new int[4][4];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
diff --git a/test/530-checker-loops2/src/Main.java b/test/530-checker-loops2/src/Main.java
index c644692..b12fbd6 100644
--- a/test/530-checker-loops2/src/Main.java
+++ b/test/530-checker-loops2/src/Main.java
@@ -710,8 +710,8 @@
// making them a candidate for deoptimization based on constant indices.
// Compiler should ensure the array loads are not subsequently hoisted
// "above" the deoptimization "barrier" on the bounds.
- a[0][i] = 1;
- a[1][i] = 2;
+ a[1][i] = 1;
+ a[2][i] = 2;
a[99][i] = 3;
}
}
@@ -1042,11 +1042,11 @@
a = new int[100][10];
expectEquals(55, dynamicBCEAndConstantIndices(x, a, 0, 10));
for (int i = 0; i < 10; i++) {
- expectEquals((i % 10) != 0 ? 1 : 0, a[0][i]);
- expectEquals((i % 10) != 0 ? 2 : 0, a[1][i]);
+ expectEquals((i % 10) != 0 ? 1 : 0, a[1][i]);
+ expectEquals((i % 10) != 0 ? 2 : 0, a[2][i]);
expectEquals((i % 10) != 0 ? 3 : 0, a[99][i]);
}
- a = new int[2][10];
+ a = new int[3][10];
sResult = 0;
try {
expectEquals(55, dynamicBCEAndConstantIndices(x, a, 0, 10));
@@ -1054,8 +1054,8 @@
sResult = 1;
}
expectEquals(1, sResult);
- expectEquals(a[0][1], 1);
- expectEquals(a[1][1], 2);
+ expectEquals(a[1][1], 1);
+ expectEquals(a[2][1], 2);
// Dynamic BCE combined with constant indices of all types.
boolean[] x1 = { true };
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 167a575..7b2c6cb 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -60,6 +60,7 @@
do_checks(cls, "testInvokeVirtual");
do_checks(cls, "testInvokeInterface");
+ do_checks(cls, "$noinline$testInlineToSameTarget");
}
} // namespace art
diff --git a/test/566-polymorphic-inlining/src/Main.java b/test/566-polymorphic-inlining/src/Main.java
index 7283e86..a59ce5b 100644
--- a/test/566-polymorphic-inlining/src/Main.java
+++ b/test/566-polymorphic-inlining/src/Main.java
@@ -25,6 +25,12 @@
}
}
+ public static void assertEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new Error("Expected " + expected + ", got " + actual);
+ }
+ }
+
public static void main(String[] args) throws Exception {
System.loadLibrary(args[0]);
Main[] mains = new Main[3];
@@ -41,6 +47,8 @@
testInvokeVirtual(mains[1]);
testInvokeInterface(itfs[0]);
testInvokeInterface(itfs[1]);
+ $noinline$testInlineToSameTarget(mains[0]);
+ $noinline$testInlineToSameTarget(mains[1]);
}
ensureJittedAndPolymorphicInline();
@@ -56,6 +64,10 @@
// This will trigger a deoptimization of the compiled code.
assertEquals(OtherSubclass.class, testInvokeVirtual(mains[2]));
assertEquals(OtherSubclass.class, testInvokeInterface(itfs[2]));
+
+ // Run this once to make sure we execute the JITted code.
+ $noinline$testInlineToSameTarget(mains[0]);
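+ // Each call to $noinline$testInlineToSameTarget increments 'counter' once; the
+ // expected 20001 assumes the two warm-up calls above run in a 10000-iteration loop
+ // plus this single post-compile call.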
+ assertEquals(20001, counter);
}
public Class sameInvokeVirtual() {
@@ -76,9 +88,21 @@
return m.sameInvokeVirtual();
}
+ public static void $noinline$testInlineToSameTarget(Main m) {
+ if (doThrow) throw new Error("");
+ m.increment();
+ }
+
public Object field = new Object();
public static native void ensureJittedAndPolymorphicInline();
+
+ public void increment() {
+ field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo
+ counter++;
+ }
+ public static int counter = 0;
+ public static boolean doThrow = false;
}
class Subclass extends Main {
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index 2a5b2c9..2fa5800 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "art_method.h"
+#include "art_method-inl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/profiling_info.h"
@@ -75,7 +75,7 @@
extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInInterpreter(JNIEnv* env,
jclass,
jstring method_name) {
- if (!Runtime::Current()->UseJit()) {
+ if (!Runtime::Current()->UseJitCompilation()) {
// The return value is irrelevant if we're not using JIT.
return false;
}
@@ -111,7 +111,7 @@
extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasProfilingInfo(JNIEnv* env,
jclass,
jstring method_name) {
- if (!Runtime::Current()->UseJit()) {
+ if (!Runtime::Current()->UseJitCompilation()) {
return;
}
ScopedUtfChars chars(env, method_name);
@@ -151,7 +151,7 @@
extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasOsrCode(JNIEnv* env,
jclass,
jstring method_name) {
- if (!Runtime::Current()->UseJit()) {
+ if (!Runtime::Current()->UseJitCompilation()) {
return;
}
ScopedUtfChars chars(env, method_name);
diff --git a/test/570-checker-osr/src/Main.java b/test/570-checker-osr/src/Main.java
index 200b54a..15c232d 100644
--- a/test/570-checker-osr/src/Main.java
+++ b/test/570-checker-osr/src/Main.java
@@ -61,6 +61,18 @@
throw new Error("Unexpected return value");
}
+ $noinline$inlineCache2(new Main(), /* isSecondInvocation */ false);
+ if ($noinline$inlineCache2(new SubMain(), /* isSecondInvocation */ true) != SubMain.class) {
+ throw new Error("Unexpected return value");
+ }
+
+ // Test polymorphic inline cache to the same target (inlineCache3).
+ $noinline$inlineCache3(new Main(), /* isSecondInvocation */ false);
+ $noinline$inlineCache3(new SubMain(), /* isSecondInvocation */ false);
+ if ($noinline$inlineCache3(new SubMain(), /* isSecondInvocation */ true) != null) {
+ throw new Error("Unexpected return value");
+ }
+
$noinline$stackOverflow(new Main(), /* isSecondInvocation */ false);
$noinline$stackOverflow(new SubMain(), /* isSecondInvocation */ true);
@@ -147,10 +159,76 @@
return other.returnClass();
}
+ public static Class $noinline$inlineCache2(Main m, boolean isSecondInvocation) {
+ // If we are running in non-JIT mode, or were unlucky enough to get this method
+ // already JITted, just return the expected value.
+ if (!isInInterpreter("$noinline$inlineCache2")) {
+ return SubMain.class;
+ }
+
+ ensureHasProfilingInfo("$noinline$inlineCache2");
+
+ // Ensure that we have OSR code to jump to.
+ if (isSecondInvocation) {
+ ensureHasOsrCode("$noinline$inlineCache2");
+ }
+
+ // This call will be optimized in the OSR compiled code
+ // to check and deoptimize if m is not of type 'Main'.
+ Main other = m.inlineCache2();
+
+ // Jump to OSR compiled code. The second run
+ // of this method will have 'm' as a SubMain, and the compiled
+ // code we are jumping to will have wrongly optimized 'other' as being null.
+ if (isSecondInvocation) {
+ while (!isInOsrCode("$noinline$inlineCache2")) {}
+ }
+
+ // We used to wrongly optimize this code and assume 'other' was always null.
+ return (other == null) ? null : other.returnClass();
+ }
+
+ public static Class $noinline$inlineCache3(Main m, boolean isSecondInvocation) {
+ // If we are running in non-JIT mode, or were unlucky enough to get this method
+ // already JITted, just return the expected value.
+ if (!isInInterpreter("$noinline$inlineCache3")) {
+ return null;
+ }
+
+ ensureHasProfilingInfo("$noinline$inlineCache3");
+
+ // Ensure that we have OSR code to jump to.
+ if (isSecondInvocation) {
+ ensureHasOsrCode("$noinline$inlineCache3");
+ }
+
+ // This call will be optimized in the OSR compiled code
+ // to check and deoptimize if m is not of type 'Main'.
+ Main other = m.inlineCache3();
+
+ // Jump to OSR compiled code. The second run
+ // of this method will have 'm' as a SubMain, and the compiled
+ // code we are jumping to will have wrongly optimized 'other' as being null.
+ if (isSecondInvocation) {
+ while (!isInOsrCode("$noinline$inlineCache3")) {}
+ }
+
+ // We used to wrongly optimize this code and assume 'other' was always null.
+ return (other == null) ? null : other.returnClass();
+ }
+
public Main inlineCache() {
return new Main();
}
+ public Main inlineCache2() {
+ return null;
+ }
+
+ public Main inlineCache3() {
+ return null;
+ }
+
public Class returnClass() {
return Main.class;
}
@@ -235,6 +313,10 @@
return new SubMain();
}
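+ // Only inlineCache2 is overridden below; inlineCache3 keeps a single target
+ // (Main.inlineCache3) across both receiver types.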
+ public Main inlineCache2() {
+ return new SubMain();
+ }
+
public void otherInlineCache() {
return;
}
diff --git a/test/586-checker-null-array-get/src/Main.java b/test/586-checker-null-array-get/src/Main.java
index 332cfb0..e0782bc 100644
--- a/test/586-checker-null-array-get/src/Main.java
+++ b/test/586-checker-null-array-get/src/Main.java
@@ -14,10 +14,20 @@
* limitations under the License.
*/
+class Test1 {
+ int[] iarr;
+}
+
+class Test2 {
+ float[] farr;
+}
+
public class Main {
public static Object[] getObjectArray() { return null; }
public static long[] getLongArray() { return null; }
public static Object getNull() { return null; }
+ public static Test1 getNullTest1() { return null; }
+ public static Test2 getNullTest2() { return null; }
public static void main(String[] args) {
try {
@@ -26,13 +36,25 @@
} catch (NullPointerException e) {
// Expected.
}
+ try {
+ bar();
+ throw new Error("Expected NullPointerException");
+ } catch (NullPointerException e) {
+ // Expected.
+ }
+ try {
+ test1();
+ throw new Error("Expected NullPointerException");
+ } catch (NullPointerException e) {
+ // Expected.
+ }
}
/// CHECK-START: void Main.foo() load_store_elimination (after)
- /// CHECK-DAG: <<Null:l\d+>> NullConstant
- /// CHECK-DAG: <<Check:l\d+>> NullCheck [<<Null>>]
- /// CHECK-DAG: <<Get1:j\d+>> ArrayGet [<<Check>>,{{i\d+}}]
- /// CHECK-DAG: <<Get2:l\d+>> ArrayGet [<<Check>>,{{i\d+}}]
+ /// CHECK-DAG: <<Null:l\d+>> NullConstant
+ /// CHECK-DAG: <<Check:l\d+>> NullCheck [<<Null>>]
+ /// CHECK-DAG: <<Get1:j\d+>> ArrayGet [<<Check>>,{{i\d+}}]
+ /// CHECK-DAG: <<Get2:l\d+>> ArrayGet [<<Check>>,{{i\d+}}]
public static void foo() {
longField = getLongArray()[0];
objectField = getObjectArray()[0];
@@ -56,7 +78,7 @@
// elimination pass to add a HDeoptimize. Not having the bounds check helped
// the load store elimination think it could merge two ArrayGet with different
// types.
- String[] array = ((String[])getNull());
+ String[] array = (String[])getNull();
objectField = array[0];
objectField = array[1];
objectField = array[2];
@@ -68,6 +90,23 @@
longField = longArray[3];
}
+ /// CHECK-START: float Main.test1() load_store_elimination (after)
+ /// CHECK-DAG: <<Null:l\d+>> NullConstant
+ /// CHECK-DAG: <<Check1:l\d+>> NullCheck [<<Null>>]
+ /// CHECK-DAG: <<FieldGet1:l\d+>> InstanceFieldGet [<<Check1>>] field_name:Test1.iarr
+ /// CHECK-DAG: <<Check2:l\d+>> NullCheck [<<FieldGet1>>]
+ /// CHECK-DAG: <<ArrayGet1:i\d+>> ArrayGet [<<Check2>>,{{i\d+}}]
+ /// CHECK-DAG: <<ArrayGet2:f\d+>> ArrayGet [<<Check2>>,{{i\d+}}]
+ /// CHECK-DAG: Return [<<ArrayGet2>>]
+ public static float test1() {
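+ // As in bar() above, the int and float ArrayGets must not be merged by
+ // load-store elimination even though both arrays come from null-checked references.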
+ Test1 test1 = getNullTest1();
+ Test2 test2 = getNullTest2();
+ int[] iarr = test1.iarr;
+ float[] farr = test2.farr;
+ iarr[0] = iarr[1];
+ return farr[0];
+ }
+
public static long longField;
public static Object objectField;
}
diff --git a/test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali b/test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali
index 366c7b9..ef53ee8 100644
--- a/test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali
+++ b/test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali
@@ -67,3 +67,57 @@
return p3
.end method
+
+## CHECK-START: int IrreducibleLoop.liveness2(boolean, boolean, boolean, int) builder (after)
+## CHECK-DAG: Mul loop:<<Loop:B\d+>>
+## CHECK-DAG: Not loop:<<Loop>>
+
+## CHECK-START: int IrreducibleLoop.liveness2(boolean, boolean, boolean, int) liveness (after)
+## CHECK-DAG: Mul liveness:<<LPreEntry2:\d+>>
+## CHECK-DAG: Not liveness:<<LBackEdge1:\d+>>
+## CHECK-EVAL: <<LBackEdge1>> < <<LPreEntry2>>
+
+.method public liveness2(ZZZI)I
+ .registers 10
+
+ const v1, 1
+
+ :header1
+ if-eqz p0, :body1
+
+ :exit
+ return p3
+
+ :body1
+ # The test will generate an incorrect linear order when the following IF swaps
+ # its successors. To do that, load a boolean value and compare NotEqual to 1.
+ sget-boolean v2, LIrreducibleLoop;->f:Z
+ const v3, 1
+ if-ne v2, v3, :pre_header2
+
+ :pre_entry2
+ # This constant has a use in a phi in :back_edge2 and a back edge use in
+ # :back_edge1. Because the linear order is wrong, the back edge use has
+ # a lower liveness than the phi use.
+ const v0, 42
+ mul-int/2addr p3, p3
+ goto :back_edge2
+
+ :back_edge2
+ add-int/2addr p3, v0
+ add-int/2addr v0, v1
+ goto :header2
+
+ :header2
+ if-eqz p2, :back_edge2
+
+ :back_edge1
+ not-int p3, p3
+ goto :header1
+
+ :pre_header2
+ const v0, 42
+ goto :header2
+.end method
+
+.field public static f:Z
diff --git a/test/594-load-string-regression/expected.txt b/test/594-load-string-regression/expected.txt
new file mode 100644
index 0000000..365b0e1
--- /dev/null
+++ b/test/594-load-string-regression/expected.txt
@@ -0,0 +1 @@
+String: ""
diff --git a/test/594-load-string-regression/info.txt b/test/594-load-string-regression/info.txt
new file mode 100644
index 0000000..6a07ace
--- /dev/null
+++ b/test/594-load-string-regression/info.txt
@@ -0,0 +1,2 @@
+Regression test for LoadString listing side effects when it doesn't have any
+and triggering a DCHECK() failure when merging ClinitCheck into NewInstance.
diff --git a/test/594-load-string-regression/src/Main.java b/test/594-load-string-regression/src/Main.java
new file mode 100644
index 0000000..0b9f7b5
--- /dev/null
+++ b/test/594-load-string-regression/src/Main.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ static boolean doThrow = false;
+
+ // Note: We're not doing checker tests as we cannot do them specifically for a non-PIC
+ // configuration. The check here would be "prepare_for_register_allocation (before)"
+ // CHECK: LoadClass
+ // CHECK-NEXT: ClinitCheck
+ // CHECK-NEXT: LoadString load_kind:BootImageAddress
+ // CHECK-NEXT: NewInstance
+ // and "prepare_for_register_allocation (after)"
+ // CHECK: LoadString
+ // CHECK-NEXT: NewInstance
+ // but the order of instructions for non-PIC mode is different.
+ public static int $noinline$test() {
+ if (doThrow) { throw new Error(); }
+
+ int r = 0x12345678;
+ do {
+ // LICM pulls the LoadClass and ClinitCheck out of the loop, leaves NewInstance in the loop.
+ Helper h = new Helper();
+ // For non-PIC mode, LICM pulls the boot image LoadString out of the loop.
+ // (For PIC mode, the LoadString can throw and will not be moved out of the loop.)
+ String s = ""; // Empty string is known to be in the boot image.
+ r = r ^ (r >> 5);
+ h.$noinline$printString(s);
+ // During DCE after inlining, the loop back-edge disappears and the pre-header is
+ // merged with the body, leaving consecutive LoadClass, ClinitCheck, LoadString
+ // and NewInstance in non-PIC mode. The prepare_for_register_allocation pass
+ // merges the LoadClass and ClinitCheck with the NewInstance and checks that
+ // there are no instructions with side effects in between. This check used to
+ // fail because LoadString was always listing SideEffects::CanTriggerGC() even
+ // when it doesn't really have any side effects, i.e. for direct references to
+ // boot image Strings or for Strings known to be in the dex cache.
+ } while ($inline$shouldContinue());
+ return r;
+ }
+
+ static boolean $inline$shouldContinue() {
+ return false;
+ }
+
+ public static void main(String[] args) {
+ assertIntEquals(0x12345678 ^ (0x12345678 >> 5), $noinline$test());
+ }
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
+
+class Helper {
+ static boolean doThrow = false;
+
+ public void $noinline$printString(String s) {
+ if (doThrow) { throw new Error(); }
+
+ System.out.println("String: \"" + s + "\"");
+ }
+}
diff --git a/test/595-profile-saving/expected.txt b/test/595-profile-saving/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/595-profile-saving/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/595-profile-saving/info.txt b/test/595-profile-saving/info.txt
new file mode 100644
index 0000000..5d318f5
--- /dev/null
+++ b/test/595-profile-saving/info.txt
@@ -0,0 +1 @@
+Check that profile recording works even when JIT compilation is not enabled.
diff --git a/test/595-profile-saving/profile-saving.cc b/test/595-profile-saving/profile-saving.cc
new file mode 100644
index 0000000..0d26f45
--- /dev/null
+++ b/test/595-profile-saving/profile-saving.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_file.h"
+
+#include "art_method-inl.h"
+#include "jit/offline_profiling_info.h"
+#include "jit/profile_saver.h"
+#include "jni.h"
+#include "method_reference.h"
+#include "mirror/class-inl.h"
+#include "oat_file_assistant.h"
+#include "oat_file_manager.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedUtfChars.h"
+#include "thread.h"
+
+namespace art {
+namespace {
+
+class CreateProfilingInfoVisitor : public StackVisitor {
+ public:
+ explicit CreateProfilingInfoVisitor(Thread* thread, const char* method_name)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ method_name_(method_name) {}
+
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* m = GetMethod();
+ std::string m_name(m->GetName());
+
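+ // Returning false from VisitFrame stops the stack walk once the requested
+ // method has been found.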
+ if (m_name.compare(method_name_) == 0) {
+ ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true);
+ method_index_ = m->GetDexMethodIndex();
+ return false;
+ }
+ return true;
+ }
+
+ int method_index_ = -1;
+ const char* const method_name_;
+};
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_ensureProfilingInfo(JNIEnv* env,
+ jclass,
+ jstring method_name) {
+ ScopedUtfChars chars(env, method_name);
+ CHECK(chars.c_str() != nullptr);
+ ScopedObjectAccess soa(Thread::Current());
+ CreateProfilingInfoVisitor visitor(soa.Self(), chars.c_str());
+ visitor.WalkStack();
+ return visitor.method_index_;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_ensureProfileProcessing(JNIEnv*, jclass) {
+ ProfileSaver::ForceProcessProfiles();
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_presentInProfile(
+ JNIEnv* env, jclass cls, jstring filename, jint method_index) {
+ ScopedUtfChars filename_chars(env, filename);
+ CHECK(filename_chars.c_str() != nullptr);
+ ScopedObjectAccess soa(Thread::Current());
+ const DexFile* dex_file = soa.Decode<mirror::Class*>(cls)->GetDexCache()->GetDexFile();
+ return ProfileSaver::HasSeenMethod(std::string(filename_chars.c_str()),
+ dex_file,
+ static_cast<uint16_t>(method_index));
+}
+
+} // namespace
+} // namespace art
diff --git a/test/595-profile-saving/run b/test/595-profile-saving/run
new file mode 100644
index 0000000..068ad03
--- /dev/null
+++ b/test/595-profile-saving/run
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use
+# --compiler-filter=interpret-only to make sure the test is neither compiled AOT
+#   nor compiled when loaded (by PathClassLoader),
+# -Xjitsaveprofilinginfo to enable profile saving,
+# -Xusejit:false to disable the JIT and test only the profiles.
+exec ${RUN} \
+ -Xcompiler-option --compiler-filter=interpret-only \
+ --runtime-option '-Xcompiler-option --compiler-filter=interpret-only' \
+ --runtime-option -Xjitsaveprofilinginfo \
+ --runtime-option -Xusejit:false \
+ "${@}"
diff --git a/test/595-profile-saving/src/Main.java b/test/595-profile-saving/src/Main.java
new file mode 100644
index 0000000..039503f
--- /dev/null
+++ b/test/595-profile-saving/src/Main.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Method;
+
+public class Main {
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+
+ File file = null;
+ try {
+ file = createTempFile();
+ String codePath = System.getenv("DEX_LOCATION") + "/595-profile-saving.jar";
+ VMRuntime.registerAppInfo(file.getPath(),
+ System.getenv("DEX_LOCATION"),
+ new String[] {codePath},
+ /* foreignProfileDir */ null);
+
+ int methodIdx = $opt$noinline$testProfile();
+ ensureProfileProcessing();
+ if (!presentInProfile(file.getPath(), methodIdx)) {
+ throw new RuntimeException("Method with index " + methodIdx + " not in the profile");
+ }
+ } finally {
+ if (file != null) {
+ file.delete();
+ }
+ }
+ }
+
+ public static int $opt$noinline$testProfile() {
+ if (doThrow) throw new Error();
+ // Make sure we have profiling info for this method without having to loop.
+ return ensureProfilingInfo("$opt$noinline$testProfile");
+ }
+
+ // Return the dex method index.
+ public static native int ensureProfilingInfo(String methodName);
+ // Ensures the profile saver does its usual processing.
+ public static native void ensureProfileProcessing();
+ // Checks whether the profile saver knows about the method.
+ public static native boolean presentInProfile(String profile, int methodIdx);
+
+ public static boolean doThrow = false;
+ private static final String TEMP_FILE_NAME_PREFIX = "dummy";
+ private static final String TEMP_FILE_NAME_SUFFIX = "-file";
+
+ static native String getProfileInfoDump(String filename);
+
+ private static File createTempFile() throws Exception {
+ try {
+ return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+ } catch (IOException e) {
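+ // The default tmpdir may not be writable on the device; fall back to
+ // known-writable locations.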
+ System.setProperty("java.io.tmpdir", "/data/local/tmp");
+ try {
+ return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+ } catch (IOException e2) {
+ System.setProperty("java.io.tmpdir", "/sdcard");
+ return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+ }
+ }
+ }
+
+ private static class VMRuntime {
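+ // dalvik.system.VMRuntime is not part of the public API, so invoke
+ // registerAppInfo reflectively.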
+ private static final Method registerAppInfoMethod;
+ static {
+ try {
+ Class<? extends Object> c = Class.forName("dalvik.system.VMRuntime");
+ registerAppInfoMethod = c.getDeclaredMethod("registerAppInfo",
+ String.class, String.class, String[].class, String.class);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public static void registerAppInfo(String profile, String appDir,
+ String[] codePaths, String foreignDir) throws Exception {
+ registerAppInfoMethod.invoke(null, profile, appDir, codePaths, foreignDir);
+ }
+ }
+}
diff --git a/test/596-app-images/app_images.cc b/test/596-app-images/app_images.cc
new file mode 100644
index 0000000..a5bbf5f
--- /dev/null
+++ b/test/596-app-images/app_images.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <pthread.h>
+#include <stdio.h>
+#include <vector>
+
+#include "gc/heap.h"
+#include "gc/space/image_space.h"
+#include "gc/space/space-inl.h"
+#include "image.h"
+#include "jni.h"
+#include "mirror/class.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+namespace {
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_checkAppImageLoaded(JNIEnv*, jclass) {
+ ScopedObjectAccess soa(Thread::Current());
+ for (auto* space : Runtime::Current()->GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace()) {
+ auto* image_space = space->AsImageSpace();
+ const auto& image_header = image_space->GetImageHeader();
+ if (image_header.IsAppImage()) {
+ return JNI_TRUE;
+ }
+ }
+ }
+ return JNI_FALSE;
+}
+
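+// Returns JNI_TRUE if the given class object lives inside an app image space.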
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_checkAppImageContains(JNIEnv*, jclass, jclass c) {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* klass_ptr = soa.Decode<mirror::Class*>(c);
+ for (auto* space : Runtime::Current()->GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace()) {
+ auto* image_space = space->AsImageSpace();
+ const auto& image_header = image_space->GetImageHeader();
+ if (image_header.IsAppImage()) {
+ if (image_space->HasAddress(klass_ptr)) {
+ return JNI_TRUE;
+ }
+ }
+ }
+ }
+ return JNI_FALSE;
+}
+
+} // namespace
+
+} // namespace art
diff --git a/test/596-app-images/expected.txt b/test/596-app-images/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/596-app-images/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/596-app-images/info.txt b/test/596-app-images/info.txt
new file mode 100644
index 0000000..a3d5e7e
--- /dev/null
+++ b/test/596-app-images/info.txt
@@ -0,0 +1 @@
+Tests that app-images are loaded and used.
diff --git a/test/596-app-images/src/Main.java b/test/596-app-images/src/Main.java
new file mode 100644
index 0000000..75b31b8
--- /dev/null
+++ b/test/596-app-images/src/Main.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+ static class Inner {
+ public static int abc = 0;
+ }
+
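+  // Inner should be part of the app image; main checks that its class object
+  // is found in the loaded image space.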
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ if (!checkAppImageLoaded()) {
+ System.out.println("App image is not loaded!");
+ } else if (!checkAppImageContains(Inner.class)) {
+ System.out.println("App image does not contain Inner!");
+ }
+ }
+
+ public static native boolean checkAppImageLoaded();
+ public static native boolean checkAppImageContains(Class<?> klass);
+}
diff --git a/test/596-checker-dead-phi/expected.txt b/test/596-checker-dead-phi/expected.txt
new file mode 100644
index 0000000..d81cc07
--- /dev/null
+++ b/test/596-checker-dead-phi/expected.txt
@@ -0,0 +1 @@
+42
diff --git a/test/596-checker-dead-phi/info.txt b/test/596-checker-dead-phi/info.txt
new file mode 100644
index 0000000..7f7cf0f
--- /dev/null
+++ b/test/596-checker-dead-phi/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler, where we used to replace a dead
+loop phi with its first incoming input.
diff --git a/test/596-checker-dead-phi/smali/IrreducibleLoop.smali b/test/596-checker-dead-phi/smali/IrreducibleLoop.smali
new file mode 100644
index 0000000..bab2ba9
--- /dev/null
+++ b/test/596-checker-dead-phi/smali/IrreducibleLoop.smali
@@ -0,0 +1,74 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LIrreducibleLoop;
+
+.super Ljava/lang/Object;
+
+# Test case where liveness analysis produces a linear order in which loop
+# blocks are not adjacent. This revealed a bug in our SSA builder: a dead loop
+# phi would be replaced by its incoming input during SsaRedundantPhiElimination.
+
+# Check that the outer loop suspend check environment only has the parameter vreg.
+## CHECK-START: int IrreducibleLoop.liveness(int) builder (after)
+## CHECK-DAG: <<Phi:i\d+>> Phi reg:4 loop:{{B\d+}} irreducible:false
+## CHECK-DAG: SuspendCheck env:[[_,_,_,_,<<Phi>>]] loop:{{B\d+}} irreducible:false
+
+# Check that the linear order has non-adjacent loop blocks.
+## CHECK-START: int IrreducibleLoop.liveness(int) liveness (after)
+## CHECK-DAG: Mul liveness:<<LPreEntry2:\d+>>
+## CHECK-DAG: Add liveness:<<LBackEdge1:\d+>>
+## CHECK-EVAL: <<LBackEdge1>> < <<LPreEntry2>>
+
+.method public static liveness(I)I
+ .registers 5
+
+ const-string v1, "MyString"
+
+ :header1
+ if-eqz p0, :body1
+
+ :exit
+ return p0
+
+ :body1
+  # The test will generate an incorrect linear order when the following IF
+  # swaps its successors. To trigger that, load a boolean value and compare it
+  # against 1 with if-ne.
+ sget-boolean v2, LIrreducibleLoop;->f:Z
+ const v3, 1
+ if-ne v2, v3, :pre_header2
+
+ :pre_entry2
+ # Add a marker on the irreducible loop entry.
+ mul-int/2addr p0, p0
+ goto :back_edge2
+
+ :back_edge2
+ goto :header2
+
+ :header2
+ if-eqz p0, :back_edge2
+
+ :back_edge1
+ # Add a marker on the outer loop back edge.
+ add-int/2addr p0, p0
+ # Set a wide register, to have v1 undefined at the back edge.
+ const-wide/16 v0, 0x1
+ goto :header1
+
+ :pre_header2
+ goto :header2
+.end method
+
+.field public static f:Z
diff --git a/test/596-checker-dead-phi/src/Main.java b/test/596-checker-dead-phi/src/Main.java
new file mode 100644
index 0000000..5a3fffc
--- /dev/null
+++ b/test/596-checker-dead-phi/src/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String[] args) throws Exception {
+ Class<?> c = Class.forName("IrreducibleLoop");
+ // Note that we don't actually enter the loops in the 'liveness'
+ // method, so this is just a sanity check that part of the code we
+ // generated for that method is correct.
+ Method m = c.getMethod("liveness", int.class);
+ Object[] arguments = { 42 };
+ System.out.println(m.invoke(null, arguments));
+ }
+}
diff --git a/test/597-deopt-new-string/deopt.cc b/test/597-deopt-new-string/deopt.cc
new file mode 100644
index 0000000..844a786
--- /dev/null
+++ b/test/597-deopt-new-string/deopt.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni.h"
+#include "mirror/class-inl.h"
+#include "runtime.h"
+#include "thread_list.h"
+#include "thread_state.h"
+#include "gc/gc_cause.h"
+#include "gc/scoped_gc_critical_section.h"
+
+namespace art {
+
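+// Deoptimizes all compiled code. We enter a GC critical section and suspend
+// all mutator threads before changing the instrumentation level, so the
+// transition cannot race with the collector or with running Java code.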
+extern "C" JNIEXPORT void JNICALL Java_Main_deoptimizeAll(
+ JNIEnv* env,
+ jclass cls ATTRIBUTE_UNUSED) {
+ ScopedObjectAccess soa(env);
+ ScopedThreadSuspension sts(Thread::Current(), kWaitingForDeoptimization);
+ gc::ScopedGCCriticalSection gcs(Thread::Current(),
+ gc::kGcCauseInstrumentation,
+ gc::kCollectorTypeInstrumentation);
+ // We need to suspend mutator threads first.
+ ScopedSuspendAll ssa(__FUNCTION__);
+ static bool first = true;
+ if (first) {
+ // We need to enable deoptimization once in order to call DeoptimizeEverything().
+ Runtime::Current()->GetInstrumentation()->EnableDeoptimization();
+ first = false;
+ }
+ Runtime::Current()->GetInstrumentation()->DeoptimizeEverything("test");
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_undeoptimizeAll(
+ JNIEnv* env,
+ jclass cls ATTRIBUTE_UNUSED) {
+ ScopedObjectAccess soa(env);
+ ScopedThreadSuspension sts(Thread::Current(), kWaitingForDeoptimization);
+ gc::ScopedGCCriticalSection gcs(Thread::Current(),
+ gc::kGcCauseInstrumentation,
+ gc::kCollectorTypeInstrumentation);
+ // We need to suspend mutator threads first.
+ ScopedSuspendAll ssa(__FUNCTION__);
+ Runtime::Current()->GetInstrumentation()->UndeoptimizeEverything("test");
+}
+
+} // namespace art
diff --git a/test/597-deopt-new-string/expected.txt b/test/597-deopt-new-string/expected.txt
new file mode 100644
index 0000000..f993efc
--- /dev/null
+++ b/test/597-deopt-new-string/expected.txt
@@ -0,0 +1,2 @@
+JNI_OnLoad called
+Finishing
diff --git a/test/597-deopt-new-string/info.txt b/test/597-deopt-new-string/info.txt
new file mode 100644
index 0000000..1bd1f79
--- /dev/null
+++ b/test/597-deopt-new-string/info.txt
@@ -0,0 +1 @@
+Regression test for b/28555675
diff --git a/test/597-deopt-new-string/run b/test/597-deopt-new-string/run
new file mode 100644
index 0000000..9776ab3
--- /dev/null
+++ b/test/597-deopt-new-string/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# We want to run in debuggable mode, which keeps the call into
+# StringFactory.newEmptyString() in the compiled code.
+exec ${RUN} -Xcompiler-option --debuggable "${@}"
diff --git a/test/597-deopt-new-string/src/Main.java b/test/597-deopt-new-string/src/Main.java
new file mode 100644
index 0000000..1224e40
--- /dev/null
+++ b/test/597-deopt-new-string/src/Main.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main implements Runnable {
+ static final int numberOfThreads = 2;
+ static final int totalOperations = 40000;
+ static boolean sFlag = false;
+ static volatile boolean done = false;
+ int threadIndex;
+
+ public static native void deoptimizeAll();
+ public static native void undeoptimizeAll();
+
+ Main(int index) {
+ threadIndex = index;
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+
+ final Thread[] threads = new Thread[numberOfThreads];
+ for (int t = 0; t < threads.length; t++) {
+ threads[t] = new Thread(new Main(t));
+ threads[t].start();
+ }
+ for (Thread t : threads) {
+ t.join();
+ }
+ System.out.println("Finishing");
+ }
+
+ public String $noinline$run0() {
+ // Prevent inlining.
+ if (sFlag) {
+ throw new Error();
+ }
+ char[] arr = {'a', 'b', 'c'};
+ return new String(arr, 0, arr.length);
+ }
+
+ public void run() {
+ if (threadIndex == 0) {
+      // This thread repeatedly deoptimizes and undeoptimizes all threads.
+      // Hopefully one of those deoptimizations lands while another thread is
+      // returning from StringFactory.newEmptyString().
+ for (int i = 0; i < totalOperations; ++i) {
+ if (i % 50 == 0) {
+ deoptimizeAll();
+ }
+ if (i % 50 == 25) {
+ undeoptimizeAll();
+ }
+ }
+ done = true;
+ } else {
+      // This thread keeps allocating new Strings from a char array.
+ while (!done) {
+ $noinline$run0();
+ }
+ }
+ }
+}
diff --git a/test/598-checker-irreducible-dominance/expected.txt b/test/598-checker-irreducible-dominance/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/598-checker-irreducible-dominance/expected.txt
diff --git a/test/598-checker-irreducible-dominance/info.txt b/test/598-checker-irreducible-dominance/info.txt
new file mode 100644
index 0000000..8ca4e63
--- /dev/null
+++ b/test/598-checker-irreducible-dominance/info.txt
@@ -0,0 +1,2 @@
+Regression test for HGraphBuilder, which would compute wrong dominance
+information in the presence of irreducible loops.
\ No newline at end of file
diff --git a/test/598-checker-irreducible-dominance/smali/IrreducibleLoop.smali b/test/598-checker-irreducible-dominance/smali/IrreducibleLoop.smali
new file mode 100644
index 0000000..4d8b515
--- /dev/null
+++ b/test/598-checker-irreducible-dominance/smali/IrreducibleLoop.smali
@@ -0,0 +1,52 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LIrreducibleLoop;
+.super Ljava/lang/Object;
+
+# Test case in which `inner_back_edge` is not dominated by `inner_header`,
+# which causes `outer_back_edge` to not be dominated by `outer_header`.
+# HGraphBuilder did not do a fixed-point iteration and would miss the path to
+# `outer_back_edge` through `inner_back_edge`, incorrectly labeling the outer
+# loop non-irreducible.
+
+## CHECK-START: int IrreducibleLoop.dominance(int) builder (after)
+## CHECK: Add irreducible:true
+
+.method public static dominance(I)I
+ .registers 2
+
+ if-eqz p0, :outer_header
+ goto :inner_back_edge
+
+ :outer_header
+ if-eqz p0, :inner_header
+
+ :outer_branch_exit
+ if-eqz p0, :outer_merge
+ return p0
+
+ :inner_header
+ goto :outer_merge
+
+ :inner_back_edge
+ goto :inner_header
+
+ :outer_merge
+ if-eqz p0, :inner_back_edge
+
+ :outer_back_edge
+ add-int/2addr p0, p0
+ goto :outer_header
+
+.end method
diff --git a/test/598-checker-irreducible-dominance/src/Main.java b/test/598-checker-irreducible-dominance/src/Main.java
new file mode 100644
index 0000000..38b2ab4
--- /dev/null
+++ b/test/598-checker-irreducible-dominance/src/Main.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String[] args) {
+ // Nothing to run. This regression test merely makes sure the smali test
+ // case successfully compiles.
+ }
+}
diff --git a/test/803-no-super/expected.txt b/test/803-no-super/expected.txt
new file mode 100644
index 0000000..5036991
--- /dev/null
+++ b/test/803-no-super/expected.txt
@@ -0,0 +1,2 @@
+java.lang.ClassNotFoundException: NoSuper1
+Done!
diff --git a/test/803-no-super/info.txt b/test/803-no-super/info.txt
new file mode 100644
index 0000000..0178a44
--- /dev/null
+++ b/test/803-no-super/info.txt
@@ -0,0 +1,3 @@
+Regression test checking that temp (erroneous) classes don't get conflict
+tables created for them.
+
+Obviously needs to run under Dalvik or ART.
diff --git a/test/803-no-super/smali/nosuper1.smali b/test/803-no-super/smali/nosuper1.smali
new file mode 100644
index 0000000..df2eaa5
--- /dev/null
+++ b/test/803-no-super/smali/nosuper1.smali
@@ -0,0 +1,3 @@
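+# LNoClass does not exist, so loading NoSuper1 is expected to fail with a
+# ClassNotFoundException (see expected.txt).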
+.class public LNoSuper1;
+
+.super LNoClass;
diff --git a/test/803-no-super/src/Main.java b/test/803-no-super/src/Main.java
new file mode 100644
index 0000000..a07e042
--- /dev/null
+++ b/test/803-no-super/src/Main.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Attempt to load class with no superclass.
+ */
+public class Main {
+ public static void main(String[] args) throws Exception {
+ try {
+ Class<?> c = Class.forName("NoSuper1");
+ } catch (Exception e) {
+ System.out.println(e);
+ }
+ System.out.println("Done!");
+ }
+}
diff --git a/test/960-default-smali/expected.txt b/test/960-default-smali/expected.txt
index 7671eed..f3db93f 100644
--- a/test/960-default-smali/expected.txt
+++ b/test/960-default-smali/expected.txt
@@ -82,3 +82,19 @@
J-interface Greeter.SayHiTwice()='Hi Hi '
J-virtual J.SayHiTwice()='Hi Hi '
End testing for type J
+Testing for type K
+K-interface Foo.bar()='foobar'
+K-virtual K.bar()='foobar'
+End testing for type K
+Testing for type L
+L-interface Foo.bar()='foobar'
+L-virtual K.bar()='foobar'
+L-virtual L.bar()='foobar'
+End testing for type L
+Testing for type M
+M-interface Foo.bar()='BAZ!'
+M-interface Fooer.bar()='BAZ!'
+M-virtual K.bar()='BAZ!'
+M-virtual L.bar()='BAZ!'
+M-virtual M.bar()='BAZ!'
+End testing for type M
diff --git a/test/960-default-smali/src/Foo.java b/test/960-default-smali/src/Foo.java
new file mode 100644
index 0000000..ed5b35f
--- /dev/null
+++ b/test/960-default-smali/src/Foo.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+interface Foo {
+ public default String bar() {
+ return "foobar";
+ }
+}
diff --git a/test/960-default-smali/src/Fooer.java b/test/960-default-smali/src/Fooer.java
new file mode 100644
index 0000000..d8a5f61
--- /dev/null
+++ b/test/960-default-smali/src/Fooer.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+interface Fooer extends Foo {
+ public String bar();
+}
diff --git a/test/960-default-smali/src/K.java b/test/960-default-smali/src/K.java
new file mode 100644
index 0000000..4426be7
--- /dev/null
+++ b/test/960-default-smali/src/K.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class K implements Foo { }
diff --git a/test/960-default-smali/src/L.java b/test/960-default-smali/src/L.java
new file mode 100644
index 0000000..c08ab72
--- /dev/null
+++ b/test/960-default-smali/src/L.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class L extends K { }
diff --git a/test/960-default-smali/src/M.java b/test/960-default-smali/src/M.java
new file mode 100644
index 0000000..affe7e9
--- /dev/null
+++ b/test/960-default-smali/src/M.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
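+// M overrides the default bar() inherited via K and L, and also implements
+// Fooer's abstract redeclaration of bar().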
+class M extends L implements Fooer {
+ public String bar() {
+ return "BAZ!";
+ }
+}
diff --git a/test/960-default-smali/src/classes.xml b/test/960-default-smali/src/classes.xml
index 0aa41f7..f3e50c5 100644
--- a/test/960-default-smali/src/classes.xml
+++ b/test/960-default-smali/src/classes.xml
@@ -81,6 +81,27 @@
<implements> </implements>
<methods> </methods>
</class>
+
+ <class name="K" super="java/lang/Object">
+ <implements>
+ <item>Foo</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="L" super="K">
+ <implements> </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="M" super="L">
+ <implements>
+ <item>Fooer</item>
+ </implements>
+ <methods>
+ <method>bar</method>
+ </methods>
+ </class>
</classes>
<interfaces>
@@ -123,5 +144,22 @@
<method type="abstract">GetPlace</method>
</methods>
</interface>
+
+ <interface name="Foo" super="java/lang/Object">
+ <implements>
+ </implements>
+ <methods>
+ <method type="default">bar</method>
+ </methods>
+ </interface>
+
+ <interface name="Fooer" super="java/lang/Object">
+ <implements>
+ <item>Foo</item>
+ </implements>
+ <methods>
+ <method type="abstract">bar</method>
+ </methods>
+ </interface>
</interfaces>
</data>
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index e547c72..21f8141 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -41,7 +41,10 @@
497-inlining-and-class-loader/clear_dex_cache.cc \
543-env-long-ref/env_long_ref.cc \
566-polymorphic-inlining/polymorphic_inline.cc \
- 570-checker-osr/osr.cc
+ 570-checker-osr/osr.cc \
+ 595-profile-saving/profile-saving.cc \
+ 596-app-images/app_images.cc \
+ 597-deopt-new-string/deopt.cc
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so
@@ -90,7 +93,12 @@
include $(BUILD_SHARED_LIBRARY)
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
- LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
+ LOCAL_CFLAGS := $(ART_HOST_CFLAGS)
+ ifeq ($$(suffix),d)
+ LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
+ else
+ LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
+ endif
LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS)
LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread
LOCAL_IS_HOST_MODULE := true
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index f3cda47..ee651b5 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -563,6 +563,13 @@
TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS :=
TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS :=
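+# Tests that are broken with the non-pic (npictest) configurations.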
+TEST_ART_BROKEN_NPIC_RUN_TESTS := 596-app-images
+ifneq (,$(filter npictest,$(PICTEST_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ ${COMPILER_TYPES},$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),npictest,$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_NPIC_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
# Tests that should fail in the heap poisoning configuration with the Optimizing compiler.
# 055: Exceeds run time limits due to heap poisoning instrumentation (on ARM and ARM64 devices).
TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS := \
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index d61fc8f..aa45d40 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -323,11 +323,14 @@
if [ "$INTERPRETER" = "y" ]; then
INT_OPTS="-Xint"
if [ "$VERIFY" = "y" ] ; then
+ INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=interpret-only"
COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only"
elif [ "$VERIFY" = "s" ]; then
+ INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=verify-at-runtime"
COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-at-runtime"
DEX_VERIFY="${DEX_VERIFY} -Xverify:softfail"
else # VERIFY = "n"
+ INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=verify-none"
COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-none"
DEX_VERIFY="${DEX_VERIFY} -Xverify:none"
fi
@@ -336,18 +339,12 @@
if [ "$JIT" = "y" ]; then
INT_OPTS="-Xusejit:true"
if [ "$VERIFY" = "y" ] ; then
+ INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=verify-at-runtime"
COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-at-runtime"
- if [ "$PREBUILD" = "n" ]; then
- # Make sure that if we have noprebuild we still JIT as DexClassLoader will
- # try to compile the dex file.
- INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=verify-at-runtime"
- fi
else
+ INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=verify-none"
COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-none"
DEX_VERIFY="${DEX_VERIFY} -Xverify:none"
- if [ "$PREBUILD" = "n" ]; then
- INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=verify-none"
- fi
fi
fi
@@ -476,11 +473,14 @@
LD_LIBRARY_PATH=$ANDROID_ROOT/$LIBRARY_DIRECTORY
fi
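+  # Expose libart/libartd to the test process through the public-libraries
+  # mechanism (consumed via ANDROID_ADDITIONAL_PUBLIC_LIBRARIES below).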
+ PUBLIC_LIBS=libart.so:libartd.so
+
# Create a script with the command. The command can get longer than the longest
# allowed adb command and there is no way to get the exit status from a adb shell
# command.
cmdline="cd $DEX_LOCATION && \
export ANDROID_DATA=$DEX_LOCATION && \
+ export ANDROID_ADDITIONAL_PUBLIC_LIBRARIES=$PUBLIC_LIBS && \
export DEX_LOCATION=$DEX_LOCATION && \
export ANDROID_ROOT=$ANDROID_ROOT && \
$mkdir_cmdline && \
diff --git a/test/run-test b/test/run-test
index fc57d09..9b311dd 100755
--- a/test/run-test
+++ b/test/run-test
@@ -126,6 +126,9 @@
pic_image_suffix=""
multi_image_suffix=""
android_root="/system"
+# By default we will use optimizing.
+image_args="-Xcompiler-option --compiler-backend=Optimizing"
+image_suffix="-optimizing"
while true; do
if [ "x$1" = "x--host" ]; then
@@ -148,6 +151,7 @@
elif [ "x$1" = "x--jvm" ]; then
target_mode="no"
runtime="jvm"
+ image_args=""
prebuild_mode="no"
NEED_DEX="false"
USE_JACK="false"
@@ -244,22 +248,22 @@
run_args="${run_args} --zygote"
shift
elif [ "x$1" = "x--interpreter" ]; then
- run_args="${run_args} --interpreter --runtime-option -XOatFileManagerCompilerFilter:verify-at-runtime"
+ image_args="--interpreter"
image_suffix="-interpreter"
shift
elif [ "x$1" = "x--jit" ]; then
- run_args="${run_args} --jit --runtime-option -XOatFileManagerCompilerFilter:verify-at-runtime"
+ image_args="--jit"
image_suffix="-jit"
shift
elif [ "x$1" = "x--optimizing" ]; then
- run_args="${run_args} -Xcompiler-option --compiler-backend=Optimizing"
+ image_args="-Xcompiler-option --compiler-backend=Optimizing"
image_suffix="-optimizing"
shift
elif [ "x$1" = "x--no-verify" ]; then
- run_args="${run_args} --no-verify --runtime-option -XOatFileManagerCompilerFilter:verify-none"
+ run_args="${run_args} --no-verify"
shift
elif [ "x$1" = "x--verify-soft-fail" ]; then
- run_args="${run_args} --verify-soft-fail --runtime-option -XOatFileManagerCompilerFilter:verify-at-runtime"
+ image_args="${run_args} --verify-soft-fail"
image_suffix="-interp-ac"
shift
elif [ "x$1" = "x--no-optimize" ]; then
@@ -348,6 +352,7 @@
fi
done
+run_args="${run_args} ${image_args}"
# Allocate file descriptor real_stderr and redirect it to the shell's error
# output (fd 2).
if [ ${BASH_VERSINFO[1]} -ge 4 ] && [ ${BASH_VERSINFO[2]} -ge 1 ]; then
@@ -467,7 +472,7 @@
run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib${suffix64}"
else
guess_target_arch_name
- run_args="${run_args} --runtime-option -Djava.library.path=/data/art-test/${target_arch_name}"
+ run_args="${run_args} --runtime-option -Djava.library.path=/data/art-test/${target_arch_name}:/system/lib${suffix64}"
run_args="${run_args} --boot /data/art-test/core${image_suffix}${pic_image_suffix}${multi_image_suffix}.art"
fi
if [ "$relocate" = "yes" ]; then
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 2eb52bc..304c2a9 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -46,9 +46,14 @@
done
if [[ $mode == "host" ]]; then
- make_command="make $j_arg $showcommands build-art-host-tests $common_targets ${out_dir}/host/linux-x86/lib/libjavacoretests.so ${out_dir}/host/linux-x86/lib64/libjavacoretests.so"
+ make_command="make $j_arg $showcommands build-art-host-tests $common_targets"
+ make_command+=" ${out_dir}/host/linux-x86/lib/libjavacoretests.so "
+ make_command+=" ${out_dir}/host/linux-x86/lib64/libjavacoretests.so"
elif [[ $mode == "target" ]]; then
- make_command="make $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh ${out_dir}/host/linux-x86/bin/adb libstdc++"
+ make_command="make $j_arg $showcommands build-art-target-tests $common_targets"
+ make_command+=" libjavacrypto libjavacoretests linker toybox toolbox sh"
+ make_command+=" ${out_dir}/host/linux-x86/bin/adb libstdc++ "
+ make_command+=" ${out_dir}/target/product/${TARGET_PRODUCT}/system/etc/public.libraries.txt"
fi
echo "Executing $make_command"
diff --git a/tools/dmtracedump/tracedump.cc b/tools/dmtracedump/tracedump.cc
index f70e2c2..3afee6f 100644
--- a/tools/dmtracedump/tracedump.cc
+++ b/tools/dmtracedump/tracedump.cc
@@ -512,10 +512,10 @@
void freeDataKeys(DataKeys* pKeys) {
if (pKeys == nullptr) return;
- free(pKeys->fileData);
- free(pKeys->threads);
- free(pKeys->methods);
- free(pKeys);
+ delete[] pKeys->fileData;
+ delete[] pKeys->threads;
+ delete[] pKeys->methods;
+ delete pKeys;
}
/*
@@ -822,8 +822,8 @@
DataKeys* parseKeys(FILE* fp, int32_t verbose) {
int64_t offset;
DataKeys* pKeys = new DataKeys();
- memset(pKeys, 0, sizeof(DataKeys));
if (pKeys == nullptr) return nullptr;
+ memset(pKeys, 0, sizeof(DataKeys));
/*
* We load the entire file into memory. We do this, rather than memory-
@@ -865,9 +865,13 @@
return nullptr;
}
- /* Reduce our allocation now that we know where the end of the key section is. */
- pKeys->fileData = reinterpret_cast<char*>(realloc(pKeys->fileData, offset));
- pKeys->fileLen = offset;
+ /*
+ * Although it is tempting to reduce our allocation now that we know where the
+ * end of the key section is, there is a pitfall. The method names and
+ * signatures in the method list contain pointers into the fileData area.
+ * Realloc or free will result in corruption.
+ */
+
/* Leave fp pointing to the beginning of the data section. */
fseek(fp, offset, SEEK_SET);
@@ -2607,7 +2611,7 @@
if (gOptions.graphFileName != nullptr) {
createInclusiveProfileGraphNew(dataKeys);
}
- free(methods);
+ delete[] methods;
}
freeDataKeys(dataKeys);
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 38b6ea6..dd2cc31 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -243,48 +243,6 @@
"org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"]
},
{
- description: "libnativehelper_compat_libc++ loading issue",
- result: EXEC_FAILED,
- modes: [device],
- names: ["dalvik.system.JniTest#testGetSuperclass",
- "dalvik.system.JniTest#testPassingBooleans",
- "dalvik.system.JniTest#testPassingBytes",
- "dalvik.system.JniTest#testPassingChars",
- "dalvik.system.JniTest#testPassingClass",
- "dalvik.system.JniTest#testPassingDoubles",
- "dalvik.system.JniTest#testPassingFloats",
- "dalvik.system.JniTest#testPassingInts",
- "dalvik.system.JniTest#testPassingLongs",
- "dalvik.system.JniTest#testPassingObjectReferences",
- "dalvik.system.JniTest#testPassingShorts",
- "dalvik.system.JniTest#testPassingThis",
- "libcore.util.NativeAllocationRegistryTest#testBadSize",
- "libcore.util.NativeAllocationRegistryTest#testEarlyFree",
- "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndNoSharedRegistry",
- "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndSharedRegistry",
- "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndNoSharedRegistry",
- "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndSharedRegistry",
- "libcore.util.NativeAllocationRegistryTest#testNullArguments"]
-},
-{
- description: "libnativehelper_compat_libc++.so not found by dlopen on ARM64",
- result: EXEC_FAILED,
- modes: [device],
- bug: 28082914,
- names: ["libcore.java.lang.ThreadTest#testContextClassLoaderIsInherited",
- "libcore.java.lang.ThreadTest#testContextClassLoaderIsNotNull",
- "libcore.java.lang.ThreadTest#testGetAllStackTracesIncludesAllGroups",
- "libcore.java.lang.ThreadTest#testGetStackTrace",
- "libcore.java.lang.ThreadTest#testJavaContextClassLoader",
- "libcore.java.lang.ThreadTest#testLeakingStartedThreads",
- "libcore.java.lang.ThreadTest#testLeakingUnstartedThreads",
- "libcore.java.lang.ThreadTest#testNativeThreadNames",
- "libcore.java.lang.ThreadTest#testThreadInterrupted",
- "libcore.java.lang.ThreadTest#testThreadSleep",
- "libcore.java.lang.ThreadTest#testThreadSleepIllegalArguments",
- "libcore.java.lang.ThreadTest#testThreadWakeup"]
-},
-{
description: "Only work with --mode=activity",
result: EXEC_FAILED,
names: [ "libcore.java.io.FileTest#testJavaIoTmpdirMutable" ]
@@ -295,5 +253,11 @@
names: ["jsr166.CollectionTest#testEmptyMeansEmpty",
"jsr166.Collection8Test#testForEach",
"jsr166.Collection8Test#testForEachConcurrentStressTest"]
+},
+{
+ description: "Unclear why this started to fail",
+ result: EXEC_FAILED,
+ bug: 28574453,
+ names: [ "org.apache.harmony.tests.javax.security.cert.X509CertificateTest#testVerifyPublicKey" ]
}
]
diff --git a/tools/public.libraries.buildbot.txt b/tools/public.libraries.buildbot.txt
new file mode 100644
index 0000000..4b01796
--- /dev/null
+++ b/tools/public.libraries.buildbot.txt
@@ -0,0 +1,8 @@
+libart.so
+libartd.so
+libbacktrace.so
+libc.so
+libc++.so
+libdl.so
+libm.so
+libnativehelper.so