Merge "Allow generation of native debug info for multiple methods."
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 02bce41..fd84d05 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -335,7 +335,6 @@
art_debug_cflags := \
$(ART_DEBUG_OPT_FLAG) \
-DDYNAMIC_ANNOTATIONS_ENABLED=1 \
- -DVIXL_DEBUG \
-UNDEBUG
art_host_non_debug_cflags := $(art_non_debug_cflags)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index b3832ac..fda4f5d 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -573,7 +573,7 @@
ifeq ($$(art_target_or_host),target)
$$(eval $$(call set-target-local-clang-vars))
$$(eval $$(call set-target-local-cflags-vars,debug))
- LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixld
+ LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixl
LOCAL_MODULE_PATH_32 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_32)
LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64)
LOCAL_MULTILIB := both
@@ -611,7 +611,7 @@
LOCAL_CLANG := $$(ART_HOST_CLANG)
LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS)
LOCAL_ASFLAGS += $$(ART_HOST_ASFLAGS)
- LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixld
+ LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixl
LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -lpthread -ldl
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 7a257b6..11ee6dd 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -330,9 +330,9 @@
# Vixl assembly support for ARM64 targets.
ifeq ($$(art_ndebug_or_debug),debug)
ifeq ($$(art_static_or_shared), static)
- LOCAL_WHOLE_STATIC_LIBRARIES += libvixld
+ LOCAL_WHOLE_STATIC_LIBRARIES += libvixl
else
- LOCAL_SHARED_LIBRARIES += libvixld
+ LOCAL_SHARED_LIBRARIES += libvixl
endif
else
ifeq ($$(art_static_or_shared), static)
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 239bc59..6075cd6 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -187,7 +187,9 @@
}
}
-void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa) {
+void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind,
+ InstructionSet isa,
+ size_t number_of_threads) {
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
method_inliner_map_.get(),
@@ -198,7 +200,7 @@
GetImageClasses(),
GetCompiledClasses(),
GetCompiledMethods(),
- /* thread_count */ 2,
+ number_of_threads,
/* dump_stats */ true,
/* dump_passes */ true,
timer_.get(),
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 7e0fbab..7c2c844 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -93,7 +93,7 @@
const char* method_name, const char* signature)
SHARED_REQUIRES(Locks::mutator_lock_);
- void CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa);
+ void CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa, size_t number_of_threads = 2U);
void ReserveImageSpace();
@@ -122,6 +122,13 @@
return; \
}
+// TODO: When read barrier works with all tests, get rid of this.
+#define TEST_DISABLED_FOR_READ_BARRIER() \
+ if (kUseReadBarrier) { \
+ printf("WARNING: TEST DISABLED FOR READ BARRIER\n"); \
+ return; \
+ }
+
// TODO: When read barrier works with all compilers in use, get rid of this.
#define TEST_DISABLED_FOR_READ_BARRIER_WITH_QUICK() \
if (kUseReadBarrier && GetCompilerKind() == Compiler::kQuick) { \
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 8f5d3ae..48c4356 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -605,6 +605,13 @@
INTRINSIC(JavaLangString, IndexOf, I_I, kIntrinsicIndexOf, kIntrinsicFlagBase0),
INTRINSIC(JavaLangString, Length, _I, kIntrinsicIsEmptyOrLength, kIntrinsicFlagLength),
+ INTRINSIC(JavaLangStringFactory, NewStringFromBytes, ByteArrayIII_String,
+ kIntrinsicNewStringFromBytes, kIntrinsicFlagNone),
+ INTRINSIC(JavaLangStringFactory, NewStringFromChars, IICharArray_String,
+ kIntrinsicNewStringFromChars, kIntrinsicFlagNone),
+ INTRINSIC(JavaLangStringFactory, NewStringFromString, String_String,
+ kIntrinsicNewStringFromString, kIntrinsicFlagNone),
+
INTRINSIC(JavaLangThread, CurrentThread, _Thread, kIntrinsicCurrentThread, 0),
INTRINSIC(LibcoreIoMemory, PeekByte, J_B, kIntrinsicPeek, kSignedByte),
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index a220959..4db82a6 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -159,10 +159,16 @@
size_t GetInlineDepthLimit() const {
return inline_depth_limit_;
}
+ void SetInlineDepthLimit(size_t limit) {
+ inline_depth_limit_ = limit;
+ }
size_t GetInlineMaxCodeUnits() const {
return inline_max_code_units_;
}
+ void SetInlineMaxCodeUnits(size_t units) {
+ inline_max_code_units_ = units;
+ }
double GetTopKProfileThreshold() const {
return top_k_profile_threshold_;
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 42f4e41..ef44a6f 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -39,6 +39,7 @@
// .rodata - DEX files and oat metadata.
// .text - Compiled code.
// .bss - Zero-initialized writeable section.
+// .MIPS.abiflags - MIPS-specific section.
// .dynstr - Names for .dynsym.
// .dynsym - A few oat-specific dynamic symbols.
// .hash - Hash-table for .dynsym.
@@ -388,6 +389,75 @@
}
};
+ class AbiflagsSection FINAL : public Section {
+ public:
+ // Section with MIPS abiflags info.
+ static constexpr uint8_t MIPS_AFL_REG_NONE = 0; // no registers
+ static constexpr uint8_t MIPS_AFL_REG_32 = 1; // 32-bit registers
+ static constexpr uint8_t MIPS_AFL_REG_64 = 2; // 64-bit registers
+ static constexpr uint32_t MIPS_AFL_FLAGS1_ODDSPREG = 1; // Uses odd single-prec fp regs
+ static constexpr uint8_t MIPS_ABI_FP_DOUBLE = 1; // -mdouble-float
+ static constexpr uint8_t MIPS_ABI_FP_XX = 5; // -mfpxx
+ static constexpr uint8_t MIPS_ABI_FP_64A = 7; // -mips32r* -mfp64 -mno-odd-spreg
+
+ AbiflagsSection(ElfBuilder<ElfTypes>* owner,
+ const std::string& name,
+ Elf_Word type,
+ Elf_Word flags,
+ const Section* link,
+ Elf_Word info,
+ Elf_Word align,
+ Elf_Word entsize,
+ InstructionSet isa,
+ const InstructionSetFeatures* features)
+ : Section(owner, name, type, flags, link, info, align, entsize) {
+ if (isa == kMips || isa == kMips64) {
+ bool fpu32 = false; // assume mips64 values
+ uint8_t isa_rev = 6; // assume mips64 values
+ if (isa == kMips) {
+ // adjust for mips32 values
+ fpu32 = features->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint();
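+ // isa_rev: 6 for R6; for R2-R5, 2 with a 32-bit FPU and 5 with a 64-bit FPU; 1 for R1.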
+ isa_rev = features->AsMipsInstructionSetFeatures()->IsR6()
+ ? 6
+ : features->AsMipsInstructionSetFeatures()->IsMipsIsaRevGreaterThanEqual2()
+ ? (fpu32 ? 2 : 5)
+ : 1;
+ }
+ abiflags_.version = 0; // version of flags structure
+ abiflags_.isa_level = (isa == kMips) ? 32 : 64;
+ abiflags_.isa_rev = isa_rev;
+ abiflags_.gpr_size = (isa == kMips) ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
+ abiflags_.cpr1_size = fpu32 ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
+ abiflags_.cpr2_size = MIPS_AFL_REG_NONE;
+ // Set the fp_abi to MIPS_ABI_FP_64A for mips32 with 64-bit FPUs (i.e. mips32 R5 and R6).
+ // Otherwise set to MIPS_ABI_FP_DOUBLE.
+ abiflags_.fp_abi = (isa == kMips && !fpu32) ? MIPS_ABI_FP_64A : MIPS_ABI_FP_DOUBLE;
+ abiflags_.isa_ext = 0;
+ abiflags_.ases = 0;
+ // To keep the code simple, we are not using odd FP reg for single floats for both
+ // mips32 and mips64 ART. Therefore we are not setting the MIPS_AFL_FLAGS1_ODDSPREG bit.
+ abiflags_.flags1 = 0;
+ abiflags_.flags2 = 0;
+ }
+ }
+
+ Elf_Word GetSize() const {
+ return sizeof(abiflags_);
+ }
+
+ void Write() {
+ this->WriteFully(&abiflags_, sizeof(abiflags_));
+ }
+
+ private:
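+ // Field layout follows version 0 of the MIPS ABI flags structure stored in .MIPS.abiflags.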
+ struct {
+ uint16_t version; // version of this structure
+ uint8_t isa_level, isa_rev, gpr_size, cpr1_size, cpr2_size;
+ uint8_t fp_abi;
+ uint32_t isa_ext, ases, flags1, flags2;
+ } abiflags_;
+ };
+
ElfBuilder(InstructionSet isa, const InstructionSetFeatures* features, OutputStream* output)
: isa_(isa),
features_(features),
@@ -407,6 +477,8 @@
debug_info_(this, ".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0),
debug_line_(this, ".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0),
shstrtab_(this, ".shstrtab", 0, 1),
+ abiflags_(this, ".MIPS.abiflags", SHT_MIPS_ABIFLAGS, SHF_ALLOC, nullptr, 0, kPageSize, 0,
+ isa, features),
started_(false),
write_program_headers_(false),
loaded_size_(0u),
@@ -416,6 +488,7 @@
dynamic_.phdr_flags_ = PF_R | PF_W;
dynamic_.phdr_type_ = PT_DYNAMIC;
eh_frame_hdr_.phdr_type_ = PT_GNU_EH_FRAME;
+ abiflags_.phdr_type_ = PT_MIPS_ABIFLAGS;
}
~ElfBuilder() {}
@@ -517,7 +590,7 @@
stream_.Flush();
// The main ELF header.
- Elf_Ehdr elf_header = MakeElfHeader(isa_);
+ Elf_Ehdr elf_header = MakeElfHeader(isa_, features_);
elf_header.e_shoff = section_headers_offset;
elf_header.e_shnum = shdrs.size();
elf_header.e_shstrndx = shstrtab_.GetSectionIndex();
@@ -561,7 +634,12 @@
Elf_Word rodata_address = rodata_.GetAddress();
Elf_Word text_address = RoundUp(rodata_address + rodata_size, kPageSize);
Elf_Word bss_address = RoundUp(text_address + text_size, kPageSize);
- Elf_Word dynstr_address = RoundUp(bss_address + bss_size, kPageSize);
+ Elf_Word abiflags_address = RoundUp(bss_address + bss_size, kPageSize);
+ Elf_Word abiflags_size = 0;
+ if (isa_ == kMips || isa_ == kMips64) {
+ abiflags_size = abiflags_.GetSize();
+ }
+ Elf_Word dynstr_address = RoundUp(abiflags_address + abiflags_size, kPageSize);
// Cache .dynstr, .dynsym and .hash data.
dynstr_.Add(""); // dynstr should start with empty string.
@@ -646,6 +724,12 @@
return loaded_size_;
}
+ void WriteMIPSabiflagsSection() {
+ abiflags_.Start();
+ abiflags_.Write();
+ abiflags_.End();
+ }
+
// Returns true if all writes and seeks on the output stream succeeded.
bool Good() {
return stream_.Good();
@@ -665,7 +749,7 @@
}
private:
- static Elf_Ehdr MakeElfHeader(InstructionSet isa) {
+ static Elf_Ehdr MakeElfHeader(InstructionSet isa, const InstructionSetFeatures* features) {
Elf_Ehdr elf_header = Elf_Ehdr();
switch (isa) {
case kArm:
@@ -693,18 +777,20 @@
case kMips: {
elf_header.e_machine = EM_MIPS;
elf_header.e_flags = (EF_MIPS_NOREORDER |
- EF_MIPS_PIC |
- EF_MIPS_CPIC |
- EF_MIPS_ABI_O32 |
- EF_MIPS_ARCH_32R2);
+ EF_MIPS_PIC |
+ EF_MIPS_CPIC |
+ EF_MIPS_ABI_O32 |
+ (features->AsMipsInstructionSetFeatures()->IsR6()
+ ? EF_MIPS_ARCH_32R6
+ : EF_MIPS_ARCH_32R2));
break;
}
case kMips64: {
elf_header.e_machine = EM_MIPS;
elf_header.e_flags = (EF_MIPS_NOREORDER |
- EF_MIPS_PIC |
- EF_MIPS_CPIC |
- EF_MIPS_ARCH_64R6);
+ EF_MIPS_PIC |
+ EF_MIPS_CPIC |
+ EF_MIPS_ARCH_64R6);
break;
}
case kNone: {
@@ -834,6 +920,7 @@
Section debug_info_;
Section debug_line_;
StringSection shstrtab_;
+ AbiflagsSection abiflags_;
std::vector<std::unique_ptr<Section>> other_sections_;
// List of used section in the order in which they were written.
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index e35662d..bed864b 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -210,6 +210,9 @@
if (bss_size_ != 0u) {
builder_->GetBss()->WriteNoBitsSection(bss_size_);
}
+ if (builder_->GetIsa() == kMips || builder_->GetIsa() == kMips64) {
+ builder_->WriteMIPSabiflagsSection();
+ }
builder_->WriteDynamicSection();
}
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 3b622b5..7779e44 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -24,6 +24,7 @@
#include "class_linker-inl.h"
#include "common_compiler_test.h"
#include "debug/method_debug_info.h"
+#include "driver/compiler_options.h"
#include "elf_writer.h"
#include "elf_writer_quick.h"
#include "gc/space/image_space.h"
@@ -48,8 +49,12 @@
};
void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) {
- // TODO: Test does not currently work with optimizing.
- CreateCompilerDriver(Compiler::kQuick, kRuntimeISA);
+ CreateCompilerDriver(Compiler::kOptimizing, kRuntimeISA, kIsTargetBuild ? 2U : 16U);
+
+ // Set inline filter values.
+ compiler_options_->SetInlineDepthLimit(CompilerOptions::kDefaultInlineDepthLimit);
+ compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
+
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// Enable write for dex2dex.
for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
@@ -283,14 +288,17 @@
}
TEST_F(ImageTest, WriteReadUncompressed) {
+ TEST_DISABLED_FOR_READ_BARRIER(); // b/27578460
TestWriteRead(ImageHeader::kStorageModeUncompressed);
}
TEST_F(ImageTest, WriteReadLZ4) {
+ TEST_DISABLED_FOR_READ_BARRIER(); // b/27578460
TestWriteRead(ImageHeader::kStorageModeLZ4);
}
TEST_F(ImageTest, WriteReadLZ4HC) {
+ TEST_DISABLED_FOR_READ_BARRIER(); // b/27578460
TestWriteRead(ImageHeader::kStorageModeLZ4HC);
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 871435b..b1b971f 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -266,17 +266,9 @@
<< PrettyDuration(NanoTime() - compress_start_time);
}
- // Write header first, as uncompressed.
- image_header->data_size_ = data_size;
- if (!image_file->WriteFully(image_info.image_->Begin(), sizeof(ImageHeader))) {
- PLOG(ERROR) << "Failed to write image file header " << image_filename;
- image_file->Erase();
- return false;
- }
-
// Write out the image + fields + methods.
const bool is_compressed = compressed_data != nullptr;
- if (!image_file->WriteFully(image_data_to_write, data_size)) {
+ if (!image_file->PwriteFully(image_data_to_write, data_size, sizeof(ImageHeader))) {
PLOG(ERROR) << "Failed to write image file data " << image_filename;
image_file->Erase();
return false;
@@ -291,13 +283,33 @@
if (!is_compressed) {
CHECK_EQ(bitmap_position_in_file, bitmap_section.Offset());
}
- if (!image_file->Write(reinterpret_cast<char*>(image_info.image_bitmap_->Begin()),
- bitmap_section.Size(),
- bitmap_position_in_file)) {
+ if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_bitmap_->Begin()),
+ bitmap_section.Size(),
+ bitmap_position_in_file)) {
PLOG(ERROR) << "Failed to write image file " << image_filename;
image_file->Erase();
return false;
}
+
+ int err = image_file->Flush();
+ if (err < 0) {
+ PLOG(ERROR) << "Failed to flush image file " << image_filename << " with result " << err;
+ image_file->Erase();
+ return false;
+ }
+
+ // Write header last in case the compiler gets killed in the middle of image writing.
+ // We do not want to have a corrupted image with a valid header.
+ // The header is uncompressed since it contains whether the image is compressed or not.
+ image_header->data_size_ = data_size;
+ if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_->Begin()),
+ sizeof(ImageHeader),
+ 0)) {
+ PLOG(ERROR) << "Failed to write image file header " << image_filename;
+ image_file->Erase();
+ return false;
+ }
+
CHECK_EQ(bitmap_position_in_file + bitmap_section.Size(),
static_cast<size_t>(image_file->GetLength()));
if (image_file->FlushCloseOrErase() != 0) {
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 9a77946..cda2e27 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -169,13 +169,14 @@
compiler_driver_->SetDedupeEnabled(false);
compiler_driver_->SetSupportBootImageFixup(false);
+ size_t thread_count = compiler_driver_->GetThreadCount();
if (compiler_options_->GetGenerateDebugInfo()) {
#ifdef __ANDROID__
const char* prefix = "/data/misc/trace";
#else
const char* prefix = "/tmp";
#endif
- DCHECK_EQ(compiler_driver_->GetThreadCount(), 1u)
+ DCHECK_EQ(thread_count, 1u)
<< "Generating debug info only works with one compiler thread";
std::string perf_filename = std::string(prefix) + "/perf-" + std::to_string(getpid()) + ".map";
perf_file_.reset(OS::CreateEmptyFileWriteOnly(perf_filename.c_str()));
@@ -184,6 +185,10 @@
" Are you on a user build? Perf only works on userdebug/eng builds";
}
}
+
+ size_t inline_depth_limit = compiler_driver_->GetCompilerOptions().GetInlineDepthLimit();
+ DCHECK_LT(thread_count * inline_depth_limit, std::numeric_limits<uint16_t>::max())
+ << "ProfilingInfo's inline counter can potentially overflow";
}
JitCompiler::~JitCompiler() {
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 967d156..af50363 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -543,8 +543,16 @@
DCHECK(CheckTypeConsistency(instruction));
LocationSummary* locations = instruction->GetLocations();
if (!instruction->IsSuspendCheckEntry()) {
- if (locations != nullptr && locations->CanCall()) {
- MarkNotLeaf();
+ if (locations != nullptr) {
+ if (locations->CanCall()) {
+ MarkNotLeaf();
+ } else if (locations->Intrinsified() &&
+ instruction->IsInvokeStaticOrDirect() &&
+ !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) {
+ // A static method call that has been fully intrinsified, and cannot call on the slow
+ // path or refer to the current method directly, no longer needs the current method.
+ return;
+ }
}
if (instruction->NeedsCurrentMethod()) {
SetRequiresCurrentMethod();
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3e3719e..d861e39 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -28,6 +28,8 @@
#include "driver/dex_compilation_unit.h"
#include "instruction_simplifier.h"
#include "intrinsics.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "nodes.h"
@@ -220,6 +222,33 @@
return index;
}
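+// RAII helper: tells the JIT code cache that the compiler is using this method's
+// ProfilingInfo, so it is not collected while the inliner consults it.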
+class ScopedProfilingInfoInlineUse {
+ public:
+ explicit ScopedProfilingInfoInlineUse(ArtMethod* method, Thread* self)
+ : method_(method),
+ self_(self),
+ // Fetch the profiling info ahead of using it. If it's null when fetching,
+ // we should not call JitCodeCache::DoneCompilerUse.
+ profiling_info_(
+ Runtime::Current()->GetJit()->GetCodeCache()->NotifyCompilerUse(method, self)) {
+ }
+
+ ~ScopedProfilingInfoInlineUse() {
+ if (profiling_info_ != nullptr) {
+ size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ DCHECK_EQ(profiling_info_, method_->GetProfilingInfo(pointer_size));
+ Runtime::Current()->GetJit()->GetCodeCache()->DoneCompilerUse(method_, self_);
+ }
+ }
+
+ ProfilingInfo* GetProfilingInfo() const { return profiling_info_; }
+
+ private:
+ ArtMethod* const method_;
+ Thread* const self_;
+ ProfilingInfo* const profiling_info_;
+};
+
bool HInliner::TryInline(HInvoke* invoke_instruction) {
if (invoke_instruction->IsInvokeUnresolved()) {
return false; // Don't bother to move further if we know the method is unresolved.
@@ -271,30 +300,32 @@
// Check if we can use an inline cache.
ArtMethod* caller = graph_->GetArtMethod();
- size_t pointer_size = class_linker->GetImagePointerSize();
- // Under JIT, we should always know the caller.
- DCHECK(!Runtime::Current()->UseJit() || (caller != nullptr));
- if (caller != nullptr && caller->GetProfilingInfo(pointer_size) != nullptr) {
- ProfilingInfo* profiling_info = caller->GetProfilingInfo(pointer_size);
- const InlineCache& ic = *profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
- if (ic.IsUnitialized()) {
- VLOG(compiler) << "Interface or virtual call to "
- << PrettyMethod(method_index, caller_dex_file)
- << " is not hit and not inlined";
- return false;
- } else if (ic.IsMonomorphic()) {
- MaybeRecordStat(kMonomorphicCall);
- return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic);
- } else if (ic.IsPolymorphic()) {
- MaybeRecordStat(kPolymorphicCall);
- return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic);
- } else {
- DCHECK(ic.IsMegamorphic());
- VLOG(compiler) << "Interface or virtual call to "
- << PrettyMethod(method_index, caller_dex_file)
- << " is megamorphic and not inlined";
- MaybeRecordStat(kMegamorphicCall);
- return false;
+ if (Runtime::Current()->UseJit()) {
+ // Under JIT, we should always know the caller.
+ DCHECK(caller != nullptr);
+ ScopedProfilingInfoInlineUse spiis(caller, soa.Self());
+ ProfilingInfo* profiling_info = spiis.GetProfilingInfo();
+ if (profiling_info != nullptr) {
+ const InlineCache& ic = *profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
+ if (ic.IsUninitialized()) {
+ VLOG(compiler) << "Interface or virtual call to "
+ << PrettyMethod(method_index, caller_dex_file)
+ << " is not hit and not inlined";
+ return false;
+ } else if (ic.IsMonomorphic()) {
+ MaybeRecordStat(kMonomorphicCall);
+ return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic);
+ } else if (ic.IsPolymorphic()) {
+ MaybeRecordStat(kPolymorphicCall);
+ return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic);
+ } else {
+ DCHECK(ic.IsMegamorphic());
+ VLOG(compiler) << "Interface or virtual call to "
+ << PrettyMethod(method_index, caller_dex_file)
+ << " is megamorphic and not inlined";
+ MaybeRecordStat(kMegamorphicCall);
+ return false;
+ }
}
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index f8a9a94..b95ece5 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -94,6 +94,7 @@
void SimplifyCompare(HInvoke* invoke, bool has_zero_op);
void SimplifyIsNaN(HInvoke* invoke);
void SimplifyFP2Int(HInvoke* invoke);
+ void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind);
OptimizingCompilerStats* stats_;
bool simplification_occurred_ = false;
@@ -1594,6 +1595,12 @@
invoke->ReplaceWithExceptInReplacementAtIndex(select, 0); // false at index 0
}
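+// Replaces an Unsafe fence intrinsic invoke with an explicit HMemoryBarrier node of
+// the given kind, letting each backend emit its native fence instruction.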
+void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) {
+ uint32_t dex_pc = invoke->GetDexPc();
+ HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc);
+ invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, mem_barrier);
+}
+
void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
switch (instruction->GetIntrinsic()) {
case Intrinsics::kStringEquals:
@@ -1626,6 +1633,15 @@
case Intrinsics::kDoubleDoubleToLongBits:
SimplifyFP2Int(instruction);
break;
+ case Intrinsics::kUnsafeLoadFence:
+ SimplifyMemBarrier(instruction, MemBarrierKind::kLoadAny);
+ break;
+ case Intrinsics::kUnsafeStoreFence:
+ SimplifyMemBarrier(instruction, MemBarrierKind::kAnyStore);
+ break;
+ case Intrinsics::kUnsafeFullFence:
+ SimplifyMemBarrier(instruction, MemBarrierKind::kAnyAny);
+ break;
default:
break;
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 0cec5cc..3da8285 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -231,7 +231,10 @@
UNREACHABLE_INTRINSIC(Arch, IntegerCompare) \
UNREACHABLE_INTRINSIC(Arch, LongCompare) \
UNREACHABLE_INTRINSIC(Arch, IntegerSignum) \
-UNREACHABLE_INTRINSIC(Arch, LongSignum)
+UNREACHABLE_INTRINSIC(Arch, LongSignum) \
+UNREACHABLE_INTRINSIC(Arch, UnsafeLoadFence) \
+UNREACHABLE_INTRINSIC(Arch, UnsafeStoreFence) \
+UNREACHABLE_INTRINSIC(Arch, UnsafeFullFence)
} // namespace art
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index b599d42..4b94c94 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1224,8 +1224,9 @@
__ LoadFromOffset(
kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromBytes).Int32Value());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
__ blx(LR);
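+ // Record the PC after emitting the call so the stack map is keyed on the return
+ // address the runtime will look up, not the call site itself.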
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -1251,8 +1252,9 @@
// all include a null check on `data` before calling that method.
__ LoadFromOffset(
kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromChars).Int32Value());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
__ blx(LR);
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void IntrinsicLocationsBuilderARM::VisitStringNewStringFromString(HInvoke* invoke) {
@@ -1276,8 +1278,9 @@
__ LoadFromOffset(kLoadWord,
LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromString).Int32Value());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
__ blx(LR);
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -2008,9 +2011,6 @@
UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(ARM, UnsafeLoadFence)
-UNIMPLEMENTED_INTRINSIC(ARM, UnsafeStoreFence)
-UNIMPLEMENTED_INTRINSIC(ARM, UnsafeFullFence)
UNREACHABLE_INTRINSICS(ARM)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index ccbbd43..5de2306 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1409,8 +1409,9 @@
__ Ldr(lr,
MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromBytes).Int32Value()));
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
__ Blr(lr);
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -1436,19 +1437,17 @@
// all include a null check on `data` before calling that method.
__ Ldr(lr,
MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromChars).Int32Value()));
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
__ Blr(lr);
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromString(HInvoke* invoke) {
- // The inputs plus one temp.
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kCall,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -1464,8 +1463,9 @@
__ Ldr(lr,
MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromString).Int32Value()));
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
__ Blr(lr);
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -1959,9 +1959,6 @@
UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeLoadFence)
-UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeStoreFence)
-UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeFullFence)
UNREACHABLE_INTRINSICS(ARM64)
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 697b8fe..f1a6e3d 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1457,6 +1457,24 @@
}
}
+// Thread java.lang.Thread.currentThread()
+void IntrinsicLocationsBuilderMIPS::VisitThreadCurrentThread(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitThreadCurrentThread(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ Register out = invoke->GetLocations()->Out().AsRegister<Register>();
+
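+ // The java.lang.Thread peer object is cached in the native Thread; load it
+ // from the thread register (TR) at PeerOffset.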
+ __ LoadFromOffset(kLoadWord,
+ out,
+ TR,
+ Thread::PeerOffset<kMipsPointerSize>().Int32Value());
+}
+
// char java.lang.String.charAt(int index)
void IntrinsicLocationsBuilderMIPS::VisitStringCharAt(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
@@ -1464,7 +1482,9 @@
kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
+ // The inputs will be considered live at the last instruction and restored there. With
+ // kNoOutputOverlap that restore would clobber the output, so request kOutputOverlap.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitStringCharAt(HInvoke* invoke) {
@@ -1503,6 +1523,40 @@
__ Bind(slow_path->GetExitLabel());
}
+// int java.lang.String.compareTo(String anotherString)
+void IntrinsicLocationsBuilderMIPS::VisitStringCompareTo(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitStringCompareTo(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ Register argument = locations->InAt(1).AsRegister<Register>();
+ SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqz(argument, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadWord,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMipsWordSize,
+ pStringCompareTo).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+ __ Bind(slow_path->GetExitLabel());
+}
+
// boolean java.lang.String.equals(Object anObject)
void IntrinsicLocationsBuilderMIPS::VisitStringEquals(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
@@ -1605,6 +1659,211 @@
__ Bind(&end);
}
+static void GenerateStringIndexOf(HInvoke* invoke,
+ bool start_at_zero,
+ MipsAssembler* assembler,
+ CodeGeneratorMIPS* codegen,
+ ArenaAllocator* allocator) {
+ LocationSummary* locations = invoke->GetLocations();
+ Register tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<Register>() : TMP;
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ // Check for code points > 0xFFFF. Either a slow-path check when we
+ // don't know statically, or directly dispatch if we have a constant.
+ SlowPathCodeMIPS* slow_path = nullptr;
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ if (!IsUint<16>(invoke->InputAt(1)->AsIntConstant()->GetValue())) {
+ // Always needs the slow-path. We could directly dispatch to it,
+ // but this case should be rare, so for simplicity just put the
+ // full slow-path down and branch unconditionally.
+ slow_path = new (allocator) IntrinsicSlowPathMIPS(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
+ } else {
+ Register char_reg = locations->InAt(1).AsRegister<Register>();
+ // The "bltu" conditional branch tests to see if the character value
+ // fits in a valid 16-bit (MIPS halfword) value. If it doesn't then
+ // the character being searched for, if it exists in the string, is
+ // encoded using UTF-16 and stored in the string as two (16-bit)
+ // halfwords. Currently the assembly code used to implement this
+ // intrinsic doesn't support searching for a character stored as
+ // two halfwords, so we fall back to using the generic implementation
+ // of indexOf().
+ __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
+ slow_path = new (allocator) IntrinsicSlowPathMIPS(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ Bltu(tmp_reg, char_reg, slow_path->GetEntryLabel());
+ }
+
+ if (start_at_zero) {
+ DCHECK_EQ(tmp_reg, A2);
+ // Start-index = 0.
+ __ Clear(tmp_reg);
+ }
+
+ __ LoadFromOffset(kLoadWord,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pIndexOf).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+}
+
+// int java.lang.String.indexOf(int ch)
+void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime
+ // calling convention. So it's best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));
+
+ // Need a temp for slow-path codepoint compare, and need to send start-index=0.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke,
+ /* start_at_zero */ true,
+ GetAssembler(),
+ codegen_,
+ GetAllocator());
+}
+
+// int java.lang.String.indexOf(int ch, int fromIndex)
+void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime
+ // calling convention. So it's best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));
+
+ // Need a temp for slow-path codepoint compare.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke,
+ /* start_at_zero */ false,
+ GetAssembler(),
+ codegen_,
+ GetAllocator());
+}
+
+// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
+void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register byte_array = locations->InAt(0).AsRegister<Register>();
+ SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqz(byte_array, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadWord,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromBytes).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromChars(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+
+ // No need to emit code checking whether `locations->InAt(2)` is a null
+ // pointer, as callers of the native method
+ //
+ // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+ //
+ // all include a null check on `data` before calling that method.
+
+ __ LoadFromOffset(kLoadWord,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromChars).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+// java.lang.StringFactory.newStringFromString(String toCopy)
+void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromString(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromString(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register string_to_copy = locations->InAt(0).AsRegister<Register>();
+ SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqz(string_to_copy, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadWord,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromString).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Bind(slow_path->GetExitLabel());
+}
+
static void GenIsInfinite(LocationSummary* locations,
const Primitive::Type type,
const bool isR6,
@@ -1783,7 +2042,6 @@
UNIMPLEMENTED_INTRINSIC(MIPS, MathRint)
UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(MIPS, ThreadCurrentThread)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGet)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetVolatile)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLong)
@@ -1802,12 +2060,6 @@
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASInt)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASObject)
-UNIMPLEMENTED_INTRINSIC(MIPS, StringCompareTo)
-UNIMPLEMENTED_INTRINSIC(MIPS, StringIndexOf)
-UNIMPLEMENTED_INTRINSIC(MIPS, StringIndexOfAfter)
-UNIMPLEMENTED_INTRINSIC(MIPS, StringNewStringFromBytes)
-UNIMPLEMENTED_INTRINSIC(MIPS, StringNewStringFromChars)
-UNIMPLEMENTED_INTRINSIC(MIPS, StringNewStringFromString)
UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS, StringGetCharsNoCheck)
@@ -1838,9 +2090,6 @@
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeLoadFence)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeStoreFence)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeFullFence)
UNREACHABLE_INTRINSICS(MIPS)
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 83dff33..5ec5b86 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1503,9 +1503,6 @@
DCHECK_EQ(tmp_reg, A2);
// Start-index = 0.
__ Clear(tmp_reg);
- } else {
- __ Slt(TMP, A2, ZERO); // if fromIndex < 0
- __ Seleqz(A2, A2, TMP); // fromIndex = 0
}
__ LoadFromOffset(kLoadDoubleword,
@@ -1590,9 +1587,10 @@
TR,
QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize,
pAllocStringFromBytes).Int32Value());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
__ Jalr(TMP);
__ Nop();
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -1623,20 +1621,19 @@
TR,
QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize,
pAllocStringFromChars).Int32Value());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
__ Jalr(TMP);
__ Nop();
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
-// java.lang.String.String(String original)
+// java.lang.StringFactory.newStringFromString(String toCopy)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kCall,
kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
}
@@ -1655,9 +1652,10 @@
TR,
QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize,
pAllocStringFromString).Int32Value());
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
__ Jalr(TMP);
__ Nop();
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -1735,9 +1733,6 @@
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeLoadFence)
-UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeStoreFence)
-UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeFullFence)
UNREACHABLE_INTRINSICS(MIPS64)
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 048590e..95fdb9b 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1546,6 +1546,7 @@
__ j(kEqual, slow_path->GetEntryLabel());
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromBytes)));
+ CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -1571,6 +1572,7 @@
//
// all include a null check on `data` before calling that method.
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromChars)));
+ CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -1594,6 +1596,7 @@
__ j(kEqual, slow_path->GetEntryLabel());
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromString)));
+ CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -2643,9 +2646,6 @@
UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(X86, UnsafeLoadFence)
-UNIMPLEMENTED_INTRINSIC(X86, UnsafeStoreFence)
-UNIMPLEMENTED_INTRINSIC(X86, UnsafeFullFence)
UNREACHABLE_INTRINSICS(X86)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 35e13a6..9e568f7 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1641,6 +1641,7 @@
__ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromBytes),
/* no_rip */ true));
+ CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -1667,6 +1668,7 @@
// all include a null check on `data` before calling that method.
__ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromChars),
/* no_rip */ true));
+ CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -1691,6 +1693,7 @@
__ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromString),
/* no_rip */ true));
+ CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -2721,9 +2724,6 @@
UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeLoadFence)
-UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeStoreFence)
-UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeFullFence)
UNREACHABLE_INTRINSICS(X86_64)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 64bb919..cc1a806 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -20,7 +20,7 @@
#include <memory>
#include <stdint.h>
-#ifdef ART_ENABLE_CODEGEN_arm64
+#ifdef ART_ENABLE_CODEGEN_arm
#include "dex_cache_array_fixups_arm.h"
#endif
@@ -431,6 +431,7 @@
static void RunArchOptimizations(InstructionSet instruction_set,
HGraph* graph,
+ CodeGenerator* codegen,
OptimizingCompilerStats* stats,
PassObserver* pass_observer) {
ArenaAllocator* arena = graph->GetArena();
@@ -466,7 +467,8 @@
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86: {
- x86::PcRelativeFixups* pc_relative_fixups = new (arena) x86::PcRelativeFixups(graph, stats);
+ x86::PcRelativeFixups* pc_relative_fixups =
+ new (arena) x86::PcRelativeFixups(graph, codegen, stats);
HOptimization* x86_optimizations[] = {
pc_relative_fixups
};
@@ -561,7 +563,7 @@
};
RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
- RunArchOptimizations(driver->GetInstructionSet(), graph, stats, pass_observer);
+ RunArchOptimizations(driver->GetInstructionSet(), graph, codegen, stats, pass_observer);
AllocateRegisters(graph, codegen, pass_observer);
}
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index a6f1461..d281a9f 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -16,6 +16,7 @@
#include "pc_relative_fixups_x86.h"
#include "code_generator_x86.h"
+#include "intrinsics_x86.h"
namespace art {
namespace x86 {
@@ -25,7 +26,10 @@
*/
class PCRelativeHandlerVisitor : public HGraphVisitor {
public:
- explicit PCRelativeHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
+ PCRelativeHandlerVisitor(HGraph* graph, CodeGenerator* codegen)
+ : HGraphVisitor(graph),
+ codegen_(down_cast<CodeGeneratorX86*>(codegen)),
+ base_(nullptr) {}
void MoveBaseIfNeeded() {
if (base_ != nullptr) {
@@ -146,7 +150,6 @@
if (base_ != nullptr) {
return;
}
-
// Insert the base at the start of the entry block, move it to a better
// position later in MoveBaseIfNeeded().
base_ = new (GetGraph()->GetArena()) HX86ComputeBaseMethodAddress();
@@ -180,7 +183,9 @@
}
bool base_added = false;
- if (invoke_static_or_direct != nullptr && invoke_static_or_direct->HasPcRelativeDexCache()) {
+ if (invoke_static_or_direct != nullptr &&
+ invoke_static_or_direct->HasPcRelativeDexCache() &&
+ !WillHaveCallFreeIntrinsicsCodeGen(invoke)) {
InitializePCRelativeBasePointer();
// Add the extra parameter base_.
invoke_static_or_direct->AddSpecialInput(base_);
@@ -215,6 +220,24 @@
}
}
+ bool WillHaveCallFreeIntrinsicsCodeGen(HInvoke* invoke) {
+ if (invoke->GetIntrinsic() != Intrinsics::kNone) {
+ // This invoke may have intrinsic code generation defined. However, we must
+ // now also determine if this code generation is truly there and call-free
+ // (not unimplemented, no bail on instruction features, or call on slow path).
+ // This is done by actually calling the locations builder on the instruction
+ // and clearing out the locations once result is known. We assume this
+ // call only has creating locations as side effects!
+ IntrinsicLocationsBuilderX86 builder(codegen_);
+ bool success = builder.TryDispatch(invoke) && !invoke->GetLocations()->CanCall();
+ invoke->SetLocations(nullptr);
+ return success;
+ }
+ return false;
+ }
+
+ CodeGeneratorX86* codegen_;
+
// The generated HX86ComputeBaseMethodAddress in the entry block needed as an
// input to the HX86LoadFromConstantTable instructions.
HX86ComputeBaseMethodAddress* base_;
@@ -226,7 +249,7 @@
// that can be live-in at the irreducible loop header.
return;
}
- PCRelativeHandlerVisitor visitor(graph_);
+ PCRelativeHandlerVisitor visitor(graph_, codegen_);
visitor.VisitInsertionOrder();
visitor.MoveBaseIfNeeded();
}
diff --git a/compiler/optimizing/pc_relative_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h
index af708ac..03de2fc 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.h
+++ b/compiler/optimizing/pc_relative_fixups_x86.h
@@ -21,14 +21,21 @@
#include "optimization.h"
namespace art {
+
+class CodeGenerator;
+
namespace x86 {
class PcRelativeFixups : public HOptimization {
public:
- PcRelativeFixups(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, "pc_relative_fixups_x86", stats) {}
+ PcRelativeFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+ : HOptimization(graph, "pc_relative_fixups_x86", stats),
+ codegen_(codegen) {}
void Run() OVERRIDE;
+
+ private:
+ CodeGenerator* codegen_;
};
} // namespace x86
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index 039986c..bf563c7 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -89,7 +89,7 @@
LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
# For disassembler_arm64.
ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_SHARED_LIBRARIES += libvixld
+ LOCAL_SHARED_LIBRARIES += libvixl
else
LOCAL_SHARED_LIBRARIES += libvixl
endif
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 285803c..0e17fc2 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -148,6 +148,9 @@
bss->WriteNoBitsSection(oat_file_->BssSize());
}
+ if (isa == kMips || isa == kMips64) {
+ builder_->WriteMIPSabiflagsSection();
+ }
builder_->PrepareDynamicSection(
elf_file->GetPath(), rodata_size, text_size, oat_file_->BssSize());
builder_->WriteDynamicSection();
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index cfcef49..f33eebe 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -276,7 +276,7 @@
bl \entrypoint @ (field_idx, Object*, new_val, referrer, Thread*)
add sp, #16 @ release out args
.cfi_adjust_cfa_offset -16
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
\return
END \name
.endm
@@ -812,14 +812,23 @@
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
+ sub sp, #12 @ alignment padding
+ .cfi_adjust_cfa_offset 12
+ push {r3} @ Save r3 as it is used as a temp register in the
+ .cfi_adjust_cfa_offset 4 @ expansion of the SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ .cfi_rel_offset r3, 0 @ macro below, which clobbers its arguments.
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
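+ @ The refs-only callee-save frame is 32 bytes (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE),
+ @ so the r3 slot pushed above now sits at sp + 32.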
+ ldr r3, [sp, #32] @ restore r3
+ .cfi_restore r3
+
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
- .pad #16
.cfi_adjust_cfa_offset 16
bl \entrypoint
add sp, #16 @ strip the extra frame
.cfi_adjust_cfa_offset -16
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ add sp, #16 @ pop r3 + padding
+ .cfi_adjust_cfa_offset -16
\return
END \name
.endm
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
index 51e224c..801f708 100644
--- a/runtime/arch/mips/asm_support_mips.S
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -129,4 +129,43 @@
#endif // USE_HEAP_POISONING
.endm
+// Based on the contents of creg, select the minimum integer.
+// At the end of the macro the original value of creg is lost.
+.macro MINint dreg,rreg,sreg,creg
+ .set push
+ .set noat
+#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
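+// R6 dropped conditional moves; build the select from selnez/seleqz:
+//   selnez d, r, c sets d = (c != 0) ? r : 0
+//   seleqz c, s, c sets c = (c != 0) ? 0 : s
+// OR-ing the two partial results yields the selected register.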
+ .ifc \dreg, \rreg
+ selnez \dreg, \rreg, \creg
+ seleqz \creg, \sreg, \creg
+ .else
+ seleqz \dreg, \sreg, \creg
+ selnez \creg, \rreg, \creg
+ .endif
+ or \dreg, \dreg, \creg
+#else
+ movn \dreg, \rreg, \creg
+ movz \dreg, \sreg, \creg
+#endif
+ .set pop
+.endm
+
+// Find minimum of two signed registers
+.macro MINs dreg,rreg,sreg
+ .set push
+ .set noat
+ slt $at, \rreg, \sreg
+ MINint \dreg, \rreg, \sreg, $at
+ .set pop
+.endm
+
+// Find minimum of two unsigned registers
+.macro MINu dreg,rreg,sreg
+ .set push
+ .set noat
+ sltu $at, \rreg, \sreg
+ MINint \dreg, \rreg, \sreg, $at
+ .set pop
+.endm
+
#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 699ab3e..6c7d510 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1416,7 +1416,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
jal artAllocObjectFromCodeRosAlloc
- move $a2 ,$s1 # Pass self as argument.
+ move $a2, $s1 # Pass self as argument.
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_rosalloc
@@ -1744,5 +1744,74 @@
nop
END art_quick_ushr_long
-UNIMPLEMENTED art_quick_indexof
-UNIMPLEMENTED art_quick_string_compareto
+/* java.lang.String.indexOf(int ch, int fromIndex=0) */
+ENTRY_NO_GP art_quick_indexof
+/* $a0 holds address of "this" */
+/* $a1 holds "ch" */
+/* $a2 holds "fromIndex" */
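+/* Note: the instruction immediately after each branch below executes in the */
+/* branch delay slot, whether or not the branch is taken. */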
+ lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
+ slt $at, $a2, $zero # if fromIndex < 0
+#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
+ seleqz $a2, $a2, $at # fromIndex = 0;
+#else
+ movn $a2, $zero, $at # fromIndex = 0;
+#endif
+ subu $t0, $t0, $a2 # this.length() - fromIndex
+ blez $t0, 6f # if this.length()-fromIndex <= 0
+ li $v0, -1 # return -1;
+
+ sll $v0, $a2, 1 # $a0 += $a2 * 2
+ addu $a0, $a0, $v0 # " " " " "
+ move $v0, $a2 # Set i to fromIndex.
+
+1:
+ lhu $t3, MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch
+ beq $t3, $a1, 6f # return i;
+ addu $a0, $a0, 2 # i++
+ subu $t0, $t0, 1 # this.length() - i
+ bnez $t0, 1b # while this.length() - i > 0
+ addu $v0, $v0, 1 # i++
+
+ li $v0, -1 # if this.length() - i <= 0
+ # return -1;
+
+6:
+ j $ra
+ nop
+END art_quick_indexof
+
+ .set push
+ .set noat
+/* java.lang.String.compareTo(String anotherString) */
+ENTRY_NO_GP art_quick_string_compareto
+/* $a0 holds address of "this" */
+/* $a1 holds address of "anotherString" */
+ beq $a0, $a1, 9f # this and anotherString are the same object
+ move $v0, $zero # delay slot: tentatively return 0
+
+ lw $a2, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
+ lw $a3, MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length()
+ MINu $t2, $a2, $a3
+# $t2 now holds min(this.length(),anotherString.length())
+
+ beqz $t2, 9f # while min(this.length(),anotherString.length())-i != 0
+ subu $v0, $a2, $a3 # if $t2==0 return
+ # (this.length() - anotherString.length())
+1:
+ lhu $t0, MIRROR_STRING_VALUE_OFFSET($a0) # while this.charAt(i) == anotherString.charAt(i)
+ lhu $t1, MIRROR_STRING_VALUE_OFFSET($a1)
+ bne $t0, $t1, 9f # if this.charAt(i) != anotherString.charAt(i)
+ subu $v0, $t0, $t1 # return (this.charAt(i) - anotherString.charAt(i))
+ addiu $a0, $a0, 2 # point at this.charAt(i++)
+ subu $t2, $t2, 1 # new value of
+ # min(this.length(),anotherString.length())-i
+ bnez $t2, 1b
+ addiu $a1, $a1, 2 # point at anotherString.charAt(i++)
+ subu $v0, $a2, $a3
+
+9:
+ j $ra
+ nop
+END art_quick_string_compareto
+
+ .set pop
diff --git a/runtime/arch/mips64/asm_support_mips64.S b/runtime/arch/mips64/asm_support_mips64.S
index b859c70..786e860 100644
--- a/runtime/arch/mips64/asm_support_mips64.S
+++ b/runtime/arch/mips64/asm_support_mips64.S
@@ -83,4 +83,38 @@
#endif // USE_HEAP_POISONING
.endm
+// Based on the contents of creg, select the minimum integer:
+// dreg = (creg != 0) ? rreg : sreg.
+// At the end of the macro, the original value of creg is lost.
+.macro MINint dreg,rreg,sreg,creg
+ .set push
+ .set noat
+ .ifc \dreg, \rreg
+ selnez \dreg, \rreg, \creg
+ seleqz \creg, \sreg, \creg
+ .else
+ seleqz \dreg, \sreg, \creg
+ selnez \creg, \rreg, \creg
+ .endif
+ or \dreg, \dreg, \creg
+ .set pop
+.endm
+
+// Find minimum of two signed registers
+.macro MINs dreg,rreg,sreg
+ .set push
+ .set noat
+ slt $at, \rreg, \sreg
+ MINint \dreg, \rreg, \sreg, $at
+ .set pop
+.endm
+
+// Find minimum of two unsigned registers
+.macro MINu dreg,rreg,sreg
+ .set push
+ .set noat
+ sltu $at, \rreg, \sreg
+ MINint \dreg, \rreg, \sreg, $at
+ .set pop
+.endm
+
#endif // ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_S_
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index d264c9b..b4e2fcc 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1725,10 +1725,8 @@
lw $a2,MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
lw $a3,MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length()
- sltu $at,$a2,$a3
- seleqz $t2,$a3,$at
- selnez $at,$a2,$at
- or $t2,$t2,$at # $t2 now holds min(this.length(),anotherString.length())
+ MINu $t2, $a2, $a3
+# $t2 now holds min(this.length(),anotherString.length())
beqz $t2,9f # while min(this.length(),anotherString.length())-i != 0
subu $v0,$a2,$a3 # if $t2==0 return
@@ -1753,16 +1751,18 @@
/* java.lang.String.indexOf(int ch, int fromIndex=0) */
ENTRY_NO_GP art_quick_indexof
/* $a0 holds address of "this" */
-/* $a1 holds address of "ch" */
-/* $a2 holds address of "fromIndex" */
+/* $a1 holds "ch" */
+/* $a2 holds "fromIndex" */
lw $t0,MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
- subu $t0,$t0,$a2 # this.length() - offset
- blez $t0,6f # if this.length()-offset <= 0
+ slt $at, $a2, $zero # if fromIndex < 0
+ seleqz $a2, $a2, $at # fromIndex = 0;
+ subu $t0,$t0,$a2 # this.length() - fromIndex
+ blez $t0,6f # if this.length()-fromIndex <= 0
li $v0,-1 # return -1;
sll $v0,$a2,1 # $a0 += $a2 * 2
daddu $a0,$a0,$v0 # " " " " "
- move $v0,$a2 # Set i to offset.
+ move $v0,$a2 # Set i to fromIndex.
1:
lhu $t3,MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index d5807e2..4236c28 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1205,7 +1205,7 @@
TEST_F(StubTest, StringCompareTo) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
- (defined(__mips__) && defined(__LP64__)) || (defined(__x86_64__) && !defined(__APPLE__))
+ defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__))
// TODO: Check the "Unresolved" allocation stubs
Thread* self = Thread::Current();
@@ -2054,7 +2054,7 @@
}
TEST_F(StubTest, StringIndexOf) {
-#if defined(__arm__) || defined(__aarch64__) || (defined(__mips__) && defined(__LP64__))
+#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 77b8e87..3e47209 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -142,6 +142,10 @@
CFI_RESTORE(REG_VAR(reg))
END_MACRO
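+// Mark a register as restored to its value at function entry in the unwind
+// info, without popping it (e.g. after reloading it with a plain mov).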
+MACRO1(CFI_RESTORE_REG, reg)
+ CFI_RESTORE(REG_VAR(reg))
+END_MACRO
+
#define UNREACHABLE int3
MACRO1(UNIMPLEMENTED,name)
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index fbee5d7..4be00ce 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -686,7 +686,15 @@
MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
+ subl MACRO_LITERAL(12), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(12)
+ PUSH ebx // Save ebx as the expansion of the
+ // SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ // macro below clobbers it.
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ movl 28(%esp), %ebx // restore ebx
+ CFI_RESTORE_REG ebx
+
// Outgoing argument set up
subl MACRO_LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
@@ -700,6 +708,8 @@
addl MACRO_LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ addl MACRO_LITERAL(16), %esp // pop ebx + padding
+ CFI_ADJUST_CFA_OFFSET(-16)
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 4672948..e4097dd 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -234,21 +234,34 @@
return ReadFullyGeneric<pread>(fd_, buffer, byte_count, offset);
}
-bool FdFile::WriteFully(const void* buffer, size_t byte_count) {
+template <bool kUseOffset>
+bool FdFile::WriteFullyGeneric(const void* buffer, size_t byte_count, size_t offset) {
DCHECK(!read_only_mode_);
- const char* ptr = static_cast<const char*>(buffer);
moveTo(GuardState::kBase, GuardState::kClosed, "Writing into closed file.");
+ DCHECK(kUseOffset || offset == 0u);
+ const char* ptr = static_cast<const char*>(buffer);
while (byte_count > 0) {
- ssize_t bytes_written = TEMP_FAILURE_RETRY(write(fd_, ptr, byte_count));
+ ssize_t bytes_written = kUseOffset
+ ? TEMP_FAILURE_RETRY(pwrite(fd_, ptr, byte_count, offset))
+ : TEMP_FAILURE_RETRY(write(fd_, ptr, byte_count));
if (bytes_written == -1) {
return false;
}
byte_count -= bytes_written; // Reduce the number of remaining bytes.
ptr += bytes_written; // Move the buffer forward.
+ offset += static_cast<size_t>(bytes_written);
}
return true;
}
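+
+// Note: PwriteFully() uses pwrite(2), so it writes at an absolute offset and
+// does not move the file cursor used by write(2)/WriteFully().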
+bool FdFile::PwriteFully(const void* buffer, size_t byte_count, size_t offset) {
+ return WriteFullyGeneric<true>(buffer, byte_count, offset);
+}
+
+bool FdFile::WriteFully(const void* buffer, size_t byte_count) {
+ return WriteFullyGeneric<false>(buffer, byte_count, 0u);
+}
+
bool FdFile::Copy(FdFile* input_file, int64_t offset, int64_t size) {
DCHECK(!read_only_mode_);
off_t off = static_cast<off_t>(offset);
diff --git a/runtime/base/unix_file/fd_file.h b/runtime/base/unix_file/fd_file.h
index 8040afe..16cd44f 100644
--- a/runtime/base/unix_file/fd_file.h
+++ b/runtime/base/unix_file/fd_file.h
@@ -79,6 +79,7 @@
bool ReadFully(void* buffer, size_t byte_count) WARN_UNUSED;
bool PreadFully(void* buffer, size_t byte_count, size_t offset) WARN_UNUSED;
bool WriteFully(const void* buffer, size_t byte_count) WARN_UNUSED;
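+ // Writes `byte_count` bytes from `buffer` at absolute `offset`, retrying on
+ // short writes; returns false on error. Does not move the file cursor.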
+ bool PwriteFully(const void* buffer, size_t byte_count, size_t offset) WARN_UNUSED;
// Copy data from another file.
bool Copy(FdFile* input_file, int64_t offset, int64_t size);
@@ -119,6 +120,9 @@
GuardState guard_state_;
private:
+ template <bool kUseOffset>
+ bool WriteFullyGeneric(const void* buffer, size_t byte_count, size_t offset);
+
int fd_;
std::string file_path_;
bool auto_close_;
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index ecf607c..9bc87e5 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -110,6 +110,34 @@
ASSERT_EQ(file.Close(), 0);
}
+TEST_F(FdFileTest, ReadWriteFullyWithOffset) {
+ // New scratch file, zero-length.
+ art::ScratchFile tmp;
+ FdFile file;
+ ASSERT_TRUE(file.Open(tmp.GetFilename(), O_RDWR));
+ EXPECT_GE(file.Fd(), 0);
+ EXPECT_TRUE(file.IsOpened());
+
+ const char* test_string = "This is a test string";
+ size_t length = strlen(test_string) + 1;
+ const size_t offset = 12;
+ std::unique_ptr<char[]> offset_read_string(new char[length]);
+ std::unique_ptr<char[]> read_string(new char[length]);
+
+ // Write scratch data to the file so that we can read it back.
+ EXPECT_TRUE(file.PwriteFully(test_string, length, offset));
+ ASSERT_EQ(file.Flush(), 0);
+
+ // Test reading at both offsets.
+ EXPECT_TRUE(file.PreadFully(&offset_read_string[0], length, offset));
+ EXPECT_STREQ(test_string, &offset_read_string[0]);
+
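+ // The bytes before `offset` were never written, so (per POSIX) they read
+ // back as zeros and the prefix cannot match the test string.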
+ EXPECT_TRUE(file.PreadFully(&read_string[0], length, 0u));
+ EXPECT_NE(memcmp(&read_string[0], test_string, length), 0);
+
+ ASSERT_EQ(file.Close(), 0);
+}
+
TEST_F(FdFileTest, Copy) {
art::ScratchFile src_tmp;
FdFile src;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 01d140a..d51a1f7 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -7714,7 +7714,10 @@
}
++num_resolved;
DCHECK(!klass->IsProxyClass());
- DCHECK(klass->IsResolved());
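+ // The class may have failed to resolve, in which case it is marked
+ // erroneous; skip it rather than assert.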
+ if (!klass->IsResolved()) {
+ DCHECK(klass->IsErroneous());
+ continue;
+ }
mirror::DexCache* klass_dex_cache = klass->GetDexCache();
if (klass_dex_cache == dex_cache) {
const size_t class_def_idx = klass->GetDexClassDefIndex();
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 3df9101..729957f 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -406,6 +406,7 @@
int rmdir_cache_result = rmdir(dalvik_cache_.c_str());
ASSERT_EQ(0, rmdir_cache_result);
TearDownAndroidData(android_data_, true);
+ dalvik_cache_.clear();
// icu4c has a fixed 10-element array "gCommonICUDataArray".
// If we run > 10 tests, we fill that array and u_setCommonData fails.
diff --git a/runtime/elf.h b/runtime/elf.h
index d1efc92..63b18c5 100644
--- a/runtime/elf.h
+++ b/runtime/elf.h
@@ -1284,6 +1284,7 @@
SHT_MIPS_REGINFO = 0x70000006, // Register usage information
SHT_MIPS_OPTIONS = 0x7000000d, // General options
+ SHT_MIPS_ABIFLAGS = 0x7000002a, // Abiflags options
SHT_HIPROC = 0x7fffffff, // Highest processor arch-specific type.
SHT_LOUSER = 0x80000000, // Lowest type reserved for applications.
@@ -1606,7 +1607,8 @@
// MIPS program header types.
PT_MIPS_REGINFO = 0x70000000, // Register usage information.
PT_MIPS_RTPROC = 0x70000001, // Runtime procedure table.
- PT_MIPS_OPTIONS = 0x70000002 // Options segment.
+ PT_MIPS_OPTIONS = 0x70000002, // Options segment.
+ PT_MIPS_ABIFLAGS = 0x70000003 // Abiflags segment.
};
// Segment flag bits.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index faa3d3b..2e5b599 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -59,6 +59,8 @@
#include "heap-inl.h"
#include "image.h"
#include "intern_table.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -2668,6 +2670,12 @@
// permanantly disabled. b/17942071
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
+
+ if ((gc_type == collector::kGcTypeFull) && runtime->UseJit()) {
+ // It's time to clear all inline caches, in case some classes can be unloaded.
+ runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self);
+ }
+
CHECK(collector != nullptr)
<< "Could not find garbage collector with collector_type="
<< static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index a4e5587..9ecd391 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1535,50 +1535,31 @@
// images[0] is f/c/d/e.art
// ----------------------------------------------
// images[1] is g/h/i/j.art -> /a/b/h/i/j.art
-
- // Derive pattern.
- std::vector<std::string> left;
- Split(input_image_file_name, '/', &left);
- std::vector<std::string> right;
- Split(images[0], '/', &right);
-
- size_t common = 1;
- while (common < left.size() && common < right.size()) {
- if (left[left.size() - common - 1] != right[right.size() - common - 1]) {
- break;
- }
- common++;
+ const std::string& first_image = images[0];
+ // Length of common suffix.
+ size_t common = 0;
+ while (common < input_image_file_name.size() &&
+ common < first_image.size() &&
+ *(input_image_file_name.end() - common - 1) == *(first_image.end() - common - 1)) {
+ ++common;
}
-
- std::vector<std::string> prefix_vector(left.begin(), left.end() - common);
- std::string common_prefix = Join(prefix_vector, '/');
- if (!common_prefix.empty() && common_prefix[0] != '/' && input_image_file_name[0] == '/') {
- common_prefix = "/" + common_prefix;
- }
+ // We want to replace the prefix of the input image with the prefix of the boot class path.
+ // This handles the case where the image file contains @ separators.
+ // For example, if image_file_name is oats/system@framework@boot.art
+ // and images[0] is .../arm/boot.art,
+ // then the image name prefix will be oats/system@framework@,
+ // so that the other images are openable.
+ const size_t old_prefix_length = first_image.size() - common;
+ const std::string new_prefix = input_image_file_name.substr(
+ 0,
+ input_image_file_name.size() - common);
// Apply pattern to images[1] .. images[n].
for (size_t i = 1; i < images.size(); ++i) {
- std::string image = images[i];
-
- size_t rslash = std::string::npos;
- for (size_t j = 0; j < common; ++j) {
- if (rslash != std::string::npos) {
- rslash--;
- }
-
- rslash = image.rfind('/', rslash);
- if (rslash == std::string::npos) {
- rslash = 0;
- }
- if (rslash == 0) {
- break;
- }
- }
- std::string image_part = image.substr(rslash);
-
- std::string new_image = common_prefix + (StartsWith(image_part, "/") ? "" : "/") +
- image_part;
- image_file_names->push_back(new_image);
+ const std::string& image = images[i];
+ CHECK_GT(image.length(), old_prefix_length);
+ std::string suffix = image.substr(old_prefix_length);
+ image_file_names->push_back(new_prefix + suffix);
}
}
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 7dbd89c..5bd9a6b 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -41,20 +41,42 @@
JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
auto* jit_options = new JitOptions;
jit_options->use_jit_ = options.GetOrDefault(RuntimeArgumentMap::UseJIT);
+
jit_options->code_cache_initial_capacity_ =
options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
jit_options->code_cache_max_capacity_ =
options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
- jit_options->compile_threshold_ =
- options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
- // TODO(ngeoffray): Make this a proper option.
- jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2;
- jit_options->warmup_threshold_ =
- options.GetOrDefault(RuntimeArgumentMap::JITWarmupThreshold);
jit_options->dump_info_on_shutdown_ =
options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
jit_options->save_profiling_info_ =
- options.GetOrDefault(RuntimeArgumentMap::JITSaveProfilingInfo);;
+ options.GetOrDefault(RuntimeArgumentMap::JITSaveProfilingInfo);
+
+ jit_options->compile_threshold_ = options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
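+ // The thresholds feed 16-bit hotness counters internally, hence the
+ // uint16_t limit checks below.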
+ if (jit_options->compile_threshold_ > std::numeric_limits<uint16_t>::max()) {
+ LOG(FATAL) << "Method compilation threshold is above its internal limit.";
+ }
+
+ if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
+ jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
+ if (jit_options->warmup_threshold_ > std::numeric_limits<uint16_t>::max()) {
+ LOG(FATAL) << "Method warmup threshold is above its internal limit.";
+ }
+ } else {
+ jit_options->warmup_threshold_ = jit_options->compile_threshold_ / 2;
+ }
+
+ if (options.Exists(RuntimeArgumentMap::JITOsrThreshold)) {
+ jit_options->osr_threshold_ = *options.Get(RuntimeArgumentMap::JITOsrThreshold);
+ if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
+ LOG(FATAL) << "Method on stack replacement threshold is above its internal limit.";
+ }
+ } else {
+ jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2;
+ if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
+ jit_options->osr_threshold_ = std::numeric_limits<uint16_t>::max();
+ }
+ }
+
return jit_options;
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index ee416d8..d5c2134 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -43,8 +43,7 @@
class Jit {
public:
static constexpr bool kStressMode = kIsDebugBuild;
- static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 500;
- static constexpr size_t kDefaultWarmupThreshold = kDefaultCompileThreshold / 2;
+ static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 10000;
virtual ~Jit();
static Jit* Create(JitOptions* options, std::string* error_msg);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 0b0f926..af47da6 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -297,6 +297,15 @@
}
}
+void JitCodeCache::ClearGcRootsInInlineCaches(Thread* self) {
+ MutexLock mu(self, lock_);
+ for (ProfilingInfo* info : profiling_infos_) {
+ if (!info->IsInUseByCompiler()) {
+ info->ClearGcRootsInInlineCaches();
+ }
+ }
+}
+
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
ArtMethod* method,
const uint8_t* mapping_table,
@@ -679,7 +688,7 @@
// Also remove the saved entry point from the ProfilingInfo objects.
for (ProfilingInfo* info : profiling_infos_) {
const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
- if (!ContainsPc(ptr) && !info->IsMethodBeingCompiled()) {
+ if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) {
info->GetMethod()->SetProfilingInfo(nullptr);
}
info->SetSavedEntryPoint(nullptr);
@@ -731,7 +740,7 @@
// code cache collection.
if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
// We clear the inline caches as classes in it might be stalled.
- info->ClearInlineCaches();
+ info->ClearGcRootsInInlineCaches();
// Do a fence to make sure the clearing is seen before attaching to the method.
QuasiAtomic::ThreadFenceRelease();
info->GetMethod()->SetProfilingInfo(info);
@@ -919,6 +928,22 @@
return true;
}
+ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
+ MutexLock mu(self, lock_);
+ ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ if (info != nullptr) {
+ info->IncrementInlineUse();
+ }
+ return info;
+}
+
+void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
+ MutexLock mu(self, lock_);
+ ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ DCHECK(info != nullptr);
+ info->DecrementInlineUse();
+}
+
void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED) {
ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
DCHECK(info->IsMethodBeingCompiled());
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 0bd4f7d..98dd70d 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -71,10 +71,22 @@
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
+ // Notify the code cache that the compiler wants to use the
+ // profiling info of `method` to drive optimizations, and
+ // therefore ensure the returned profiling info object is not
+ // collected.
+ ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!lock_);
+
void DoneCompiling(ArtMethod* method, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
+ void DoneCompilerUse(ArtMethod* method, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!lock_);
+
// Allocate and write code and its metadata to the code cache.
uint8_t* CommitCode(Thread* self,
ArtMethod* method,
@@ -143,6 +155,8 @@
REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
+
// Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true,
// will collect and retry if the first allocation is unsuccessful.
ProfilingInfo* AddProfilingInfo(Thread* self,
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 3820592..07c8051 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -97,8 +97,8 @@
}
}
}
- // Unsuccessfull - cache is full, making it megamorphic.
- DCHECK(cache->IsMegamorphic());
+ // Unsuccessful - cache is full, making it megamorphic. We do not DCHECK it though,
+ // as the garbage collector might clear the entries concurrently.
}
} // namespace art
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index a8c056c..55d627a 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -56,10 +56,11 @@
mirror::Class* GetMonomorphicType() const SHARED_REQUIRES(Locks::mutator_lock_) {
// Note that we cannot ensure the inline cache is actually monomorphic
// at this point, as other threads may have updated it.
+ DCHECK(!classes_[0].IsNull());
return classes_[0].Read();
}
- bool IsUnitialized() const {
+ bool IsUninitialized() const {
return classes_[0].IsNull();
}
@@ -134,8 +135,27 @@
return saved_entry_point_;
}
- void ClearInlineCaches() {
- memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
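+ // Clears only the class GC roots in each inline cache, keeping the entry's
+ // dex_pc_ intact (unlike the full memset done at construction time).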
+ void ClearGcRootsInInlineCaches() {
+ for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+ InlineCache* cache = &cache_[i];
+ memset(&cache->classes_[0],
+ 0,
+ InlineCache::kIndividualCacheSize * sizeof(GcRoot<mirror::Class>));
+ }
+ }
+
+ void IncrementInlineUse() {
+ DCHECK_NE(current_inline_uses_, std::numeric_limits<uint16_t>::max());
+ current_inline_uses_++;
+ }
+
+ void DecrementInlineUse() {
+ DCHECK_GT(current_inline_uses_, 0);
+ current_inline_uses_--;
+ }
+
+ bool IsInUseByCompiler() const {
+ return IsMethodBeingCompiled() || (current_inline_uses_ > 0);
}
private:
@@ -143,8 +163,9 @@
: number_of_inline_caches_(entries.size()),
method_(method),
is_method_being_compiled_(false),
+ current_inline_uses_(0),
saved_entry_point_(nullptr) {
- ClearInlineCaches();
+ memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
for (size_t i = 0; i < number_of_inline_caches_; ++i) {
cache_[i].dex_pc_ = entries[i];
}
@@ -161,6 +182,10 @@
// TODO: Make the JIT code cache lock global.
bool is_method_being_compiled_;
+ // When the compiler inlines the method associated with this ProfilingInfo,
+ // it updates this counter so that the GC does not try to clear the inline caches.
+ uint16_t current_inline_uses_;
+
// Entry point of the corresponding ArtMethod, while the JIT code cache
// is poking for the liveness of compiled code.
const void* saved_entry_point_;
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 1ce5841..a262c7a 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -497,6 +497,24 @@
self->SetWaitMonitor(nullptr);
}
+ // Allocate the InterruptedException while not holding the monitor lock, since the
+ // allocation may trigger a GC.
+ // If the GC requires acquiring the monitor for enqueuing cleared references, this would
+ // cause a deadlock if the monitor is held.
+ if (was_interrupted && interruptShouldThrow) {
+ /*
+ * We were interrupted while waiting, or somebody interrupted an
+ * un-interruptible thread earlier and we're bailing out immediately.
+ *
+ * The doc sayeth: "The interrupted status of the current thread is
+ * cleared when this exception is thrown."
+ */
+ {
+ MutexLock mu(self, *self->GetWaitMutex());
+ self->SetInterruptedLocked(false);
+ }
+ self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
+ }
+
// Re-acquire the monitor and lock.
Lock(self);
monitor_lock_.Lock(self);
@@ -516,21 +534,6 @@
RemoveFromWaitSet(self);
monitor_lock_.Unlock(self);
-
- if (was_interrupted && interruptShouldThrow) {
- /*
- * We were interrupted while waiting, or somebody interrupted an
- * un-interruptible thread earlier and we're bailing out immediately.
- *
- * The doc sayeth: "The interrupted status of the current thread is
- * cleared when this exception is thrown."
- */
- {
- MutexLock mu(self, *self->GetWaitMutex());
- self->SetInterruptedLocked(false);
- }
- self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
- }
}
void Monitor::Notify(Thread* self) {
diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc
index 9e78cda..ce38e4f 100644
--- a/runtime/monitor_pool.cc
+++ b/runtime/monitor_pool.cc
@@ -42,11 +42,12 @@
if (capacity_ == 0U) {
// Initialization.
capacity_ = kInitialChunkStorage;
- uintptr_t* new_backing = new uintptr_t[capacity_];
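+ // Note: the trailing "()" value-initializes the array, zeroing all slots,
+ // which the DCHECKs in FreeInternal() rely on.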
+ uintptr_t* new_backing = new uintptr_t[capacity_]();
+ DCHECK(monitor_chunks_.LoadRelaxed() == nullptr);
monitor_chunks_.StoreRelaxed(new_backing);
} else {
size_t new_capacity = 2 * capacity_;
- uintptr_t* new_backing = new uintptr_t[new_capacity];
+ uintptr_t* new_backing = new uintptr_t[new_capacity]();
uintptr_t* old_backing = monitor_chunks_.LoadRelaxed();
memcpy(new_backing, old_backing, sizeof(uintptr_t) * capacity_);
monitor_chunks_.StoreRelaxed(new_backing);
@@ -88,6 +89,25 @@
first_free_ = last;
}
+void MonitorPool::FreeInternal() {
+ // This is called on shutdown with NO_THREAD_SAFETY_ANALYSIS; we cannot, and need not, lock.
+ uintptr_t* backing = monitor_chunks_.LoadRelaxed();
+ DCHECK(backing != nullptr);
+ DCHECK_GT(capacity_, 0U);
+ DCHECK_GT(num_chunks_, 0U);
+
+ for (size_t i = 0; i < capacity_; ++i) {
+ if (i < num_chunks_) {
+ DCHECK_NE(backing[i], 0U);
+ allocator_.deallocate(reinterpret_cast<uint8_t*>(backing[i]), kChunkSize);
+ } else {
+ DCHECK_EQ(backing[i], 0U);
+ }
+ }
+
+ delete[] backing;
+}
+
Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj,
int32_t hash_code)
SHARED_REQUIRES(Locks::mutator_lock_) {
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index de553fc..875b3fe 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -104,6 +104,12 @@
#endif
}
+ ~MonitorPool() {
+#ifdef __LP64__
+ FreeInternal();
+#endif
+ }
+
private:
#ifdef __LP64__
// When we create a monitor pool, threads have not been initialized, yet, so ignore thread-safety
@@ -112,6 +118,10 @@
void AllocateChunk() REQUIRES(Locks::allocated_monitor_ids_lock_);
+ // Release all chunks and metadata. This is done on shutdown, when threads have been
+ // destroyed, so ignore thread-safety analysis.
+ void FreeInternal() NO_THREAD_SAFETY_ANALYSIS;
+
Monitor* CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index d64aa43..60403f9 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -166,6 +166,9 @@
.Define("-Xjitwarmupthreshold:_")
.WithType<unsigned int>()
.IntoKey(M::JITWarmupThreshold)
+ .Define("-Xjitosrthreshold:_")
+ .WithType<unsigned int>()
+ .IntoKey(M::JITOsrThreshold)
.Define("-Xjitsaveprofilinginfo")
.WithValue(true)
.IntoKey(M::JITSaveProfilingInfo)
@@ -694,6 +697,8 @@
UsageMessage(stream, " -Xusejit:booleanvalue\n");
UsageMessage(stream, " -Xjitinitialsize:N\n");
UsageMessage(stream, " -Xjitmaxsize:N\n");
+ UsageMessage(stream, " -Xjitwarmupthreshold:integervalue\n");
+ UsageMessage(stream, " -Xjitosrthreshold:integervalue\n");
UsageMessage(stream, " -X[no]relocate\n");
UsageMessage(stream, " -X[no]dex2oat (Whether to invoke dex2oat on the application)\n");
UsageMessage(stream, " -X[no]image-dex2oat (Whether to create and use a boot image)\n");
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 8237b06..bc963c5 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -36,6 +36,7 @@
static constexpr bool kDumpHeapObjectOnSigsevg = false;
static constexpr bool kUseSigRTTimeout = true;
+static constexpr bool kDumpNativeStackOnTimeout = true;
struct Backtrace {
public:
@@ -350,7 +351,9 @@
if (runtime != nullptr) {
if (IsTimeoutSignal(signal_number)) {
// Special timeout signal. Try to dump all threads.
- runtime->GetThreadList()->DumpForSigQuit(LOG(INTERNAL_FATAL));
+ // Note: Do not use DumpForSigQuit, as that might disable native unwinding, but the
+ // native parts are of value here.
+ runtime->GetThreadList()->Dump(LOG(INTERNAL_FATAL), kDumpNativeStackOnTimeout);
}
gc::Heap* heap = runtime->GetHeap();
LOG(INTERNAL_FATAL) << "Fault message: " << runtime->GetFaultMessage();
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 838d1a9..3fd9905 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -69,7 +69,8 @@
RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
RUNTIME_OPTIONS_KEY (bool, DumpNativeStackOnSigQuit, true)
RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
-RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold, jit::Jit::kDefaultWarmupThreshold)
+RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold)
+RUNTIME_OPTIONS_KEY (unsigned int, JITOsrThreshold)
RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheInitialCapacity, jit::JitCodeCache::kInitialCapacity)
RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheMaxCapacity, jit::JitCodeCache::kMaxCapacity)
RUNTIME_OPTIONS_KEY (bool, JITSaveProfilingInfo, false)
diff --git a/runtime/simulator/Android.mk b/runtime/simulator/Android.mk
index c154eb6..5c71da6 100644
--- a/runtime/simulator/Android.mk
+++ b/runtime/simulator/Android.mk
@@ -86,7 +86,7 @@
LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
# For simulator_arm64.
ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_SHARED_LIBRARIES += libvixld
+ LOCAL_SHARED_LIBRARIES += libvixl
else
LOCAL_SHARED_LIBRARIES += libvixl
endif
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 13564a6..472a85c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1120,7 +1120,8 @@
}
std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid, map));
if (!backtrace->Unwind(0, reinterpret_cast<ucontext*>(ucontext_ptr))) {
- os << prefix << "(backtrace::Unwind failed for thread " << tid << ")\n";
+ os << prefix << "(backtrace::Unwind failed for thread " << tid
+ << ": " << backtrace->GetErrorString(backtrace->GetError()) << ")\n";
return;
} else if (backtrace->NumFrames() == 0) {
os << prefix << "(no native stack frames for thread " << tid << ")\n";
diff --git a/test/004-checker-UnsafeTest18/src/Main.java b/test/004-checker-UnsafeTest18/src/Main.java
index bb6de2e..bb020b9 100644
--- a/test/004-checker-UnsafeTest18/src/Main.java
+++ b/test/004-checker-UnsafeTest18/src/Main.java
@@ -87,18 +87,36 @@
/// CHECK-START: void Main.load() intrinsics_recognition (after)
/// CHECK-DAG: InvokeVirtual intrinsic:UnsafeLoadFence
+ //
+ /// CHECK-START: void Main.load() instruction_simplifier (after)
+ /// CHECK-NOT: InvokeVirtual intrinsic:UnsafeLoadFence
+ //
+ /// CHECK-START: void Main.load() instruction_simplifier (after)
+ /// CHECK-DAG: MemoryBarrier kind:LoadAny
private static void load() {
unsafe.loadFence();
}
/// CHECK-START: void Main.store() intrinsics_recognition (after)
/// CHECK-DAG: InvokeVirtual intrinsic:UnsafeStoreFence
+ //
+ /// CHECK-START: void Main.store() instruction_simplifier (after)
+ /// CHECK-NOT: InvokeVirtual intrinsic:UnsafeStoreFence
+ //
+ /// CHECK-START: void Main.store() instruction_simplifier (after)
+ /// CHECK-DAG: MemoryBarrier kind:AnyStore
private static void store() {
unsafe.storeFence();
}
/// CHECK-START: void Main.full() intrinsics_recognition (after)
/// CHECK-DAG: InvokeVirtual intrinsic:UnsafeFullFence
+ //
+ /// CHECK-START: void Main.full() instruction_simplifier (after)
+ /// CHECK-NOT: InvokeVirtual intrinsic:UnsafeFullFence
+ //
+ /// CHECK-START: void Main.full() instruction_simplifier (after)
+ /// CHECK-DAG: MemoryBarrier kind:AnyAny
private static void full() {
unsafe.fullFence();
}
diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java
index bcb697a..15683b0 100644
--- a/test/141-class-unload/src/Main.java
+++ b/test/141-class-unload/src/Main.java
@@ -181,6 +181,7 @@
Class intHolder = loader.loadClass("IntHolder");
Method loadLibrary = intHolder.getDeclaredMethod("loadLibrary", String.class);
loadLibrary.invoke(intHolder, nativeLibraryName);
+ waitForCompilation(intHolder);
return new WeakReference(loader);
}
diff --git a/test/580-checker-string-factory-intrinsics/expected.txt b/test/580-checker-string-factory-intrinsics/expected.txt
new file mode 100644
index 0000000..86e041d
--- /dev/null
+++ b/test/580-checker-string-factory-intrinsics/expected.txt
@@ -0,0 +1,3 @@
+foo
+bar
+baz
diff --git a/test/580-checker-string-factory-intrinsics/info.txt b/test/580-checker-string-factory-intrinsics/info.txt
new file mode 100644
index 0000000..3d01a19
--- /dev/null
+++ b/test/580-checker-string-factory-intrinsics/info.txt
@@ -0,0 +1 @@
+Ensure java.lang.StringFactory intrinsics are recognized and used.
diff --git a/test/580-checker-string-factory-intrinsics/src/Main.java b/test/580-checker-string-factory-intrinsics/src/Main.java
new file mode 100644
index 0000000..a2e34bf
--- /dev/null
+++ b/test/580-checker-string-factory-intrinsics/src/Main.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ /// CHECK-START: void Main.testNewStringFromBytes() builder (after)
+ /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromBytes intrinsic:None
+
+ /// CHECK-START: void Main.testNewStringFromBytes() intrinsics_recognition (after)
+ /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromBytes intrinsic:StringNewStringFromBytes
+
+ public static void testNewStringFromBytes() {
+ byte[] bytes = { 'f', 'o', 'o' };
+ String s = StringFactory.newStringFromBytes(bytes, 0, 0, 3);
+ System.out.println(s);
+ }
+
+ // The (native) method
+ //
+ // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+ //
+ // is recognized as intrinsic StringNewStringFromChars. However,
+ // because this method is not public, we cannot call it and check
+ // that the compiler actually intrinsifies it (as it does for the
+ // StringNewStringFromBytes and StringNewStringFromString
+ // intrinsics) with Checker.
+ //
+ // We can call a public method such as
+ //
+ // java.lang.StringFactory.newStringFromChars(char[] data)
+ //
+ // which contains a call to the former (non-public) native method.
+ // However, this call will not be inlined (because it is a method in
+ // another Dex file and contains a call, which needs an
+ // environment), so we cannot use Checker here to ensure the native
+ // call was intrinsified either.
+
+ /// CHECK-START: void Main.testNewStringFromChars() builder (after)
+ /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromChars intrinsic:None
+
+ /// CHECK-START: void Main.testNewStringFromChars() intrinsics_recognition (after)
+ /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromChars intrinsic:None
+
+ /// CHECK-START: void Main.testNewStringFromChars() inliner (after)
+ /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromChars intrinsic:None
+
+ public static void testNewStringFromChars() {
+ char[] chars = { 'b', 'a', 'r' };
+ String s = StringFactory.newStringFromChars(chars);
+ System.out.println(s);
+ }
+
+ /// CHECK-START: void Main.testNewStringFromString() builder (after)
+ /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromString intrinsic:None
+
+ /// CHECK-START: void Main.testNewStringFromString() intrinsics_recognition (after)
+ /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromString intrinsic:StringNewStringFromString
+
+ public static void testNewStringFromString() {
+ String s1 = "baz";
+ String s2 = StringFactory.newStringFromString(s1);
+ System.out.println(s2);
+ }
+
+ public static void main(String[] args) throws Exception {
+ testNewStringFromBytes();
+ testNewStringFromChars();
+ testNewStringFromString();
+ }
+}