Merge "Pass --root to cpplint."
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 8a2c94a..09c53b6 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -100,6 +100,7 @@
"linker/arm/relative_patcher_arm_base.cc",
"linker/arm/relative_patcher_thumb2.cc",
"optimizing/code_generator_arm.cc",
+ "optimizing/code_generator_arm_vixl.cc",
"optimizing/dex_cache_array_fixups_arm.cc",
"optimizing/instruction_simplifier_arm.cc",
"optimizing/instruction_simplifier_shared.cc",
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index a18935f..421a1d5 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -56,35 +56,67 @@
compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ const std::vector<const DexFile*>& boot_class_path = class_linker->GetBootClassPath();
+ const size_t num_images = boot_class_path.size();
+
// Enable write for dex2dex.
- for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
+ for (const DexFile* dex_file : boot_class_path) {
dex_file->EnableWrite();
}
// Create a generic location tmp file, to be the base of the .art and .oat temporary files.
- ScratchFile location;
- ScratchFile image_location(location, ".art");
+ std::vector<ScratchFile> image_locations;
+ {
+ ScratchFile location;
+ for (int i = 0; i < static_cast<int>(num_images); ++i) {
+ std::string cur_location(StringPrintf("%s-%d.art", location.GetFilename().c_str(), i));
+ image_locations.push_back(ScratchFile(cur_location));
+ }
+ }
+ std::vector<std::string> image_filenames;
+ std::vector<ScratchFile> image_files;
+ std::string image_dir;
+ for (ScratchFile& file : image_locations) {
+ std::string image_filename(GetSystemImageFilename(file.GetFilename().c_str(), kRuntimeISA));
+ image_filenames.push_back(image_filename);
+ size_t pos = image_filename.rfind('/');
+ CHECK_NE(pos, std::string::npos) << image_filename;
+ if (image_dir.empty()) {
+ image_dir = image_filename.substr(0, pos);
+ int mkdir_result = mkdir(image_dir.c_str(), 0700);
+ CHECK_EQ(0, mkdir_result) << image_dir;
+ }
+ image_files.push_back(ScratchFile(OS::CreateEmptyFile(image_filename.c_str())));
+ }
- std::string image_filename(GetSystemImageFilename(image_location.GetFilename().c_str(),
- kRuntimeISA));
- size_t pos = image_filename.rfind('/');
- CHECK_NE(pos, std::string::npos) << image_filename;
- std::string image_dir(image_filename, 0, pos);
- int mkdir_result = mkdir(image_dir.c_str(), 0700);
- CHECK_EQ(0, mkdir_result) << image_dir;
- ScratchFile image_file(OS::CreateEmptyFile(image_filename.c_str()));
-
- std::string oat_filename = ReplaceFileExtension(image_filename, "oat");
- ScratchFile oat_file(OS::CreateEmptyFile(oat_filename.c_str()));
-
- std::string vdex_filename = ReplaceFileExtension(image_filename, "vdex");
- ScratchFile vdex_file(OS::CreateEmptyFile(vdex_filename.c_str()));
+ std::vector<std::string> oat_filenames;
+ std::vector<ScratchFile> oat_files;
+ std::vector<std::string> vdex_filenames;
+ std::vector<ScratchFile> vdex_files;
+ for (const std::string& image_filename : image_filenames) {
+ std::string oat_filename = ReplaceFileExtension(image_filename, "oat");
+ oat_files.push_back(ScratchFile(OS::CreateEmptyFile(oat_filename.c_str())));
+ oat_filenames.push_back(oat_filename);
+ std::string vdex_filename = ReplaceFileExtension(image_filename, "vdex");
+ vdex_files.push_back(ScratchFile(OS::CreateEmptyFile(vdex_filename.c_str())));
+ vdex_filenames.push_back(vdex_filename);
+ }
const uintptr_t requested_image_base = ART_BASE_ADDRESS;
std::unordered_map<const DexFile*, size_t> dex_file_to_oat_index_map;
- std::vector<const char*> oat_filename_vector(1, oat_filename.c_str());
- for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
- dex_file_to_oat_index_map.emplace(dex_file, 0);
+ std::vector<const char*> oat_filename_vector;
+ for (const std::string& file : oat_filenames) {
+ oat_filename_vector.push_back(file.c_str());
}
+ std::vector<const char*> image_filename_vector;
+ for (const std::string& file : image_filenames) {
+ image_filename_vector.push_back(file.c_str());
+ }
+ size_t image_idx = 0;
+ for (const DexFile* dex_file : boot_class_path) {
+ dex_file_to_oat_index_map.emplace(dex_file, image_idx);
+ ++image_idx;
+ }
+ // TODO: compile_pic should be a test argument.
std::unique_ptr<ImageWriter> writer(new ImageWriter(*compiler_driver_,
requested_image_base,
/*compile_pic*/false,
@@ -92,7 +124,6 @@
storage_mode,
oat_filename_vector,
dex_file_to_oat_index_map));
- // TODO: compile_pic should be a test argument.
{
{
jobject class_loader = nullptr;
@@ -103,97 +134,128 @@
t.NewTiming("WriteElf");
SafeMap<std::string, std::string> key_value_store;
+ std::vector<const char*> dex_filename_vector;
+ for (size_t i = 0; i < boot_class_path.size(); ++i) {
+ dex_filename_vector.push_back("");
+ }
+ key_value_store.Put(OatHeader::kBootClassPathKey,
+ gc::space::ImageSpace::GetMultiImageBootClassPath(
+ dex_filename_vector,
+ oat_filename_vector,
+ image_filename_vector));
+
const std::vector<const DexFile*>& dex_files = class_linker->GetBootClassPath();
- std::unique_ptr<ElfWriter> elf_writer = CreateElfWriterQuick(
- compiler_driver_->GetInstructionSet(),
- compiler_driver_->GetInstructionSetFeatures(),
- &compiler_driver_->GetCompilerOptions(),
- oat_file.GetFile());
- elf_writer->Start();
- OatWriter oat_writer(/*compiling_boot_image*/true, &timings);
- OutputStream* oat_rodata = elf_writer->StartRoData();
- for (const DexFile* dex_file : dex_files) {
+ std::vector<std::unique_ptr<ElfWriter>> elf_writers;
+ std::vector<std::unique_ptr<OatWriter>> oat_writers;
+ for (ScratchFile& oat_file : oat_files) {
+ elf_writers.emplace_back(CreateElfWriterQuick(compiler_driver_->GetInstructionSet(),
+ compiler_driver_->GetInstructionSetFeatures(),
+ &compiler_driver_->GetCompilerOptions(),
+ oat_file.GetFile()));
+ elf_writers.back()->Start();
+ oat_writers.emplace_back(new OatWriter(/*compiling_boot_image*/true, &timings));
+ }
+
+ std::vector<OutputStream*> rodata;
+ std::vector<std::unique_ptr<MemMap>> opened_dex_files_map;
+ std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
+ // Now that we have finalized key_value_store_, start writing the oat file.
+ for (size_t i = 0, size = oat_writers.size(); i != size; ++i) {
+ const DexFile* dex_file = dex_files[i];
+ rodata.push_back(elf_writers[i]->StartRoData());
ArrayRef<const uint8_t> raw_dex_file(
reinterpret_cast<const uint8_t*>(&dex_file->GetHeader()),
dex_file->GetHeader().file_size_);
- oat_writer.AddRawDexFileSource(raw_dex_file,
- dex_file->GetLocation().c_str(),
- dex_file->GetLocationChecksum());
- }
- std::unique_ptr<MemMap> opened_dex_files_map;
- std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
- {
- bool dex_files_ok = oat_writer.WriteAndOpenDexFiles(
- kIsVdexEnabled ? vdex_file.GetFile() : oat_file.GetFile(),
- oat_rodata,
+ oat_writers[i]->AddRawDexFileSource(raw_dex_file,
+ dex_file->GetLocation().c_str(),
+ dex_file->GetLocationChecksum());
+
+ std::unique_ptr<MemMap> cur_opened_dex_files_map;
+ std::vector<std::unique_ptr<const DexFile>> cur_opened_dex_files;
+ bool dex_files_ok = oat_writers[i]->WriteAndOpenDexFiles(
+ kIsVdexEnabled ? vdex_files[i].GetFile() : oat_files[i].GetFile(),
+ rodata.back(),
compiler_driver_->GetInstructionSet(),
compiler_driver_->GetInstructionSetFeatures(),
&key_value_store,
/* verify */ false, // Dex files may be dex-to-dex-ed, don't verify.
- &opened_dex_files_map,
- &opened_dex_files);
+ &cur_opened_dex_files_map,
+ &cur_opened_dex_files);
ASSERT_TRUE(dex_files_ok);
- }
+ if (cur_opened_dex_files_map != nullptr) {
+ opened_dex_files_map.push_back(std::move(cur_opened_dex_files_map));
+ for (std::unique_ptr<const DexFile>& cur_dex_file : cur_opened_dex_files) {
+ // dex_file_oat_index_map_.emplace(dex_file.get(), i);
+ opened_dex_files.push_back(std::move(cur_dex_file));
+ }
+ } else {
+ ASSERT_TRUE(cur_opened_dex_files.empty());
+ }
+ }
bool image_space_ok = writer->PrepareImageAddressSpace();
ASSERT_TRUE(image_space_ok);
- linker::MultiOatRelativePatcher patcher(compiler_driver_->GetInstructionSet(),
- instruction_set_features_.get());
- oat_writer.PrepareLayout(compiler_driver_.get(), writer.get(), dex_files, &patcher);
- size_t rodata_size = oat_writer.GetOatHeader().GetExecutableOffset();
- size_t text_size = oat_writer.GetOatSize() - rodata_size;
- elf_writer->SetLoadedSectionSizes(rodata_size, text_size, oat_writer.GetBssSize());
+ for (size_t i = 0, size = oat_files.size(); i != size; ++i) {
+ linker::MultiOatRelativePatcher patcher(compiler_driver_->GetInstructionSet(),
+ instruction_set_features_.get());
+ OatWriter* const oat_writer = oat_writers[i].get();
+ ElfWriter* const elf_writer = elf_writers[i].get();
+ std::vector<const DexFile*> cur_dex_files(1u, dex_files[i]);
+ oat_writer->PrepareLayout(compiler_driver_.get(), writer.get(), cur_dex_files, &patcher);
+ size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
+ size_t text_size = oat_writer->GetOatSize() - rodata_size;
+ elf_writer->SetLoadedSectionSizes(rodata_size, text_size, oat_writer->GetBssSize());
- writer->UpdateOatFileLayout(/* oat_index */ 0u,
- elf_writer->GetLoadedSize(),
- oat_writer.GetOatDataOffset(),
- oat_writer.GetOatSize());
+ writer->UpdateOatFileLayout(i,
+ elf_writer->GetLoadedSize(),
+ oat_writer->GetOatDataOffset(),
+ oat_writer->GetOatSize());
- bool rodata_ok = oat_writer.WriteRodata(oat_rodata);
- ASSERT_TRUE(rodata_ok);
- elf_writer->EndRoData(oat_rodata);
+ bool rodata_ok = oat_writer->WriteRodata(rodata[i]);
+ ASSERT_TRUE(rodata_ok);
+ elf_writer->EndRoData(rodata[i]);
- OutputStream* text = elf_writer->StartText();
- bool text_ok = oat_writer.WriteCode(text);
- ASSERT_TRUE(text_ok);
- elf_writer->EndText(text);
+ OutputStream* text = elf_writer->StartText();
+ bool text_ok = oat_writer->WriteCode(text);
+ ASSERT_TRUE(text_ok);
+ elf_writer->EndText(text);
- bool header_ok = oat_writer.WriteHeader(elf_writer->GetStream(), 0u, 0u, 0u);
- ASSERT_TRUE(header_ok);
+ bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream(), 0u, 0u, 0u);
+ ASSERT_TRUE(header_ok);
- writer->UpdateOatFileHeader(/* oat_index */ 0u, oat_writer.GetOatHeader());
+ writer->UpdateOatFileHeader(i, oat_writer->GetOatHeader());
- elf_writer->WriteDynamicSection();
- elf_writer->WriteDebugInfo(oat_writer.GetMethodDebugInfo());
- elf_writer->WritePatchLocations(oat_writer.GetAbsolutePatchLocations());
+ elf_writer->WriteDynamicSection();
+ elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo());
+ elf_writer->WritePatchLocations(oat_writer->GetAbsolutePatchLocations());
- bool success = elf_writer->End();
- ASSERT_TRUE(success);
+ bool success = elf_writer->End();
+ ASSERT_TRUE(success);
+ }
}
}
- // Workound bug that mcld::Linker::emit closes oat_file by reopening as dup_oat.
- std::unique_ptr<File> dup_oat(OS::OpenFileReadWrite(oat_file.GetFilename().c_str()));
- ASSERT_TRUE(dup_oat.get() != nullptr);
{
- std::vector<const char*> dup_oat_filename(1, dup_oat->GetPath().c_str());
- std::vector<const char*> dup_image_filename(1, image_file.GetFilename().c_str());
bool success_image = writer->Write(kInvalidFd,
- dup_image_filename,
- dup_oat_filename);
+ image_filename_vector,
+ oat_filename_vector);
ASSERT_TRUE(success_image);
- bool success_fixup = ElfWriter::Fixup(dup_oat.get(),
- writer->GetOatDataBegin(0));
- ASSERT_TRUE(success_fixup);
- ASSERT_EQ(dup_oat->FlushCloseOrErase(), 0) << "Could not flush and close oat file "
- << oat_file.GetFilename();
+ for (size_t i = 0, size = oat_filenames.size(); i != size; ++i) {
+ const char* oat_filename = oat_filenames[i].c_str();
+ std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename));
+ ASSERT_TRUE(oat_file != nullptr);
+ bool success_fixup = ElfWriter::Fixup(oat_file.get(),
+ writer->GetOatDataBegin(i));
+ ASSERT_TRUE(success_fixup);
+ ASSERT_EQ(oat_file->FlushCloseOrErase(), 0) << "Could not flush and close oat file "
+ << oat_filename;
+ }
}
- uint64_t image_file_size;
- size_t image_size;
- {
+ std::vector<uint64_t> image_file_sizes;
+ for (ScratchFile& image_file : image_files) {
std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
ASSERT_TRUE(file.get() != nullptr);
ImageHeader image_header;
@@ -209,9 +271,7 @@
ASSERT_FALSE(space->IsImageSpace());
ASSERT_TRUE(space != nullptr);
ASSERT_TRUE(space->IsMallocSpace());
-
- image_file_size = file->GetLength();
- image_size = image_header.GetImageSize();
+ image_file_sizes.push_back(file->GetLength());
}
ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
@@ -231,11 +291,10 @@
java_lang_dex_file_ = nullptr;
MemMap::Init();
- std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileNames()[0].c_str()));
RuntimeOptions options;
std::string image("-Ximage:");
- image.append(image_location.GetFilename());
+ image.append(image_locations[0].GetFilename());
options.push_back(std::make_pair(image.c_str(), static_cast<void*>(nullptr)));
// By default the compiler this creates will not include patch information.
options.push_back(std::make_pair("-Xnorelocate", nullptr));
@@ -257,39 +316,54 @@
ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace());
// We loaded the runtime with an explicit image, so it must exist.
- gc::space::ImageSpace* image_space = heap->GetBootImageSpaces()[0];
- ASSERT_TRUE(image_space != nullptr);
- if (storage_mode == ImageHeader::kStorageModeUncompressed) {
- // Uncompressed, image should be smaller than file.
- ASSERT_LE(image_size, image_file_size);
- } else {
- // Compressed, file should be smaller than image.
- ASSERT_LE(image_file_size, image_size);
- }
-
- image_space->VerifyImageAllocations();
- uint8_t* image_begin = image_space->Begin();
- uint8_t* image_end = image_space->End();
- CHECK_EQ(requested_image_base, reinterpret_cast<uintptr_t>(image_begin));
- for (size_t i = 0; i < dex->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex->GetClassDef(i);
- const char* descriptor = dex->GetClassDescriptor(class_def);
- mirror::Class* klass = class_linker_->FindSystemClass(soa.Self(), descriptor);
- EXPECT_TRUE(klass != nullptr) << descriptor;
- if (image_classes.find(descriptor) != image_classes.end()) {
- // Image classes should be located inside the image.
- EXPECT_LT(image_begin, reinterpret_cast<uint8_t*>(klass)) << descriptor;
- EXPECT_LT(reinterpret_cast<uint8_t*>(klass), image_end) << descriptor;
+ ASSERT_EQ(heap->GetBootImageSpaces().size(), image_file_sizes.size());
+ for (size_t i = 0; i < image_file_sizes.size(); ++i) {
+ std::unique_ptr<const DexFile> dex(
+ LoadExpectSingleDexFile(GetLibCoreDexFileNames()[i].c_str()));
+ uint64_t image_file_size = image_file_sizes[i];
+ gc::space::ImageSpace* image_space = heap->GetBootImageSpaces()[i];
+ ASSERT_TRUE(image_space != nullptr);
+ if (storage_mode == ImageHeader::kStorageModeUncompressed) {
+ // Uncompressed, image should be smaller than file.
+ ASSERT_LE(image_space->GetImageHeader().GetImageSize(), image_file_size);
} else {
- EXPECT_TRUE(reinterpret_cast<uint8_t*>(klass) >= image_end ||
- reinterpret_cast<uint8_t*>(klass) < image_begin) << descriptor;
+ // Compressed, file should be smaller than image.
+ ASSERT_LE(image_file_size, image_space->GetImageHeader().GetImageSize());
}
- EXPECT_TRUE(Monitor::IsValidLockWord(klass->GetLockWord(false)));
+
+ image_space->VerifyImageAllocations();
+ uint8_t* image_begin = image_space->Begin();
+ uint8_t* image_end = image_space->End();
+ if (i == 0) {
+ // This check is only valid for image 0.
+ CHECK_EQ(requested_image_base, reinterpret_cast<uintptr_t>(image_begin));
+ }
+ for (size_t j = 0; j < dex->NumClassDefs(); ++j) {
+ const DexFile::ClassDef& class_def = dex->GetClassDef(j);
+ const char* descriptor = dex->GetClassDescriptor(class_def);
+ mirror::Class* klass = class_linker_->FindSystemClass(soa.Self(), descriptor);
+ EXPECT_TRUE(klass != nullptr) << descriptor;
+ if (image_classes.find(descriptor) == image_classes.end()) {
+ EXPECT_TRUE(reinterpret_cast<uint8_t*>(klass) >= image_end ||
+ reinterpret_cast<uint8_t*>(klass) < image_begin) << descriptor;
+ } else {
+ // Image classes should be located inside the image.
+ EXPECT_LT(image_begin, reinterpret_cast<uint8_t*>(klass)) << descriptor;
+ EXPECT_LT(reinterpret_cast<uint8_t*>(klass), image_end) << descriptor;
+ }
+ EXPECT_TRUE(Monitor::IsValidLockWord(klass->GetLockWord(false)));
+ }
}
- image_file.Unlink();
- oat_file.Unlink();
- vdex_file.Unlink();
+ for (ScratchFile& image_file : image_files) {
+ image_file.Unlink();
+ }
+ for (ScratchFile& oat_file : oat_files) {
+ oat_file.Unlink();
+ }
+ for (ScratchFile& vdex_file : vdex_files) {
+ vdex_file.Unlink();
+ }
int rmdir_result = rmdir(image_dir.c_str());
CHECK_EQ(0, rmdir_result);
}
@@ -306,7 +380,6 @@
TestWriteRead(ImageHeader::kStorageModeLZ4HC);
}
-
TEST_F(ImageTest, ImageHeaderIsValid) {
uint32_t image_begin = ART_BASE_ADDRESS;
uint32_t image_size_ = 16 * KB;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index cdd4c68..b692c6d 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -391,12 +391,12 @@
// 3) synchronized keyword
// -- TODO: We can support (1) if we remove the mutator lock assert during stub lookup.
# define JNI_TEST_NORMAL_ONLY(TestName) \
- TEST_F(JniCompilerTest, TestName ## Default) { \
+ TEST_F(JniCompilerTest, TestName ## NormalCompiler) { \
SCOPED_TRACE("Normal JNI with compiler"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kNormal); \
TestName ## Impl(); \
} \
- TEST_F(JniCompilerTest, TestName ## Generic) { \
+ TEST_F(JniCompilerTest, TestName ## NormalGeneric) { \
SCOPED_TRACE("Normal JNI with generic"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kNormal); \
TEST_DISABLED_FOR_MIPS(); \
@@ -404,45 +404,40 @@
TestName ## Impl(); \
}
-// Test normal compiler, @FastNative compiler, and normal/@FastNative generic for normal natives.
+// Test (normal, @FastNative) x (compiler, generic).
#define JNI_TEST(TestName) \
JNI_TEST_NORMAL_ONLY(TestName) \
- TEST_F(JniCompilerTest, TestName ## Fast) { \
+ TEST_F(JniCompilerTest, TestName ## FastCompiler) { \
SCOPED_TRACE("@FastNative JNI with compiler"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kFast); \
TestName ## Impl(); \
} \
- \
-
-// TODO: maybe. @FastNative generic JNI support?
-#if 0
+ \
TEST_F(JniCompilerTest, TestName ## FastGeneric) { \
+ SCOPED_TRACE("@FastNative JNI with generic"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kFast); \
TEST_DISABLED_FOR_MIPS(); \
SetCheckGenericJni(true); \
TestName ## Impl(); \
}
-#endif
+// Test (@CriticalNative) x (compiler, generic) only.
#define JNI_TEST_CRITICAL_ONLY(TestName) \
- TEST_F(JniCompilerTest, TestName ## DefaultCritical) { \
+ TEST_F(JniCompilerTest, TestName ## CriticalCompiler) { \
SCOPED_TRACE("@CriticalNative JNI with compiler"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kCritical); \
TestName ## Impl(); \
- }
-
-// Test everything above and also the @CriticalNative compiler, and @CriticalNative generic JNI.
-#define JNI_TEST_CRITICAL(TestName) \
- JNI_TEST(TestName) \
- JNI_TEST_CRITICAL_ONLY(TestName) \
-
-// TODO: maybe, more likely since calling convention changed. @Criticalnative generic JNI support?
-#if 0
- TEST_F(JniCompilerTest, TestName ## GenericCritical) { \
+ } \
+ TEST_F(JniCompilerTest, TestName ## CriticalGeneric) { \
+ SCOPED_TRACE("@CriticalNative JNI with generic"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kCritical); \
TestName ## Impl(); \
}
-#endif
+
+// Test everything: (normal, @FastNative, @CriticalNative) x (compiler, generic).
+#define JNI_TEST_CRITICAL(TestName) \
+ JNI_TEST(TestName) \
+ JNI_TEST_CRITICAL_ONLY(TestName) \
static void expectValidThreadState() {
// Normal JNI always transitions to "Native". Other JNIs stay in the "Runnable" state.
@@ -506,6 +501,7 @@
// Temporarily disable the EXPECT_NUM_STACK_REFERENCES check (for a single test).
struct ScopedDisableCheckNumStackReferences {
ScopedDisableCheckNumStackReferences() {
+ CHECK(sCheckNumStackReferences); // No nested support.
sCheckNumStackReferences = false;
}
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 137cd21..cf633df 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -18,6 +18,7 @@
#ifdef ART_ENABLE_CODEGEN_arm
#include "code_generator_arm.h"
+#include "code_generator_arm_vixl.h"
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -575,11 +576,19 @@
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2: {
- return std::unique_ptr<CodeGenerator>(
- new (arena) arm::CodeGeneratorARM(graph,
- *isa_features.AsArmInstructionSetFeatures(),
- compiler_options,
- stats));
+ if (kArmUseVIXL32) {
+ return std::unique_ptr<CodeGenerator>(
+ new (arena) arm::CodeGeneratorARMVIXL(graph,
+ *isa_features.AsArmInstructionSetFeatures(),
+ compiler_options,
+ stats));
+ } else {
+ return std::unique_ptr<CodeGenerator>(
+ new (arena) arm::CodeGeneratorARM(graph,
+ *isa_features.AsArmInstructionSetFeatures(),
+ compiler_options,
+ stats));
+ }
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
new file mode 100644
index 0000000..b06c84d
--- /dev/null
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -0,0 +1,2145 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_arm_vixl.h"
+
+#include "arch/arm/instruction_set_features_arm.h"
+#include "art_method.h"
+#include "code_generator_utils.h"
+#include "common_arm.h"
+#include "compiled_method.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
+#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
+#include "thread.h"
+#include "utils/arm/assembler_arm_vixl.h"
+#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
+#include "utils/stack_checks.h"
+
+namespace art {
+namespace arm {
+
+namespace vixl32 = vixl::aarch32;
+using namespace vixl32; // NOLINT(build/namespaces)
+
+using helpers::DWARFReg;
+using helpers::FromLowSToD;
+using helpers::OutputRegister;
+using helpers::InputRegisterAt;
+using helpers::InputOperandAt;
+using helpers::OutputSRegister;
+using helpers::InputSRegisterAt;
+
+using RegisterList = vixl32::RegisterList;
+
+static bool ExpectedPairLayout(Location location) {
+ // We expected this for both core and fpu register pairs.
+ return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
+}
+
+static constexpr size_t kArmInstrMaxSizeInBytes = 4u;
+
+#ifdef __
+#error "ARM Codegen VIXL macro-assembler macro already defined."
+#endif
+
+// TODO: Remove with later pop when codegen complete.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler()-> // NOLINT
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, x).Int32Value()
+
+// Marker that code is yet to be, and must, be implemented.
+#define TODO_VIXL32(level) LOG(level) << __PRETTY_FUNCTION__ << " unimplemented "
+
+class DivZeroCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+ explicit DivZeroCheckSlowPathARMVIXL(HDivZeroCheck* instruction)
+ : SlowPathCodeARMVIXL(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARMVIXL* armvixl_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+ __ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
+ armvixl_codegen->InvokeRuntime(kQuickThrowDivZero,
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
+ }
+
+ bool IsFatal() const OVERRIDE { return true; }
+
+ const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARMVIXL"; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARMVIXL);
+};
+
+inline vixl32::Condition ARMCondition(IfCondition cond) {
+ switch (cond) {
+ case kCondEQ: return eq;
+ case kCondNE: return ne;
+ case kCondLT: return lt;
+ case kCondLE: return le;
+ case kCondGT: return gt;
+ case kCondGE: return ge;
+ case kCondB: return lo;
+ case kCondBE: return ls;
+ case kCondA: return hi;
+ case kCondAE: return hs;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+}
+
+// Maps signed condition to unsigned condition.
+inline vixl32::Condition ARMUnsignedCondition(IfCondition cond) {
+ switch (cond) {
+ case kCondEQ: return eq;
+ case kCondNE: return ne;
+ // Signed to unsigned.
+ case kCondLT: return lo;
+ case kCondLE: return ls;
+ case kCondGT: return hi;
+ case kCondGE: return hs;
+ // Unsigned remain unchanged.
+ case kCondB: return lo;
+ case kCondBE: return ls;
+ case kCondA: return hi;
+ case kCondAE: return hs;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+}
+
+inline vixl32::Condition ARMFPCondition(IfCondition cond, bool gt_bias) {
+ // The ARM condition codes can express all the necessary branches, see the
+ // "Meaning (floating-point)" column in the table A8-1 of the ARMv7 reference manual.
+ // There is no dex instruction or HIR that would need the missing conditions
+ // "equal or unordered" or "not equal".
+ switch (cond) {
+ case kCondEQ: return eq;
+ case kCondNE: return ne /* unordered */;
+ case kCondLT: return gt_bias ? cc : lt /* unordered */;
+ case kCondLE: return gt_bias ? ls : le /* unordered */;
+ case kCondGT: return gt_bias ? hi /* unordered */ : gt;
+ case kCondGE: return gt_bias ? cs /* unordered */ : ge;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
+void SlowPathCodeARMVIXL::SaveLiveRegisters(CodeGenerator* codegen ATTRIBUTE_UNUSED,
+ LocationSummary* locations ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+}
+
+void SlowPathCodeARMVIXL::RestoreLiveRegisters(CodeGenerator* codegen ATTRIBUTE_UNUSED,
+ LocationSummary* locations ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::DumpCoreRegister(std::ostream& stream, int reg) const {
+ stream << vixl32::Register(reg);
+}
+
+void CodeGeneratorARMVIXL::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+ stream << vixl32::SRegister(reg);
+}
+
+static uint32_t ComputeSRegisterMask(const SRegisterList& regs) {
+ uint32_t mask = 0;
+ for (uint32_t i = regs.GetFirstSRegister().GetCode();
+ i <= regs.GetLastSRegister().GetCode();
+ ++i) {
+ mask |= (1 << i);
+ }
+ return mask;
+}
+
+#undef __
+
+CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
+ const ArmInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
+ : CodeGenerator(graph,
+ kNumberOfCoreRegisters,
+ kNumberOfSRegisters,
+ kNumberOfRegisterPairs,
+ kCoreCalleeSaves.GetList(),
+ ComputeSRegisterMask(kFpuCalleeSaves),
+ compiler_options,
+ stats),
+ block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ location_builder_(graph, this),
+ instruction_visitor_(graph, this),
+ move_resolver_(graph->GetArena(), this),
+ assembler_(graph->GetArena()),
+ isa_features_(isa_features) {
+ // Always save the LR register to mimic Quick.
+ AddAllocatedRegister(Location::RegisterLocation(LR));
+}
+
+#define __ reinterpret_cast<ArmVIXLAssembler*>(GetAssembler())->GetVIXLAssembler()->
+
+void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
+ GetAssembler()->FinalizeCode();
+ CodeGenerator::Finalize(allocator);
+}
+
+void CodeGeneratorARMVIXL::SetupBlockedRegisters() const {
+ // Don't allocate the dalvik style register pair passing.
+ blocked_register_pairs_[R1_R2] = true;
+
+ // Stack register, LR and PC are always reserved.
+ blocked_core_registers_[SP] = true;
+ blocked_core_registers_[LR] = true;
+ blocked_core_registers_[PC] = true;
+
+ // Reserve thread register.
+ blocked_core_registers_[TR] = true;
+
+ // Reserve temp register.
+ blocked_core_registers_[IP] = true;
+
+ if (GetGraph()->IsDebuggable()) {
+ // Stubs do not save callee-save floating point registers. If the graph
+ // is debuggable, we need to deal with these registers differently. For
+ // now, just block them.
+ for (uint32_t i = kFpuCalleeSaves.GetFirstSRegister().GetCode();
+ i <= kFpuCalleeSaves.GetLastSRegister().GetCode();
+ ++i) {
+ blocked_fpu_registers_[i] = true;
+ }
+ }
+
+ UpdateBlockedPairRegisters();
+}
+
+// Blocks all register pairs containing blocked core registers.
+void CodeGeneratorARMVIXL::UpdateBlockedPairRegisters() const {
+ for (int i = 0; i < kNumberOfRegisterPairs; i++) {
+ ArmManagedRegister current =
+ ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
+ if (blocked_core_registers_[current.AsRegisterPairLow()]
+ || blocked_core_registers_[current.AsRegisterPairHigh()]) {
+ blocked_register_pairs_[i] = true;
+ }
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instruction,
+ HBasicBlock* successor) {
+ TODO_VIXL32(FATAL);
+}
+
+InstructionCodeGeneratorARMVIXL::InstructionCodeGeneratorARMVIXL(HGraph* graph,
+ CodeGeneratorARMVIXL* codegen)
+ : InstructionCodeGenerator(graph, codegen),
+ assembler_(codegen->GetAssembler()),
+ codegen_(codegen) {}
+
+void CodeGeneratorARMVIXL::ComputeSpillMask() {
+ core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
+ DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
+ // There is no easy instruction to restore just the PC on thumb2. We spill and
+ // restore another arbitrary register.
+ core_spill_mask_ |= (1 << kCoreAlwaysSpillRegister.GetCode());
+ fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
+ // We use vpush and vpop for saving and restoring floating point registers, which take
+ // a SRegister and the number of registers to save/restore after that SRegister. We
+ // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
+ // but in the range.
+ if (fpu_spill_mask_ != 0) {
+ uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
+ uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
+ for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
+ fpu_spill_mask_ |= (1 << i);
+ }
+ }
+}
+
+// Emits the method prologue: optional stack-overflow probe, callee-save core
+// and FP spills (with matching CFI records), frame allocation, and storing the
+// current method at the bottom of the frame.
+void CodeGeneratorARMVIXL::GenerateFrameEntry() {
+ bool skip_overflow_check =
+ IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
+ DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
+ __ Bind(&frame_entry_label_);
+
+ if (HasEmptyFrame()) {
+ return;
+ }
+
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ if (!skip_overflow_check) {
+ // Probe the page at (sp - reserved bytes): a stack overflow makes this load
+ // fault, presumably handled via the implicit stack-overflow mechanism the
+ // DCHECK above requires — TODO confirm against the fault handler.
+ __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
+ // The load must immediately precede RecordPcInfo.
+ {
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ kArmInstrMaxSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ __ ldr(temp, MemOperand(temp));
+ RecordPcInfo(nullptr, 0);
+ }
+ }
+
+ // Spill callee-save core registers and describe them to the CFI.
+ __ Push(RegisterList(core_spill_mask_));
+ GetAssembler()->cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(core_spill_mask_));
+ GetAssembler()->cfi().RelOffsetForMany(DWARFReg(kMethodRegister),
+ 0,
+ core_spill_mask_,
+ kArmWordSize);
+ if (fpu_spill_mask_ != 0) {
+ uint32_t first = LeastSignificantBit(fpu_spill_mask_);
+
+ // Check that list is contiguous.
+ DCHECK_EQ(fpu_spill_mask_ >> CTZ(fpu_spill_mask_), ~0u >> (32 - POPCOUNT(fpu_spill_mask_)));
+
+ __ Vpush(SRegisterList(vixl32::SRegister(first), POPCOUNT(fpu_spill_mask_)));
+ GetAssembler()->cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
+ GetAssembler()->cfi().RelOffsetForMany(DWARFReg(s0),
+ 0,
+ fpu_spill_mask_,
+ kArmWordSize);
+ }
+ // Allocate the rest of the frame beyond the spill area.
+ int adjust = GetFrameSize() - FrameEntrySpillSize();
+ __ Sub(sp, sp, adjust);
+ GetAssembler()->cfi().AdjustCFAOffset(adjust);
+ // Store the current method (in kMethodRegister) at the bottom of the frame.
+ GetAssembler()->StoreToOffset(kStoreWord, kMethodRegister, sp, 0);
+}
+
+// Emits the method epilogue: deallocates the frame, restores FP spills, and
+// returns by popping the saved LR slot directly into PC.
+void CodeGeneratorARMVIXL::GenerateFrameExit() {
+ if (HasEmptyFrame()) {
+ __ Bx(lr);
+ return;
+ }
+ // Remember/Restore CFI state so the epilogue does not disturb unwind info
+ // for code emitted after it.
+ GetAssembler()->cfi().RememberState();
+ int adjust = GetFrameSize() - FrameEntrySpillSize();
+ __ Add(sp, sp, adjust);
+ GetAssembler()->cfi().AdjustCFAOffset(-adjust);
+ if (fpu_spill_mask_ != 0) {
+ uint32_t first = LeastSignificantBit(fpu_spill_mask_);
+
+ // Check that list is contiguous.
+ DCHECK_EQ(fpu_spill_mask_ >> CTZ(fpu_spill_mask_), ~0u >> (32 - POPCOUNT(fpu_spill_mask_)));
+
+ __ Vpop(SRegisterList(vixl32::SRegister(first), POPCOUNT(fpu_spill_mask_)));
+ GetAssembler()->cfi().AdjustCFAOffset(
+ -static_cast<int>(kArmWordSize) * POPCOUNT(fpu_spill_mask_));
+ GetAssembler()->cfi().RestoreMany(DWARFReg(vixl32::SRegister(0)),
+ fpu_spill_mask_);
+ }
+ // Pop LR into PC to return.
+ DCHECK_NE(core_spill_mask_ & (1 << kLrCode), 0U);
+ uint32_t pop_mask = (core_spill_mask_ & (~(1 << kLrCode))) | 1 << kPcCode;
+ __ Pop(RegisterList(pop_mask));
+ GetAssembler()->cfi().RestoreState();
+ GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
+}
+
+// Binds the label associated with `block` at the current code position.
+void CodeGeneratorARMVIXL::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
+}
+
+// The following CodeGenerator overrides are still stubs in the VIXL32 backend:
+// each aborts via TODO_VIXL32(FATAL) until implemented.
+void CodeGeneratorARMVIXL::MoveConstant(Location destination, int32_t value) {
+ TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+ TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ TODO_VIXL32(FATAL);
+}
+
+uintptr_t CodeGeneratorARMVIXL::GetAddressOf(HBasicBlock* block) {
+ TODO_VIXL32(FATAL);
+ return 0;
+}
+
+// Null-check emission is not implemented yet in the VIXL32 backend.
+void CodeGeneratorARMVIXL::GenerateImplicitNullCheck(HNullCheck* null_check) {
+ TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::GenerateExplicitNullCheck(HNullCheck* null_check) {
+ TODO_VIXL32(FATAL);
+}
+
+// Calls a quick runtime entrypoint and, when the entrypoint requires a stack
+// map, records the PC info for `instruction` at `dex_pc`.
+void CodeGeneratorARMVIXL::InvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
+ GenerateInvokeRuntime(GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value());
+ if (EntrypointRequiresStackMap(entrypoint)) {
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ }
+}
+
+// Calls a runtime entrypoint at a raw thread offset without recording PC info;
+// only valid for calls validated by ValidateInvokeRuntimeWithoutRecordingPcInfo.
+void CodeGeneratorARMVIXL::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path) {
+ ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
+ GenerateInvokeRuntime(entry_point_offset);
+}
+
+// Loads the entrypoint from the thread register at `entry_point_offset` into
+// LR and branches to it (Blx), so LR holds the return address.
+void CodeGeneratorARMVIXL::GenerateInvokeRuntime(int32_t entry_point_offset) {
+ GetAssembler()->LoadFromOffset(kLoadWord, lr, tr, entry_point_offset);
+ __ Blx(lr);
+}
+
+// Check if the desired_string_load_kind is supported. If it is, return it,
+// otherwise return a fall-back kind that should be used instead.
+// NOTE: still a stub in the VIXL32 backend (aborts via TODO_VIXL32).
+HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind(
+ HLoadString::LoadKind desired_string_load_kind) {
+ TODO_VIXL32(FATAL);
+ return desired_string_load_kind;
+}
+
+// Check if the desired_class_load_kind is supported. If it is, return it,
+// otherwise return a fall-back kind that should be used instead.
+// NOTE: still a stub in the VIXL32 backend.
+HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
+ HLoadClass::LoadKind desired_class_load_kind) {
+ TODO_VIXL32(FATAL);
+ return desired_class_load_kind;
+}
+
+// Check if the desired_dispatch_info is supported. If it is, return it,
+// otherwise return a fall-back info that should be used instead.
+// NOTE: still a stub in the VIXL32 backend.
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) {
+ TODO_VIXL32(FATAL);
+ return desired_dispatch_info;
+}
+
+// Generate a call to a static or direct method.
+// NOTE: still a stub in the VIXL32 backend.
+void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp) {
+ TODO_VIXL32(FATAL);
+}
+
+// Generate a call to a virtual method.
+// NOTE: still a stub in the VIXL32 backend.
+void CodeGeneratorARMVIXL::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) {
+ TODO_VIXL32(FATAL);
+}
+
+// Copy the result of a call into the given target.
+// NOTE: still a stub in the VIXL32 backend.
+void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, Primitive::Type type) {
+ TODO_VIXL32(FATAL);
+}
+
+// Emits control flow for an unconditional jump to `successor`, inserting a
+// suspend check on loop back edges (or after an entry-block suspend check) and
+// eliding the branch when `successor` is the fall-through block.
+void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock* successor) {
+ DCHECK(!successor->IsExitBlock());
+ HBasicBlock* block = got->GetBlock();
+ HInstruction* previous = got->GetPrevious();
+ HLoopInformation* info = block->GetLoopInformation();
+
+ if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ // Back edge with a suspend check: the suspend check emits the branch.
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
+ GenerateSuspendCheck(info->GetSuspendCheck(), successor);
+ return;
+ }
+ if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
+ GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
+ }
+ if (!codegen_->GoesToNextBlock(block, successor)) {
+ __ B(codegen_->GetLabelOf(successor));
+ }
+}
+
+// HGoto and HExit use no registers; HExit generates no code.
+void LocationsBuilderARMVIXL::VisitGoto(HGoto* got) {
+ got->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitGoto(HGoto* got) {
+ HandleGoto(got, got->GetSuccessor());
+}
+
+void LocationsBuilderARMVIXL::VisitExit(HExit* exit) {
+ exit->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
+}
+
+// Emits a VCMP of the instruction's two FP inputs (float or double), using the
+// immediate-0.0 encoding when the right-hand side is a constant zero.
+void InstructionCodeGeneratorARMVIXL::GenerateVcmp(HInstruction* instruction) {
+ Primitive::Type type = instruction->InputAt(0)->GetType();
+ Location lhs_loc = instruction->GetLocations()->InAt(0);
+ Location rhs_loc = instruction->GetLocations()->InAt(1);
+ if (rhs_loc.IsConstant()) {
+ // 0.0 is the only immediate that can be encoded directly in
+ // a VCMP instruction.
+ //
+ // Both the JLS (section 15.20.1) and the JVMS (section 6.5)
+ // specify that in a floating-point comparison, positive zero
+ // and negative zero are considered equal, so we can use the
+ // literal 0.0 for both cases here.
+ //
+ // Note however that some methods (Float.equal, Float.compare,
+ // Float.compareTo, Double.equal, Double.compare,
+ // Double.compareTo, Math.max, Math.min, StrictMath.max,
+ // StrictMath.min) consider 0.0 to be (strictly) greater than
+ // -0.0. So if we ever translate calls to these methods into a
+ // HCompare instruction, we must handle the -0.0 case with
+ // care here.
+ DCHECK(rhs_loc.GetConstant()->IsArithmeticZero());
+ if (type == Primitive::kPrimFloat) {
+ __ Vcmp(F32, InputSRegisterAt(instruction, 0), 0.0);
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ __ Vcmp(F64, FromLowSToD(lhs_loc.AsFpuRegisterPairLow<vixl32::SRegister>()), 0.0);
+ }
+ } else {
+ // Register-register compare.
+ if (type == Primitive::kPrimFloat) {
+ __ Vcmp(F32, InputSRegisterAt(instruction, 0), InputSRegisterAt(instruction, 1));
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ __ Vcmp(F64,
+ FromLowSToD(lhs_loc.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(rhs_loc.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ }
+ }
+}
+
+// After a GenerateVcmp, transfers the FP flags to APSR and branches to
+// `true_label` on the condition; the false path falls through (false_label is
+// unused here — the caller is expected to place/bind it after this code).
+void InstructionCodeGeneratorARMVIXL::GenerateFPJumps(HCondition* cond,
+ vixl32::Label* true_label,
+ vixl32::Label* false_label ATTRIBUTE_UNUSED) {
+ // To branch on the result of the FP compare we transfer FPSCR to APSR (encoded as PC in VMRS).
+ __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+ __ B(ARMFPCondition(cond->GetCondition(), cond->IsGtBias()), true_label);
+}
+
+// Emits a 64-bit compare-and-branch as two 32-bit compares: first the high
+// words (signed semantics), then — only if the highs are equal — the low words
+// with an unsigned condition. Branches to `true_label`/`false_label`.
+void InstructionCodeGeneratorARMVIXL::GenerateLongComparesAndJumps(HCondition* cond,
+ vixl32::Label* true_label,
+ vixl32::Label* false_label) {
+ LocationSummary* locations = cond->GetLocations();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+ IfCondition if_cond = cond->GetCondition();
+
+ vixl32::Register left_high = left.AsRegisterPairHigh<vixl32::Register>();
+ vixl32::Register left_low = left.AsRegisterPairLow<vixl32::Register>();
+ IfCondition true_high_cond = if_cond;
+ IfCondition false_high_cond = cond->GetOppositeCondition();
+ vixl32::Condition final_condition = ARMUnsignedCondition(if_cond); // unsigned on lower part
+
+ // Set the conditions for the test, remembering that == needs to be
+ // decided using the low words.
+ // TODO: consider avoiding jumps with temporary and CMP low+SBC high
+ switch (if_cond) {
+ case kCondEQ:
+ case kCondNE:
+ // Nothing to do.
+ break;
+ case kCondLT:
+ false_high_cond = kCondGT;
+ break;
+ case kCondLE:
+ true_high_cond = kCondLT;
+ break;
+ case kCondGT:
+ false_high_cond = kCondLT;
+ break;
+ case kCondGE:
+ true_high_cond = kCondGT;
+ break;
+ case kCondB:
+ false_high_cond = kCondA;
+ break;
+ case kCondBE:
+ true_high_cond = kCondB;
+ break;
+ case kCondA:
+ false_high_cond = kCondB;
+ break;
+ case kCondAE:
+ true_high_cond = kCondA;
+ break;
+ }
+ if (right.IsConstant()) {
+ // Constant RHS: compare against the split 32-bit halves of the literal.
+ int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
+ int32_t val_low = Low32Bits(value);
+ int32_t val_high = High32Bits(value);
+
+ __ Cmp(left_high, val_high);
+ if (if_cond == kCondNE) {
+ __ B(ARMCondition(true_high_cond), true_label);
+ } else if (if_cond == kCondEQ) {
+ __ B(ARMCondition(false_high_cond), false_label);
+ } else {
+ __ B(ARMCondition(true_high_cond), true_label);
+ __ B(ARMCondition(false_high_cond), false_label);
+ }
+ // Must be equal high, so compare the lows.
+ __ Cmp(left_low, val_low);
+ } else {
+ vixl32::Register right_high = right.AsRegisterPairHigh<vixl32::Register>();
+ vixl32::Register right_low = right.AsRegisterPairLow<vixl32::Register>();
+
+ __ Cmp(left_high, right_high);
+ if (if_cond == kCondNE) {
+ __ B(ARMCondition(true_high_cond), true_label);
+ } else if (if_cond == kCondEQ) {
+ __ B(ARMCondition(false_high_cond), false_label);
+ } else {
+ __ B(ARMCondition(true_high_cond), true_label);
+ __ B(ARMCondition(false_high_cond), false_label);
+ }
+ // Must be equal high, so compare the lows.
+ __ Cmp(left_low, right_low);
+ }
+ // The last comparison might be unsigned.
+ // TODO: optimize cases where this is always true/false
+ __ B(final_condition, true_label);
+}
+
+// Emits compare-and-branch for long/FP conditions. A nullptr target means
+// "fall through"; a local label is bound at the end to stand in for it.
+void InstructionCodeGeneratorARMVIXL::GenerateCompareTestAndBranch(HCondition* condition,
+ vixl32::Label* true_target_in,
+ vixl32::Label* false_target_in) {
+ // Generated branching requires both targets to be explicit. If either of the
+ // targets is nullptr (fallthrough) use and bind `fallthrough` instead.
+ vixl32::Label fallthrough;
+ vixl32::Label* true_target = (true_target_in == nullptr) ? &fallthrough : true_target_in;
+ vixl32::Label* false_target = (false_target_in == nullptr) ? &fallthrough : false_target_in;
+
+ Primitive::Type type = condition->InputAt(0)->GetType();
+ switch (type) {
+ case Primitive::kPrimLong:
+ GenerateLongComparesAndJumps(condition, true_target, false_target);
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ GenerateVcmp(condition);
+ GenerateFPJumps(condition, true_target, false_target);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected compare type " << type;
+ }
+
+ // The helpers above fall through on the false path; jump to the explicit
+ // false target if it is not the fall-through label.
+ if (false_target != &fallthrough) {
+ __ B(false_target);
+ }
+
+ if (true_target_in == nullptr || false_target_in == nullptr) {
+ __ Bind(&fallthrough);
+ }
+}
+
+// Emits a conditional branch on the condition input of `instruction` at
+// `condition_input_index`. Either target may be nullptr to indicate
+// fall-through. Handles constant conditions, materialized booleans, and
+// conditions folded into the branch (including long/FP compares).
+void InstructionCodeGeneratorARMVIXL::GenerateTestAndBranch(HInstruction* instruction,
+ size_t condition_input_index,
+ vixl32::Label* true_target,
+ vixl32::Label* false_target) {
+ HInstruction* cond = instruction->InputAt(condition_input_index);
+
+ if (true_target == nullptr && false_target == nullptr) {
+ // Nothing to do. The code always falls through.
+ return;
+ } else if (cond->IsIntConstant()) {
+ // Constant condition, statically compared against "true" (integer value 1).
+ if (cond->AsIntConstant()->IsTrue()) {
+ if (true_target != nullptr) {
+ __ B(true_target);
+ }
+ } else {
+ DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
+ if (false_target != nullptr) {
+ __ B(false_target);
+ }
+ }
+ return;
+ }
+
+ // The following code generates these patterns:
+ // (1) true_target == nullptr && false_target != nullptr
+ // - opposite condition true => branch to false_target
+ // (2) true_target != nullptr && false_target == nullptr
+ // - condition true => branch to true_target
+ // (3) true_target != nullptr && false_target != nullptr
+ // - condition true => branch to true_target
+ // - branch to false_target
+ if (IsBooleanValueOrMaterializedCondition(cond)) {
+ // Condition has been materialized, compare the output to 0.
+ if (kIsDebugBuild) {
+ Location cond_val = instruction->GetLocations()->InAt(condition_input_index);
+ DCHECK(cond_val.IsRegister());
+ }
+ if (true_target == nullptr) {
+ __ Cbz(InputRegisterAt(instruction, condition_input_index), false_target);
+ } else {
+ __ Cbnz(InputRegisterAt(instruction, condition_input_index), true_target);
+ }
+ } else {
+ // Condition has not been materialized. Use its inputs as the comparison and
+ // its condition as the branch condition.
+ HCondition* condition = cond->AsCondition();
+
+ // If this is a long or FP comparison that has been folded into
+ // the HCondition, generate the comparison directly.
+ Primitive::Type type = condition->InputAt(0)->GetType();
+ if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
+ GenerateCompareTestAndBranch(condition, true_target, false_target);
+ return;
+ }
+
+ // Integer compare: register-register or register-immediate.
+ LocationSummary* locations = cond->GetLocations();
+ DCHECK(locations->InAt(0).IsRegister());
+ vixl32::Register left = InputRegisterAt(cond, 0);
+ Location right = locations->InAt(1);
+ if (right.IsRegister()) {
+ __ Cmp(left, InputRegisterAt(cond, 1));
+ } else {
+ DCHECK(right.IsConstant());
+ __ Cmp(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+ }
+ if (true_target == nullptr) {
+ __ B(ARMCondition(condition->GetOppositeCondition()), false_target);
+ } else {
+ __ B(ARMCondition(condition->GetCondition()), true_target);
+ }
+ }
+
+ // If neither branch falls through (case 3), the conditional branch to `true_target`
+ // was already emitted (case 2) and we need to emit a jump to `false_target`.
+ if (true_target != nullptr && false_target != nullptr) {
+ __ B(false_target);
+ }
+}
+
+// HIf needs a register only when its condition is a materialized boolean.
+void LocationsBuilderARMVIXL::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+// Resolves each successor to its label, or nullptr when it is the
+// fall-through block, then delegates to GenerateTestAndBranch.
+void InstructionCodeGeneratorARMVIXL::VisitIf(HIf* if_instr) {
+ HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
+ HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
+ vixl32::Label* true_target =
+ codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
+ nullptr : codegen_->GetLabelOf(true_successor);
+ vixl32::Label* false_target =
+ codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
+ nullptr : codegen_->GetLabelOf(false_successor);
+ GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+}
+
+// Emits a single NOP instruction.
+void CodeGeneratorARMVIXL::GenerateNop() {
+ __ Nop();
+}
+
+// Allocates locations for a comparison: register/constant inputs for integral
+// and long types, FPU registers for float/double; an output register only when
+// the condition is materialized (not emitted at its use site).
+void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
+ // Handle the long/FP comparisons made in instruction simplification.
+ switch (cond->InputAt(0)->GetType()) {
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+ if (!cond->IsEmittedAtUseSite()) {
+ // The output may overlap the long inputs' register pairs.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ }
+ break;
+
+ // TODO: https://android-review.googlesource.com/#/c/252265/
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ if (!cond->IsEmittedAtUseSite()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ break;
+
+ default:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+ if (!cond->IsEmittedAtUseSite()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ }
+}
+
+// Materializes a comparison into its output register (0 or 1). Integer
+// conditions use a CMP + ITE/MOV pair; long and FP conditions reuse the
+// branch helpers and convert the jumps into 0/1.
+void InstructionCodeGeneratorARMVIXL::HandleCondition(HCondition* cond) {
+ if (cond->IsEmittedAtUseSite()) {
+ return;
+ }
+
+ LocationSummary* locations = cond->GetLocations();
+ Location right = locations->InAt(1);
+ vixl32::Register out = OutputRegister(cond);
+ vixl32::Label true_label, false_label;
+
+ switch (cond->InputAt(0)->GetType()) {
+ default: {
+ // Integer case.
+ if (right.IsRegister()) {
+ __ Cmp(InputRegisterAt(cond, 0), InputRegisterAt(cond, 1));
+ } else {
+ DCHECK(right.IsConstant());
+ __ Cmp(InputRegisterAt(cond, 0), CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+ }
+ {
+ // Exactly-sized scope: one IT plus two conditional MOVs.
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ kArmInstrMaxSizeInBytes * 3u,
+ CodeBufferCheckScope::kMaximumSize);
+ __ ite(ARMCondition(cond->GetCondition()));
+ __ mov(ARMCondition(cond->GetCondition()), OutputRegister(cond), 1);
+ __ mov(ARMCondition(cond->GetOppositeCondition()), OutputRegister(cond), 0);
+ }
+ return;
+ }
+ case Primitive::kPrimLong:
+ GenerateLongComparesAndJumps(cond, &true_label, &false_label);
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ GenerateVcmp(cond);
+ GenerateFPJumps(cond, &true_label, &false_label);
+ break;
+ }
+
+ // Convert the jumps into the result.
+ vixl32::Label done_label;
+
+ // False case: result = 0.
+ __ Bind(&false_label);
+ __ Mov(out, 0);
+ __ B(&done_label);
+
+ // True case: result = 1.
+ __ Bind(&true_label);
+ __ Mov(out, 1);
+ __ Bind(&done_label);
+}
+
+// Boilerplate visitors: every comparison IR node (Equal, NotEqual, LessThan,
+// ..., AboveOrEqual) delegates to HandleCondition in both the locations
+// builder and the instruction code generator.
+void LocationsBuilderARMVIXL::VisitEqual(HEqual* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitEqual(HEqual* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitNotEqual(HNotEqual* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitNotEqual(HNotEqual* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitLessThan(HLessThan* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitLessThan(HLessThan* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitGreaterThan(HGreaterThan* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitGreaterThan(HGreaterThan* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitBelow(HBelow* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitBelow(HBelow* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitAbove(HAbove* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitAbove(HAbove* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+// Int and long constants live in constant locations and emit no code; their
+// values are materialized at each use site.
+void LocationsBuilderARMVIXL::VisitIntConstant(HIntConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARMVIXL::VisitLongConstant(HLongConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+// Memory barriers and returns: barriers delegate to the codegen helper;
+// both return forms end by emitting the frame exit.
+void LocationsBuilderARMVIXL::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ memory_barrier->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ codegen_->GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+}
+
+void LocationsBuilderARMVIXL::VisitReturnVoid(HReturnVoid* ret) {
+ ret->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
+ codegen_->GenerateFrameExit();
+}
+
+// The returned value must already be in the calling-convention return
+// location, as required by the input constraint below.
+void LocationsBuilderARMVIXL::VisitReturn(HReturn* ret) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
+ locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
+ codegen_->GenerateFrameExit();
+}
+
+// Allocates locations for every supported primitive type conversion. Most
+// conversions are register-to-register; float/double-to-long and
+// long-to-float go through runtime calls and therefore use the invoke
+// calling convention (kCallOnMainOnly).
+void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) {
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ DCHECK_NE(result_type, input_type);
+
+ // The float-to-long, double-to-long and long-to-float type conversions
+ // rely on a call to the runtime.
+ LocationSummary::CallKind call_kind =
+ (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
+ && result_type == Primitive::kPrimLong)
+ || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat))
+ ? LocationSummary::kCallOnMainOnly
+ : LocationSummary::kNoCall;
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+
+ // The Java language does not allow treating boolean as an integral type but
+ // our bit representation makes it safe.
+
+ switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to byte is a result of code transformations.
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimShort:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to short is a result of code transformations.
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-short' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ // Processing a Dex `float-to-int' instruction.
+ // The FPU temp holds the truncated value before moving it to the core register.
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimDouble:
+ // Processing a Dex `double-to-int' instruction.
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-long' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat: {
+ // Processing a Dex `float-to-long' instruction.
+ // Runtime call: input in the FP argument register, result in R0/R1.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::FpuRegisterLocation(
+ calling_convention.GetFpuRegisterAt(0)));
+ locations->SetOut(Location::RegisterPairLocation(R0, R1));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ // Processing a Dex `double-to-long' instruction.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::FpuRegisterPairLocation(
+ calling_convention.GetFpuRegisterAt(0),
+ calling_convention.GetFpuRegisterAt(1)));
+ locations->SetOut(Location::RegisterPairLocation(R0, R1));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to char is a result of code transformations.
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // Processing a Dex `int-to-char' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-float' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimLong: {
+ // Processing a Dex `long-to-float' instruction.
+ // Runtime call: long input in the argument register pair.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ break;
+ }
+
+ case Primitive::kPrimDouble:
+ // Processing a Dex `double-to-float' instruction.
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ };
+ // NOTE(review): stray ';' after the switch above — harmless empty statement.
+ break;
+
+ case Primitive::kPrimDouble:
+ switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-double' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-double' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimFloat:
+ // Processing a Dex `float-to-double' instruction.
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ };
+ // NOTE(review): stray ';' after the switch above — harmless empty statement.
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ DCHECK_NE(result_type, input_type);
+ switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to byte is a result of code transformations.
+ __ Sbfx(OutputRegister(conversion), in.AsRegisterPairLow<vixl32::Register>(), 0, 8);
+ break;
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ __ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 8);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimShort:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to short is a result of code transformations.
+ __ Sbfx(OutputRegister(conversion), in.AsRegisterPairLow<vixl32::Register>(), 0, 16);
+ break;
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-short' instruction.
+ __ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 16);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ DCHECK(out.IsRegister());
+ if (in.IsRegisterPair()) {
+ __ Mov(OutputRegister(conversion), in.AsRegisterPairLow<vixl32::Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ GetAssembler()->LoadFromOffset(kLoadWord,
+ OutputRegister(conversion),
+ sp,
+ in.GetStackIndex());
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ Mov(OutputRegister(conversion), static_cast<int32_t>(value));
+ }
+ break;
+
+ case Primitive::kPrimFloat: {
+ // Processing a Dex `float-to-int' instruction.
+ vixl32::SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<vixl32::SRegister>();
+ __ Vcvt(I32, F32, temp, InputSRegisterAt(conversion, 0));
+ __ Vmov(OutputRegister(conversion), temp);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ // Processing a Dex `double-to-int' instruction.
+ vixl32::SRegister temp_s =
+ locations->GetTemp(0).AsFpuRegisterPairLow<vixl32::SRegister>();
+ __ Vcvt(I32, F64, temp_s, FromLowSToD(in.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ __ Vmov(OutputRegister(conversion), temp_s);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-long' instruction.
+ DCHECK(out.IsRegisterPair());
+ DCHECK(in.IsRegister());
+ __ Mov(out.AsRegisterPairLow<vixl32::Register>(), InputRegisterAt(conversion, 0));
+ // Sign extension.
+ __ Asr(out.AsRegisterPairHigh<vixl32::Register>(),
+ out.AsRegisterPairLow<vixl32::Register>(),
+ 31);
+ break;
+
+ case Primitive::kPrimFloat:
+ // Processing a Dex `float-to-long' instruction.
+ codegen_->InvokeRuntime(kQuickF2l, conversion, conversion->GetDexPc());
+ CheckEntrypointTypes<kQuickF2l, int64_t, float>();
+ break;
+
+ case Primitive::kPrimDouble:
+ // Processing a Dex `double-to-long' instruction.
+ codegen_->InvokeRuntime(kQuickD2l, conversion, conversion->GetDexPc());
+ CheckEntrypointTypes<kQuickD2l, int64_t, double>();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to char is a result of code transformations.
+ __ Ubfx(OutputRegister(conversion), in.AsRegisterPairLow<vixl32::Register>(), 0, 16);
+ break;
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // Processing a Dex `int-to-char' instruction.
+ __ Ubfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 16);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar: {
+ // Processing a Dex `int-to-float' instruction.
+ __ Vmov(OutputSRegister(conversion), InputRegisterAt(conversion, 0));
+ __ Vcvt(F32, I32, OutputSRegister(conversion), OutputSRegister(conversion));
+ break;
+ }
+
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-float' instruction.
+ codegen_->InvokeRuntime(kQuickL2f, conversion, conversion->GetDexPc());
+ CheckEntrypointTypes<kQuickL2f, float, int64_t>();
+ break;
+
+ case Primitive::kPrimDouble:
+ // Processing a Dex `double-to-float' instruction.
+ __ Vcvt(F32,
+ F64,
+ OutputSRegister(conversion),
+ FromLowSToD(in.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimDouble:
+ switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar: {
+ // Processing a Dex `int-to-double' instruction.
+ __ Vmov(out.AsFpuRegisterPairLow<vixl32::SRegister>(), InputRegisterAt(conversion, 0));
+ __ Vcvt(F64,
+ I32,
+ FromLowSToD(out.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ out.AsFpuRegisterPairLow<vixl32::SRegister>());
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ // Processing a Dex `long-to-double' instruction.
+ vixl32::Register low = in.AsRegisterPairLow<vixl32::Register>();
+ vixl32::Register high = in.AsRegisterPairHigh<vixl32::Register>();
+
+ vixl32::SRegister out_s = out.AsFpuRegisterPairLow<vixl32::SRegister>();
+ vixl32::DRegister out_d = FromLowSToD(out_s);
+
+ vixl32::SRegister temp_s =
+ locations->GetTemp(0).AsFpuRegisterPairLow<vixl32::SRegister>();
+ vixl32::DRegister temp_d = FromLowSToD(temp_s);
+
+ vixl32::SRegister constant_s =
+ locations->GetTemp(1).AsFpuRegisterPairLow<vixl32::SRegister>();
+ vixl32::DRegister constant_d = FromLowSToD(constant_s);
+
+ // temp_d = int-to-double(high)
+ __ Vmov(temp_s, high);
+ __ Vcvt(F64, I32, temp_d, temp_s);
+ // constant_d = k2Pow32EncodingForDouble
+ __ Vmov(F64,
+ constant_d,
+ vixl32::DOperand(bit_cast<double, int64_t>(k2Pow32EncodingForDouble)));
+ // out_d = unsigned-to-double(low)
+ __ Vmov(out_s, low);
+ __ Vcvt(F64, U32, out_d, out_s);
+ // out_d += temp_d * constant_d
+ __ Vmla(F64, out_d, temp_d, constant_d);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ // Processing a Dex `float-to-double' instruction.
+ __ Vcvt(F64,
+ F32,
+ FromLowSToD(out.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ InputSRegisterAt(conversion, 0));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitAdd(HAdd* add) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
+ switch (add->GetResultType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ // TODO: https://android-review.googlesource.com/#/c/254144/
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected add type " << add->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitAdd(HAdd* add) {
+ LocationSummary* locations = add->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+
+ switch (add->GetResultType()) {
+ case Primitive::kPrimInt: {
+ __ Add(OutputRegister(add), InputRegisterAt(add, 0), InputOperandAt(add, 1));
+ }
+ break;
+
+ // TODO: https://android-review.googlesource.com/#/c/254144/
+ case Primitive::kPrimLong: {
+ DCHECK(second.IsRegisterPair());
+ __ Adds(out.AsRegisterPairLow<vixl32::Register>(),
+ first.AsRegisterPairLow<vixl32::Register>(),
+ Operand(second.AsRegisterPairLow<vixl32::Register>()));
+ __ Adc(out.AsRegisterPairHigh<vixl32::Register>(),
+ first.AsRegisterPairHigh<vixl32::Register>(),
+ second.AsRegisterPairHigh<vixl32::Register>());
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ __ Vadd(F32, OutputSRegister(add), InputSRegisterAt(add, 0), InputSRegisterAt(add, 1));
+ }
+ break;
+
+ case Primitive::kPrimDouble:
+ __ Vadd(F64,
+ FromLowSToD(out.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(first.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(second.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected add type " << add->GetResultType();
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitSub(HSub* sub) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
+ switch (sub->GetResultType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ // TODO: https://android-review.googlesource.com/#/c/254144/
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitSub(HSub* sub) {
+ LocationSummary* locations = sub->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ switch (sub->GetResultType()) {
+ case Primitive::kPrimInt: {
+ if (second.IsRegister()) {
+ __ Sub(OutputRegister(sub), InputRegisterAt(sub, 0), InputRegisterAt(sub, 1));
+ } else {
+ __ Sub(OutputRegister(sub),
+ InputRegisterAt(sub, 0),
+ second.GetConstant()->AsIntConstant()->GetValue());
+ }
+ break;
+ }
+
+ // TODO: https://android-review.googlesource.com/#/c/254144/
+ case Primitive::kPrimLong: {
+ DCHECK(second.IsRegisterPair());
+ __ Subs(out.AsRegisterPairLow<vixl32::Register>(),
+ first.AsRegisterPairLow<vixl32::Register>(),
+ Operand(second.AsRegisterPairLow<vixl32::Register>()));
+ __ Sbc(out.AsRegisterPairHigh<vixl32::Register>(),
+ first.AsRegisterPairHigh<vixl32::Register>(),
+ Operand(second.AsRegisterPairHigh<vixl32::Register>()));
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ __ Vsub(F32, OutputSRegister(sub), InputSRegisterAt(sub, 0), InputSRegisterAt(sub, 1));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ Vsub(F64,
+ FromLowSToD(out.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(first.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(second.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitMul(HMul* mul) {
+ LocationSummary* locations = mul->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt: {
+ __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
+ break;
+ }
+ case Primitive::kPrimLong: {
+ vixl32::Register out_hi = out.AsRegisterPairHigh<vixl32::Register>();
+ vixl32::Register out_lo = out.AsRegisterPairLow<vixl32::Register>();
+ vixl32::Register in1_hi = first.AsRegisterPairHigh<vixl32::Register>();
+ vixl32::Register in1_lo = first.AsRegisterPairLow<vixl32::Register>();
+ vixl32::Register in2_hi = second.AsRegisterPairHigh<vixl32::Register>();
+ vixl32::Register in2_lo = second.AsRegisterPairLow<vixl32::Register>();
+
+ // Extra checks to guard against an output/input register overlap caused by the
+ // existence of the R1_R2 pair: the algorithm is wrong if out.hi is either in1.lo
+ // or in2.lo (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2).
+ DCHECK_NE(out_hi.GetCode(), in1_lo.GetCode());
+ DCHECK_NE(out_hi.GetCode(), in2_lo.GetCode());
+
+ // input: in1 - 64 bits, in2 - 64 bits
+ // output: out
+ // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo) * 2^32 + in1.lo * in2.lo
+ // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
+ // parts: out.lo = (in1.lo * in2.lo)[31:0]
+
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ // temp <- in1.lo * in2.hi
+ __ Mul(temp, in1_lo, in2_hi);
+ // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
+ __ Mla(out_hi, in1_hi, in2_lo, temp);
+ // out.lo <- (in1.lo * in2.lo)[31:0];
+ __ Umull(out_lo, temp, in1_lo, in2_lo);
+ // out.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
+ __ Add(out_hi, out_hi, Operand(temp));
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ __ Vmul(F32, OutputSRegister(mul), InputSRegisterAt(mul, 0), InputSRegisterAt(mul, 1));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ Vmul(F64,
+ FromLowSToD(out.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(first.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(second.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitNot(HNot* not_) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitNot(HNot* not_) {
+ LocationSummary* locations = not_->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ switch (not_->GetResultType()) {
+ case Primitive::kPrimInt:
+ __ Mvn(OutputRegister(not_), InputRegisterAt(not_, 0));
+ break;
+
+ case Primitive::kPrimLong:
+ __ Mvn(out.AsRegisterPairLow<vixl32::Register>(),
+ Operand(in.AsRegisterPairLow<vixl32::Register>()));
+ __ Mvn(out.AsRegisterPairHigh<vixl32::Register>(),
+ Operand(in.AsRegisterPairHigh<vixl32::Register>()));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
+ }
+}
+
+void CodeGeneratorARMVIXL::GenerateMemoryBarrier(MemBarrierKind kind) {
+ // TODO (ported from quick): revisit ARM barrier kinds.
+ DmbOptions flavor = DmbOptions::ISH; // Quiet C++ warnings.
+ switch (kind) {
+ case MemBarrierKind::kAnyStore:
+ case MemBarrierKind::kLoadAny:
+ case MemBarrierKind::kAnyAny: {
+ flavor = DmbOptions::ISH;
+ break;
+ }
+ case MemBarrierKind::kStoreStore: {
+ flavor = DmbOptions::ISHST;
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected memory barrier " << kind;
+ }
+ __ Dmb(flavor);
+}
+
+void InstructionCodeGeneratorARMVIXL::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ vixl32::Register out = OutputRegister(instruction);
+ vixl32::Register dividend = InputRegisterAt(instruction, 0);
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ DCHECK(imm == 1 || imm == -1);
+
+ if (instruction->IsRem()) {
+ __ Mov(out, 0);
+ } else {
+ if (imm == 1) {
+ __ Mov(out, dividend);
+ } else {
+ __ Rsb(out, dividend, 0);
+ }
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ vixl32::Register out = OutputRegister(instruction);
+ vixl32::Register dividend = InputRegisterAt(instruction, 0);
+ vixl32::Register temp = locations->GetTemp(0).AsRegister<vixl32::Register>();
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
+ int ctz_imm = CTZ(abs_imm);
+
+ if (ctz_imm == 1) {
+ __ Lsr(temp, dividend, 32 - ctz_imm);
+ } else {
+ __ Asr(temp, dividend, 31);
+ __ Lsr(temp, temp, 32 - ctz_imm);
+ }
+ __ Add(out, temp, Operand(dividend));
+
+ if (instruction->IsDiv()) {
+ __ Asr(out, out, ctz_imm);
+ if (imm < 0) {
+ __ Rsb(out, out, Operand(0));
+ }
+ } else {
+ __ Ubfx(out, out, 0, ctz_imm);
+ __ Sub(out, out, Operand(temp));
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ vixl32::Register out = OutputRegister(instruction);
+ vixl32::Register dividend = InputRegisterAt(instruction, 0);
+ vixl32::Register temp1 = locations->GetTemp(0).AsRegister<vixl32::Register>();
+ vixl32::Register temp2 = locations->GetTemp(1).AsRegister<vixl32::Register>();
+ int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+
+ int64_t magic;
+ int shift;
+ CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+
+ __ Mov(temp1, magic);
+ __ Smull(temp2, temp1, dividend, temp1);
+
+ if (imm > 0 && magic < 0) {
+ __ Add(temp1, temp1, Operand(dividend));
+ } else if (imm < 0 && magic > 0) {
+ __ Sub(temp1, temp1, Operand(dividend));
+ }
+
+ if (shift != 0) {
+ __ Asr(temp1, temp1, shift);
+ }
+
+ if (instruction->IsDiv()) {
+ __ Sub(out, temp1, Operand(temp1, vixl32::Shift(ASR), 31));
+ } else {
+ __ Sub(temp1, temp1, Operand(temp1, vixl32::Shift(ASR), 31));
+ // TODO: Strength reduction for mls.
+ __ Mov(temp2, imm);
+ __ Mls(out, temp1, temp2, dividend);
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateDivRemConstantIntegral(
+ HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ if (imm == 0) {
+ // Do not generate anything. DivZeroCheck would prevent any code to be executed.
+ } else if (imm == 1 || imm == -1) {
+ DivRemOneOrMinusOne(instruction);
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ DivRemByPowerOfTwo(instruction);
+ } else {
+ DCHECK(imm <= -2 || imm >= 2);
+ GenerateDivRemWithAnyConstant(instruction);
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitDiv(HDiv* div) {
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ if (div->GetResultType() == Primitive::kPrimLong) {
+ // pLdiv runtime call.
+ call_kind = LocationSummary::kCallOnMainOnly;
+ } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
+ // sdiv will be replaced by other instruction sequence.
+ } else if (div->GetResultType() == Primitive::kPrimInt &&
+ !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ // pIdivmod runtime call.
+ call_kind = LocationSummary::kCallOnMainOnly;
+ }
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt: {
+ if (div->InputAt(1)->IsConstant()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant()));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ int32_t value = div->InputAt(1)->AsIntConstant()->GetValue();
+ if (value == 1 || value == 0 || value == -1) {
+ // No temp register required.
+ } else {
+ locations->AddTemp(Location::RequiresRegister());
+ if (!IsPowerOfTwo(AbsOrMin(value))) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ }
+ } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ } else {
+ TODO_VIXL32(FATAL);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ TODO_VIXL32(FATAL);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitDiv(HDiv* div) {
+ LocationSummary* locations = div->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt: {
+ if (second.IsConstant()) {
+ GenerateDivRemConstantIntegral(div);
+ } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
+ } else {
+ TODO_VIXL32(FATAL);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ TODO_VIXL32(FATAL);
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ __ Vdiv(F32, OutputSRegister(div), InputSRegisterAt(div, 0), InputSRegisterAt(div, 1));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ Vdiv(F64,
+ FromLowSToD(out.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(first.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(second.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ DivZeroCheckSlowPathARMVIXL* slow_path =
+ new (GetGraph()->GetArena()) DivZeroCheckSlowPathARMVIXL(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location value = locations->InAt(0);
+
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ B(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ if (value.IsRegisterPair()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Orrs(temp,
+ value.AsRegisterPairLow<vixl32::Register>(),
+ Operand(value.AsRegisterPairHigh<vixl32::Register>()));
+ __ B(eq, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ B(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitParallelMove(HParallelMove* instruction) {
+ codegen_->GetMoveResolver()->EmitNativeCode(instruction);
+}
+
+ArmVIXLAssembler* ParallelMoveResolverARMVIXL::GetAssembler() const {
+ return codegen_->GetAssembler();
+}
+
+void ParallelMoveResolverARMVIXL::EmitMove(size_t index) {
+ MoveOperands* move = moves_[index];
+ Location source = move->GetSource();
+ Location destination = move->GetDestination();
+
+ if (source.IsRegister()) {
+ if (destination.IsRegister()) {
+ __ Mov(destination.AsRegister<vixl32::Register>(), source.AsRegister<vixl32::Register>());
+ } else if (destination.IsFpuRegister()) {
+ __ Vmov(destination.AsFpuRegister<vixl32::SRegister>(),
+ source.AsRegister<vixl32::Register>());
+ } else {
+ DCHECK(destination.IsStackSlot());
+ GetAssembler()->StoreToOffset(kStoreWord,
+ source.AsRegister<vixl32::Register>(),
+ sp,
+ destination.GetStackIndex());
+ }
+ } else if (source.IsStackSlot()) {
+ TODO_VIXL32(FATAL);
+ } else if (source.IsFpuRegister()) {
+ TODO_VIXL32(FATAL);
+ } else if (source.IsDoubleStackSlot()) {
+ TODO_VIXL32(FATAL);
+ } else if (source.IsRegisterPair()) {
+ if (destination.IsRegisterPair()) {
+ __ Mov(destination.AsRegisterPairLow<vixl32::Register>(),
+ source.AsRegisterPairLow<vixl32::Register>());
+ __ Mov(destination.AsRegisterPairHigh<vixl32::Register>(),
+ source.AsRegisterPairHigh<vixl32::Register>());
+ } else if (destination.IsFpuRegisterPair()) {
+ __ Vmov(FromLowSToD(destination.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ source.AsRegisterPairLow<vixl32::Register>(),
+ source.AsRegisterPairHigh<vixl32::Register>());
+ } else {
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
+ DCHECK(ExpectedPairLayout(source));
+ GetAssembler()->StoreToOffset(kStoreWordPair,
+ source.AsRegisterPairLow<vixl32::Register>(),
+ sp,
+ destination.GetStackIndex());
+ }
+ } else if (source.IsFpuRegisterPair()) {
+ TODO_VIXL32(FATAL);
+ } else {
+ DCHECK(source.IsConstant()) << source;
+ HConstant* constant = source.GetConstant();
+ if (constant->IsIntConstant() || constant->IsNullConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(constant);
+ if (destination.IsRegister()) {
+ __ Mov(destination.AsRegister<vixl32::Register>(), value);
+ } else {
+ DCHECK(destination.IsStackSlot());
+ UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Mov(temp, value);
+ GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex());
+ }
+ } else if (constant->IsLongConstant()) {
+ int64_t value = constant->AsLongConstant()->GetValue();
+ if (destination.IsRegisterPair()) {
+ __ Mov(destination.AsRegisterPairLow<vixl32::Register>(), Low32Bits(value));
+ __ Mov(destination.AsRegisterPairHigh<vixl32::Register>(), High32Bits(value));
+ } else {
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
+ UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Mov(temp, Low32Bits(value));
+ GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex());
+ __ Mov(temp, High32Bits(value));
+ GetAssembler()->StoreToOffset(kStoreWord,
+ temp,
+ sp,
+ destination.GetHighStackIndex(kArmWordSize));
+ }
+ } else if (constant->IsDoubleConstant()) {
+ double value = constant->AsDoubleConstant()->GetValue();
+ if (destination.IsFpuRegisterPair()) {
+ __ Vmov(F64, FromLowSToD(destination.AsFpuRegisterPairLow<vixl32::SRegister>()), value);
+ } else {
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
+ uint64_t int_value = bit_cast<uint64_t, double>(value);
+ UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ GetAssembler()->LoadImmediate(temp, Low32Bits(int_value));
+ GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex());
+ GetAssembler()->LoadImmediate(temp, High32Bits(int_value));
+ GetAssembler()->StoreToOffset(kStoreWord,
+ temp,
+ sp,
+ destination.GetHighStackIndex(kArmWordSize));
+ }
+ } else {
+ DCHECK(constant->IsFloatConstant()) << constant->DebugName();
+ float value = constant->AsFloatConstant()->GetValue();
+ if (destination.IsFpuRegister()) {
+ __ Vmov(F32, destination.AsFpuRegister<vixl32::SRegister>(), value);
+ } else {
+ DCHECK(destination.IsStackSlot());
+ UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ GetAssembler()->LoadImmediate(temp, bit_cast<int32_t, float>(value));
+ GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex());
+ }
+ }
+ }
+}
+
+void ParallelMoveResolverARMVIXL::Exchange(Register reg, int mem) {
+ TODO_VIXL32(FATAL);
+}
+
+void ParallelMoveResolverARMVIXL::Exchange(int mem1, int mem2) {
+ TODO_VIXL32(FATAL);
+}
+
+void ParallelMoveResolverARMVIXL::EmitSwap(size_t index) {
+ TODO_VIXL32(FATAL);
+}
+
+void ParallelMoveResolverARMVIXL::SpillScratch(int reg ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+}
+
+void ParallelMoveResolverARMVIXL::RestoreScratch(int reg ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+}
+
+
+// TODO: Remove when codegen complete.
+#pragma GCC diagnostic pop
+
+#undef __
+#undef QUICK_ENTRY_POINT
+#undef TODO_VIXL32
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
new file mode 100644
index 0000000..d0c2c85
--- /dev/null
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
+#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
+
+#include "code_generator_arm.h"
+#include "utils/arm/assembler_arm_vixl.h"
+
+// TODO(VIXL): make vixl clean wrt -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "aarch32/constants-aarch32.h"
+#include "aarch32/instructions-aarch32.h"
+#include "aarch32/macro-assembler-aarch32.h"
+#pragma GCC diagnostic pop
+
+// True if VIXL32 should be used for codegen on ARM.
+#ifdef USE_VIXL_ARM_BACKEND
+static constexpr bool kArmUseVIXL32 = true;
+#else
+static constexpr bool kArmUseVIXL32 = false;
+#endif
+
+namespace art {
+namespace arm {
+
+static const vixl::aarch32::Register kMethodRegister = vixl::aarch32::r0;
+static const vixl::aarch32::Register kCoreAlwaysSpillRegister = vixl::aarch32::r5;
+static const vixl::aarch32::RegisterList kCoreCalleeSaves = vixl::aarch32::RegisterList(
+ (1 << R5) | (1 << R6) | (1 << R7) | (1 << R8) | (1 << R10) | (1 << R11) | (1 << LR));
+// Callee saves s16 to s31 inc.
+static const vixl::aarch32::SRegisterList kFpuCalleeSaves =
+ vixl::aarch32::SRegisterList(vixl::aarch32::s16, 16);
+
+#define FOR_EACH_IMPLEMENTED_INSTRUCTION(M) \
+ M(Above) \
+ M(AboveOrEqual) \
+ M(Add) \
+ M(Below) \
+ M(BelowOrEqual) \
+ M(Div) \
+ M(DivZeroCheck) \
+ M(Equal) \
+ M(Exit) \
+ M(Goto) \
+ M(GreaterThan) \
+ M(GreaterThanOrEqual) \
+ M(If) \
+ M(IntConstant) \
+ M(LessThan) \
+ M(LessThanOrEqual) \
+ M(LongConstant) \
+ M(MemoryBarrier) \
+ M(Mul) \
+ M(Not) \
+ M(NotEqual) \
+ M(ParallelMove) \
+ M(Return) \
+ M(ReturnVoid) \
+ M(Sub) \
+ M(TypeConversion) \
+
+// TODO: Remove once the VIXL32 backend is implemented completely.
+#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
+ M(And) \
+ M(ArrayGet) \
+ M(ArrayLength) \
+ M(ArraySet) \
+ M(BooleanNot) \
+ M(BoundsCheck) \
+ M(BoundType) \
+ M(CheckCast) \
+ M(ClassTableGet) \
+ M(ClearException) \
+ M(ClinitCheck) \
+ M(Compare) \
+ M(CurrentMethod) \
+ M(Deoptimize) \
+ M(DoubleConstant) \
+ M(FloatConstant) \
+ M(InstanceFieldGet) \
+ M(InstanceFieldSet) \
+ M(InstanceOf) \
+ M(InvokeInterface) \
+ M(InvokeStaticOrDirect) \
+ M(InvokeUnresolved) \
+ M(InvokeVirtual) \
+ M(LoadClass) \
+ M(LoadException) \
+ M(LoadString) \
+ M(MonitorOperation) \
+ M(NativeDebugInfo) \
+ M(Neg) \
+ M(NewArray) \
+ M(NewInstance) \
+ M(NullCheck) \
+ M(NullConstant) \
+ M(Or) \
+ M(PackedSwitch) \
+ M(ParameterValue) \
+ M(Phi) \
+ M(Rem) \
+ M(Ror) \
+ M(Select) \
+ M(Shl) \
+ M(Shr) \
+ M(StaticFieldGet) \
+ M(StaticFieldSet) \
+ M(SuspendCheck) \
+ M(Throw) \
+ M(TryBoundary) \
+ M(UnresolvedInstanceFieldGet) \
+ M(UnresolvedInstanceFieldSet) \
+ M(UnresolvedStaticFieldGet) \
+ M(UnresolvedStaticFieldSet) \
+ M(UShr) \
+ M(Xor) \
+
+class CodeGeneratorARMVIXL;
+
+class SlowPathCodeARMVIXL : public SlowPathCode {
+ public:
+ explicit SlowPathCodeARMVIXL(HInstruction* instruction)
+ : SlowPathCode(instruction), entry_label_(), exit_label_() {}
+
+ vixl::aarch32::Label* GetEntryLabel() { return &entry_label_; }
+ vixl::aarch32::Label* GetExitLabel() { return &exit_label_; }
+
+ void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+ void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+
+ private:
+ vixl::aarch32::Label entry_label_;
+ vixl::aarch32::Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARMVIXL);
+};
+
+class ParallelMoveResolverARMVIXL : public ParallelMoveResolverWithSwap {
+ public:
+ ParallelMoveResolverARMVIXL(ArenaAllocator* allocator, CodeGeneratorARMVIXL* codegen)
+ : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
+
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
+
+ ArmVIXLAssembler* GetAssembler() const;
+
+ private:
+ void Exchange(Register reg, int mem);
+ void Exchange(int mem1, int mem2);
+
+ CodeGeneratorARMVIXL* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARMVIXL);
+};
+
+#define DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR(Name) \
+ void Visit##Name(H##Name*) OVERRIDE;
+
+#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR(Name) \
+ void Visit##Name(H##Name* instr) OVERRIDE { \
+ VisitUnimplemementedInstruction(instr); }
+
+class LocationsBuilderARMVIXL : public HGraphVisitor {
+ public:
+ LocationsBuilderARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen)
+ : HGraphVisitor(graph), codegen_(codegen) {}
+
+ FOR_EACH_IMPLEMENTED_INSTRUCTION(DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR)
+
+ FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR)
+
+ private:
+ void VisitUnimplemementedInstruction(HInstruction* instruction) {
+ LOG(FATAL) << "Unimplemented Instruction: " << instruction->DebugName();
+ }
+
+ void HandleCondition(HCondition* condition);
+
+ CodeGeneratorARMVIXL* const codegen_;
+ InvokeDexCallingConventionVisitorARM parameter_visitor_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARMVIXL);
+};
+
+class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
+ public:
+ InstructionCodeGeneratorARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen);
+
+ FOR_EACH_IMPLEMENTED_INSTRUCTION(DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR)
+
+ FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR)
+
+ ArmVIXLAssembler* GetAssembler() const { return assembler_; }
+ vixl::aarch32::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
+
+ private:
+ void VisitUnimplemementedInstruction(HInstruction* instruction) {
+ LOG(FATAL) << "Unimplemented Instruction: " << instruction->DebugName();
+ }
+
+ void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
+ void HandleGoto(HInstruction* got, HBasicBlock* successor);
+ void HandleCondition(HCondition* condition);
+ void GenerateTestAndBranch(HInstruction* instruction,
+ size_t condition_input_index,
+ vixl::aarch32::Label* true_target,
+ vixl::aarch32::Label* false_target);
+ void GenerateCompareTestAndBranch(HCondition* condition,
+ vixl::aarch32::Label* true_target,
+ vixl::aarch32::Label* false_target);
+ void GenerateVcmp(HInstruction* instruction);
+ void GenerateFPJumps(HCondition* cond,
+ vixl::aarch32::Label* true_label,
+ vixl::aarch32::Label* false_label);
+ void GenerateLongComparesAndJumps(HCondition* cond,
+ vixl::aarch32::Label* true_label,
+ vixl::aarch32::Label* false_label);
+ void DivRemOneOrMinusOne(HBinaryOperation* instruction);
+ void DivRemByPowerOfTwo(HBinaryOperation* instruction);
+ void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
+ void GenerateDivRemConstantIntegral(HBinaryOperation* instruction);
+
+ ArmVIXLAssembler* const assembler_;
+ CodeGeneratorARMVIXL* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARMVIXL);
+};
+
+class CodeGeneratorARMVIXL : public CodeGenerator {
+ public:
+ CodeGeneratorARMVIXL(HGraph* graph,
+ const ArmInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
+
+ virtual ~CodeGeneratorARMVIXL() {}
+
+ void Initialize() OVERRIDE {
+ block_labels_.resize(GetGraph()->GetBlocks().size());
+ }
+
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+ void Bind(HBasicBlock* block) OVERRIDE;
+ void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
+ ArmVIXLAssembler* GetAssembler() OVERRIDE { return &assembler_; }
+
+ const ArmVIXLAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+
+ vixl::aarch32::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
+
+ size_t GetWordSize() const OVERRIDE { return kArmWordSize; }
+
+ size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return vixl::aarch32::kRegSizeInBytes; }
+
+ HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE;
+
+ void GenerateMemoryBarrier(MemBarrierKind kind);
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
+
+ // Blocks all register pairs made out of blocked core registers.
+ void UpdateBlockedPairRegisters() const;
+
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+ InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kThumb2; }
+
+ const ArmInstructionSetFeatures& GetInstructionSetFeatures() const { return isa_features_; }
+
+ vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }
+
+ // Saves the register in the stack. Returns the size taken on stack.
+ size_t SaveCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
+ uint32_t reg_id ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
+ return 0;
+ }
+
+ // Restores the register from the stack. Returns the size taken on stack.
+ size_t RestoreCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
+ uint32_t reg_id ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
+ return 0;
+ }
+
+ size_t SaveFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
+ uint32_t reg_id ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: SaveFloatingPointRegister";
+ return 0;
+ }
+
+ size_t RestoreFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
+ uint32_t reg_id ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: RestoreFloatingPointRegister";
+ return 0;
+ }
+
+ bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE {
+ return type == Primitive::kPrimDouble || type == Primitive::kPrimLong;
+ }
+
+ void ComputeSpillMask() OVERRIDE;
+
+ void GenerateImplicitNullCheck(HNullCheck* null_check) OVERRIDE;
+ void GenerateExplicitNullCheck(HNullCheck* null_check) OVERRIDE;
+
+ ParallelMoveResolver* GetMoveResolver() OVERRIDE {
+ return &move_resolver_;
+ }
+
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path = nullptr) OVERRIDE;
+
+ // Generate code to invoke a runtime entry point, but do not record
+ // PC-related information in a stack map.
+ void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path);
+
+ void GenerateInvokeRuntime(int32_t entry_point_offset);
+
+ // Check if the desired_string_load_kind is supported. If it is, return it,
+ // otherwise return a fall-back kind that should be used instead.
+ HLoadString::LoadKind GetSupportedLoadStringKind(
+ HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+
+ // Check if the desired_class_load_kind is supported. If it is, return it,
+ // otherwise return a fall-back kind that should be used instead.
+ HLoadClass::LoadKind GetSupportedLoadClassKind(
+ HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
+ void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
+
+ void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
+
+ void GenerateNop() OVERRIDE;
+
+ vixl::aarch32::Label* GetLabelOf(HBasicBlock* block) {
+ block = FirstNonEmptyBlock(block);
+ return &(block_labels_[block->GetBlockId()]);
+ }
+
+ private:
+ // Labels for each block that will be compiled.
+ // We use a deque so that the `vixl::aarch32::Label` objects do not move in memory.
+ ArenaDeque<vixl::aarch32::Label> block_labels_; // Indexed by block id.
+ vixl::aarch32::Label frame_entry_label_;
+
+ LocationsBuilderARMVIXL location_builder_;
+ InstructionCodeGeneratorARMVIXL instruction_visitor_;
+ ParallelMoveResolverARMVIXL move_resolver_;
+
+ ArmVIXLAssembler assembler_;
+ const ArmInstructionSetFeatures& isa_features_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARMVIXL);
+};
+
+#undef FOR_EACH_IMPLEMENTED_INSTRUCTION
+#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
+#undef DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR
+#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR
+
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 070cbb3..f19faa3 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -41,6 +41,7 @@
#include "register_allocator_linear_scan.h"
#include "ssa_liveness_analysis.h"
#include "utils.h"
+#include "utils/arm/assembler_arm_vixl.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/mips/managed_register_mips.h"
#include "utils/mips64/managed_register_mips64.h"
@@ -48,6 +49,7 @@
#ifdef ART_ENABLE_CODEGEN_arm
#include "code_generator_arm.h"
+#include "code_generator_arm_vixl.h"
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -117,6 +119,28 @@
blocked_register_pairs_[arm::R6_R7] = false;
}
};
+
+// A way to test the VIXL32-based code generator on ARM. This will replace
+// TestCodeGeneratorARM when the VIXL32-based backend replaces the existing one.
+class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL {
+ public:
+ TestCodeGeneratorARMVIXL(HGraph* graph,
+ const ArmInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options)
+ : arm::CodeGeneratorARMVIXL(graph, isa_features, compiler_options) {
+ AddAllocatedRegister(Location::RegisterLocation(arm::R6));
+ AddAllocatedRegister(Location::RegisterLocation(arm::R7));
+ }
+
+ void SetupBlockedRegisters() const OVERRIDE {
+ arm::CodeGeneratorARMVIXL::SetupBlockedRegisters();
+ blocked_core_registers_[arm::R4] = true;
+ blocked_core_registers_[arm::R6] = false;
+ blocked_core_registers_[arm::R7] = false;
+ // Makes pair R6-R7 available.
+ blocked_register_pairs_[arm::R6_R7] = false;
+ }
+};
#endif
#ifdef ART_ENABLE_CODEGEN_x86
@@ -296,6 +320,13 @@
*features_arm.get(),
compiler_options);
}
+
+CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& compiler_options) {
+ std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
+ ArmInstructionSetFeatures::FromCppDefines());
+ return new (graph->GetArena())
+ TestCodeGeneratorARMVIXL(graph, *features_arm.get(), compiler_options);
+}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -351,6 +382,7 @@
#ifdef ART_ENABLE_CODEGEN_arm
CodegenTargetConfig(kArm, create_codegen_arm),
CodegenTargetConfig(kThumb2, create_codegen_arm),
+ CodegenTargetConfig(kArm, create_codegen_arm_vixl32),
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
CodegenTargetConfig(kArm64, create_codegen_arm64),
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
new file mode 100644
index 0000000..8535417
--- /dev/null
+++ b/compiler/optimizing/common_arm.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_COMMON_ARM_H_
+#define ART_COMPILER_OPTIMIZING_COMMON_ARM_H_
+
+// TODO(VIXL): Make VIXL compile with -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "aarch32/macro-assembler-aarch32.h"
+#pragma GCC diagnostic pop
+
+namespace art {
+namespace arm {
+namespace helpers {
+
+static_assert(vixl::aarch32::kSpCode == SP, "vixl::aarch32::kSpCode must equal ART's SP");
+
+inline dwarf::Reg DWARFReg(vixl::aarch32::Register reg) {
+ return dwarf::Reg::ArmCore(static_cast<int>(reg.GetCode()));
+}
+
+inline dwarf::Reg DWARFReg(vixl::aarch32::SRegister reg) {
+ return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
+}
+
+inline vixl::aarch32::DRegister FromLowSToD(vixl::aarch32::SRegister reg) {
+ DCHECK_EQ(reg.GetCode() % 2, 0u) << reg;
+ return vixl::aarch32::DRegister(reg.GetCode() / 2);
+}
+
+inline vixl::aarch32::Register RegisterFrom(Location location) {
+ DCHECK(location.IsRegister()) << location;
+ return vixl::aarch32::Register(location.reg());
+}
+
+inline vixl::aarch32::Register RegisterFrom(Location location, Primitive::Type type) {
+ DCHECK(type != Primitive::kPrimVoid && !Primitive::IsFloatingPointType(type)) << type;
+ return RegisterFrom(location);
+}
+
+inline vixl::aarch32::DRegister DRegisterFrom(Location location) {
+ DCHECK(location.IsFpuRegister()) << location;
+ return vixl::aarch32::DRegister(location.reg());
+}
+
+inline vixl::aarch32::SRegister SRegisterFrom(Location location) {
+ DCHECK(location.IsFpuRegister()) << location;
+ return vixl::aarch32::SRegister(location.reg());
+}
+
+inline vixl::aarch32::SRegister OutputSRegister(HInstruction* instr) {
+ Primitive::Type type = instr->GetType();
+ DCHECK_EQ(type, Primitive::kPrimFloat) << type;
+ return SRegisterFrom(instr->GetLocations()->Out());
+}
+
+inline vixl::aarch32::DRegister OutputDRegister(HInstruction* instr) {
+ Primitive::Type type = instr->GetType();
+ DCHECK_EQ(type, Primitive::kPrimDouble) << type;
+ return DRegisterFrom(instr->GetLocations()->Out());
+}
+
+inline vixl::aarch32::SRegister InputSRegisterAt(HInstruction* instr, int input_index) {
+ Primitive::Type type = instr->InputAt(input_index)->GetType();
+ DCHECK_EQ(type, Primitive::kPrimFloat) << type;
+ return SRegisterFrom(instr->GetLocations()->InAt(input_index));
+}
+
+inline vixl::aarch32::DRegister InputDRegisterAt(HInstruction* instr, int input_index) {
+ Primitive::Type type = instr->InputAt(input_index)->GetType();
+ DCHECK_EQ(type, Primitive::kPrimDouble) << type;
+ return DRegisterFrom(instr->GetLocations()->InAt(input_index));
+}
+
+inline vixl::aarch32::Register OutputRegister(HInstruction* instr) {
+ return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
+}
+
+inline vixl::aarch32::Register InputRegisterAt(HInstruction* instr, int input_index) {
+ return RegisterFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+inline int64_t Int64ConstantFrom(Location location) {
+ HConstant* instr = location.GetConstant();
+ if (instr->IsIntConstant()) {
+ return instr->AsIntConstant()->GetValue();
+ } else if (instr->IsNullConstant()) {
+ return 0;
+ } else {
+ DCHECK(instr->IsLongConstant()) << instr->DebugName();
+ return instr->AsLongConstant()->GetValue();
+ }
+}
+
+inline vixl::aarch32::Operand OperandFrom(Location location, Primitive::Type type) {
+ if (location.IsRegister()) {
+ return vixl::aarch32::Operand(RegisterFrom(location, type));
+ } else {
+ return vixl::aarch32::Operand(Int64ConstantFrom(location));
+ }
+}
+
+inline vixl::aarch32::Operand InputOperandAt(HInstruction* instr, int input_index) {
+ return OperandFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+} // namespace helpers
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_COMMON_ARM_H_
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 4ca0600..b787888 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1577,6 +1577,18 @@
return;
}
+ if ((input_cst != nullptr) && input_cst->IsOne()
+ && input_other->GetType() == Primitive::kPrimBoolean) {
+ // Replace code looking like
+ // XOR dst, src, 1
+ // with
+ // BOOLEAN_NOT dst, src
+ HBooleanNot* boolean_not = new (GetGraph()->GetArena()) HBooleanNot(input_other);
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, boolean_not);
+ RecordSimplification();
+ return;
+ }
+
if ((input_cst != nullptr) && AreAllBitsSet(input_cst)) {
// Replace code looking like
// XOR dst, src, 0xFFF...FF
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 245653d..c422163 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1386,7 +1386,10 @@
if (IsBootImage() && image_filenames_.size() > 1) {
// If we're compiling the boot image, store the boot classpath into the Key-Value store.
// We need this for the multi-image case.
- key_value_store_->Put(OatHeader::kBootClassPathKey, GetMultiImageBootClassPath());
+ key_value_store_->Put(OatHeader::kBootClassPathKey,
+ gc::space::ImageSpace::GetMultiImageBootClassPath(dex_locations_,
+ oat_filenames_,
+ image_filenames_));
}
if (!IsBootImage()) {
@@ -2052,49 +2055,6 @@
return result;
}
- std::string GetMultiImageBootClassPath() {
- DCHECK(IsBootImage());
- DCHECK_GT(oat_filenames_.size(), 1u);
- // If the image filename was adapted (e.g., for our tests), we need to change this here,
- // too, but need to strip all path components (they will be re-established when loading).
- std::ostringstream bootcp_oss;
- bool first_bootcp = true;
- for (size_t i = 0; i < dex_locations_.size(); ++i) {
- if (!first_bootcp) {
- bootcp_oss << ":";
- }
-
- std::string dex_loc = dex_locations_[i];
- std::string image_filename = image_filenames_[i];
-
- // Use the dex_loc path, but the image_filename name (without path elements).
- size_t dex_last_slash = dex_loc.rfind('/');
-
- // npos is max(size_t). That makes this a bit ugly.
- size_t image_last_slash = image_filename.rfind('/');
- size_t image_last_at = image_filename.rfind('@');
- size_t image_last_sep = (image_last_slash == std::string::npos)
- ? image_last_at
- : (image_last_at == std::string::npos)
- ? std::string::npos
- : std::max(image_last_slash, image_last_at);
- // Note: whenever image_last_sep == npos, +1 overflow means using the full string.
-
- if (dex_last_slash == std::string::npos) {
- dex_loc = image_filename.substr(image_last_sep + 1);
- } else {
- dex_loc = dex_loc.substr(0, dex_last_slash + 1) +
- image_filename.substr(image_last_sep + 1);
- }
-
- // Image filenames already end with .art, no need to replace.
-
- bootcp_oss << dex_loc;
- first_bootcp = false;
- }
- return bootcp_oss.str();
- }
-
std::vector<std::string> GetClassPathLocations(const std::string& class_path) {
// This function is used only for apps and for an app we have exactly one oat file.
DCHECK(!IsBootImage());
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 7234952..8a48604 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -76,9 +76,10 @@
file_.reset(new File(fd, GetFilename(), true));
}
-ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix) {
- filename_ = other.GetFilename();
- filename_ += suffix;
+ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix)
+ : ScratchFile(other.GetFilename() + suffix) {}
+
+ScratchFile::ScratchFile(const std::string& filename) : filename_(filename) {
int fd = open(filename_.c_str(), O_RDWR | O_CREAT, 0666);
CHECK_NE(-1, fd);
file_.reset(new File(fd, GetFilename(), true));
@@ -90,6 +91,18 @@
file_.reset(file);
}
+ScratchFile::ScratchFile(ScratchFile&& other) {
+ *this = std::move(other);
+}
+
+ScratchFile& ScratchFile::operator=(ScratchFile&& other) {
+ if (GetFile() != other.GetFile()) {
+ std::swap(filename_, other.filename_);
+ std::swap(file_, other.file_);
+ }
+ return *this;
+}
+
ScratchFile::~ScratchFile() {
Unlink();
}
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index b2090b7..2376e6a 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -40,8 +40,14 @@
public:
ScratchFile();
+ explicit ScratchFile(const std::string& filename);
+
ScratchFile(const ScratchFile& other, const char* suffix);
+ explicit ScratchFile(ScratchFile&& other);
+
+ ScratchFile& operator=(ScratchFile&& other);
+
explicit ScratchFile(File* file);
~ScratchFile();
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 70b7f87..409fbba 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -564,6 +564,34 @@
return nullptr;
}
+uint32_t DexFile::FindCodeItemOffset(const DexFile::ClassDef& class_def,
+ uint32_t method_idx) const {
+ const uint8_t* class_data = GetClassData(class_def);
+ CHECK(class_data != nullptr);
+ ClassDataItemIterator it(*this, class_data);
+ // Skip fields
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ while (it.HasNextInstanceField()) {
+ it.Next();
+ }
+ while (it.HasNextDirectMethod()) {
+ if (it.GetMemberIndex() == method_idx) {
+ return it.GetMethodCodeItemOffset();
+ }
+ it.Next();
+ }
+ while (it.HasNextVirtualMethod()) {
+ if (it.GetMemberIndex() == method_idx) {
+ return it.GetMethodCodeItemOffset();
+ }
+ it.Next();
+ }
+ LOG(FATAL) << "Unable to find method " << method_idx;
+ UNREACHABLE();
+}
+
const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_klass,
const DexFile::StringId& name,
const DexFile::TypeId& type) const {
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 14bde09..28aeb1e 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -590,6 +590,9 @@
const DexFile::StringId& name,
const DexFile::TypeId& type) const;
+ uint32_t FindCodeItemOffset(const DexFile::ClassDef& class_def,
+ uint32_t dex_method_idx) const;
+
// Returns the declaring class descriptor string of a field id.
const char* GetFieldDeclaringClassDescriptor(const FieldId& field_id) const {
const DexFile::TypeId& type_id = GetTypeId(field_id.class_idx_);
@@ -1060,6 +1063,16 @@
std::string* error_msg,
VerifyResult* verify_result = nullptr);
+
+ // Opens a .dex file at the given address, optionally backed by a MemMap
+ static std::unique_ptr<const DexFile> OpenMemory(const uint8_t* dex_file,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ std::unique_ptr<MemMap> mem_map,
+ const OatDexFile* oat_dex_file,
+ std::string* error_msg);
+
DexFile(const uint8_t* base,
size_t size,
const std::string& location,
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 6a06177..3dffc40 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -26,6 +26,7 @@
#include "os.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
+#include "utils.h"
namespace art {
@@ -37,65 +38,13 @@
ASSERT_TRUE(dex.get() != nullptr);
}
-static const uint8_t kBase64Map[256] = {
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63,
- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255,
- 255, 254, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, // NOLINT
- 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, // NOLINT
- 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
- 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, // NOLINT
- 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, // NOLINT
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255
-};
-
-static inline std::vector<uint8_t> DecodeBase64(const char* src) {
- std::vector<uint8_t> tmp;
- uint32_t t = 0, y = 0;
- int g = 3;
- for (size_t i = 0; src[i] != '\0'; ++i) {
- uint8_t c = kBase64Map[src[i] & 0xFF];
- if (c == 255) continue;
- // the final = symbols are read and used to trim the remaining bytes
- if (c == 254) {
- c = 0;
- // prevent g < 0 which would potentially allow an overflow later
- if (--g < 0) {
- return std::vector<uint8_t>();
- }
- } else if (g != 3) {
- // we only allow = to be at the end
- return std::vector<uint8_t>();
- }
- t = (t << 6) | c;
- if (++y == 4) {
- tmp.push_back((t >> 16) & 255);
- if (g > 1) {
- tmp.push_back((t >> 8) & 255);
- }
- if (g > 2) {
- tmp.push_back(t & 255);
- }
- y = t = 0;
- }
- }
- if (y != 0) {
- return std::vector<uint8_t>();
- }
- return tmp;
+static inline std::vector<uint8_t> DecodeBase64Vec(const char* src) {
+ std::vector<uint8_t> res;
+ size_t size;
+ std::unique_ptr<uint8_t[]> data(DecodeBase64(src, &size));
+ res.resize(size);
+ memcpy(res.data(), data.get(), size);
+ return res;
}
// Although this is the same content logically as the Nested test dex,
@@ -166,7 +115,7 @@
static void DecodeAndWriteDexFile(const char* base64, const char* location) {
// decode base64
CHECK(base64 != nullptr);
- std::vector<uint8_t> dex_bytes = DecodeBase64(base64);
+ std::vector<uint8_t> dex_bytes = DecodeBase64Vec(base64);
CHECK_NE(dex_bytes.size(), 0u);
// write to provided file
@@ -202,7 +151,7 @@
const char* location,
uint32_t location_checksum) {
CHECK(base64 != nullptr);
- std::vector<uint8_t> dex_bytes = DecodeBase64(base64);
+ std::vector<uint8_t> dex_bytes = DecodeBase64Vec(base64);
CHECK_NE(dex_bytes.size(), 0u);
std::string error_message;
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 5939ef3..c5a4d75 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -29,34 +29,10 @@
#include "leb128.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
+#include "utils.h"
namespace art {
-static const uint8_t kBase64Map[256] = {
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63,
- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255,
- 255, 254, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, // NOLINT
- 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, // NOLINT
- 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
- 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, // NOLINT
- 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, // NOLINT
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255
-};
-
// Make the Dex file version 37.
static void MakeDexVersion37(DexFile* dex_file) {
size_t offset = OFFSETOF_MEMBER(DexFile::Header, magic_) + 6;
@@ -64,52 +40,6 @@
*(const_cast<uint8_t*>(dex_file->Begin()) + offset) = '7';
}
-static inline std::unique_ptr<uint8_t[]> DecodeBase64(const char* src, size_t* dst_size) {
- std::vector<uint8_t> tmp;
- uint32_t t = 0, y = 0;
- int g = 3;
- for (size_t i = 0; src[i] != '\0'; ++i) {
- uint8_t c = kBase64Map[src[i] & 0xFF];
- if (c == 255) continue;
- // the final = symbols are read and used to trim the remaining bytes
- if (c == 254) {
- c = 0;
- // prevent g < 0 which would potentially allow an overflow later
- if (--g < 0) {
- *dst_size = 0;
- return nullptr;
- }
- } else if (g != 3) {
- // we only allow = to be at the end
- *dst_size = 0;
- return nullptr;
- }
- t = (t << 6) | c;
- if (++y == 4) {
- tmp.push_back((t >> 16) & 255);
- if (g > 1) {
- tmp.push_back((t >> 8) & 255);
- }
- if (g > 2) {
- tmp.push_back(t & 255);
- }
- y = t = 0;
- }
- }
- if (y != 0) {
- *dst_size = 0;
- return nullptr;
- }
- std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
- if (dst_size != nullptr) {
- *dst_size = tmp.size();
- } else {
- *dst_size = 0;
- }
- std::copy(tmp.begin(), tmp.end(), dst.get());
- return dst;
-}
-
static void FixUpChecksum(uint8_t* dex_file) {
DexFile::Header* header = reinterpret_cast<DexFile::Header*>(dex_file);
uint32_t expected_size = header->file_size_;
@@ -131,7 +61,7 @@
std::function<void(DexFile*)> f,
const char* expected_error) {
size_t length;
- std::unique_ptr<uint8_t[]> dex_bytes = DecodeBase64(dex_file_base64_content, &length);
+ std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(dex_file_base64_content, &length));
CHECK(dex_bytes != nullptr);
// Note: `dex_file` will be destroyed before `dex_bytes`.
std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
@@ -1704,7 +1634,7 @@
TEST_F(DexFileVerifierTest, Checksum) {
size_t length;
- std::unique_ptr<uint8_t[]> dex_bytes = DecodeBase64(kGoodTestDex, &length);
+ std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(kGoodTestDex, &length));
CHECK(dex_bytes != nullptr);
// Note: `dex_file` will be destroyed before `dex_bytes`.
std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 76b5456..446e343 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -169,22 +169,33 @@
HandleScope* handle_scope)
// TODO: NO_THREAD_SAFETY_ANALYSIS as GoToRunnable() is NO_THREAD_SAFETY_ANALYSIS
NO_THREAD_SAFETY_ANALYSIS {
- GoToRunnable(self);
+ bool critical_native = called->IsAnnotatedWithCriticalNative();
+ bool fast_native = called->IsAnnotatedWithFastNative();
+ bool normal_native = !critical_native && !fast_native;
+
+ // @Fast and @CriticalNative do not do a state transition.
+ if (LIKELY(normal_native)) {
+ GoToRunnable(self);
+ }
// We need the mutator lock (i.e., calling GoToRunnable()) before accessing the shorty or the
// locked object.
jobject locked = called->IsSynchronized() ? handle_scope->GetHandle(0).ToJObject() : nullptr;
char return_shorty_char = called->GetShorty()[0];
if (return_shorty_char == 'L') {
if (locked != nullptr) {
+ DCHECK(normal_native) << " @FastNative and synchronize is not supported";
UnlockJniSynchronizedMethod(locked, self);
}
return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceHandleResult(
result.l, saved_local_ref_cookie, self));
} else {
if (locked != nullptr) {
+ DCHECK(normal_native) << " @FastNative and synchronize is not supported";
UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
}
- PopLocalReferences(saved_local_ref_cookie, self);
+ if (LIKELY(!critical_native)) {
+ PopLocalReferences(saved_local_ref_cookie, self);
+ }
switch (return_shorty_char) {
case 'F': {
if (kRuntimeISA == kX86) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 3c6f807..fcbd834 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1625,7 +1625,8 @@
class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
public:
- ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {}
+ explicit ComputeGenericJniFrameSize(bool critical_native)
+ : num_handle_scope_references_(0), critical_native_(critical_native) {}
// Lays out the callee-save frame. Assumes that the incorrect frame corresponding to RefsAndArgs
// is at *m = sp. Will update to point to the bottom of the save frame.
@@ -1711,6 +1712,7 @@
private:
uint32_t num_handle_scope_references_;
+ const bool critical_native_;
};
uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
@@ -1720,6 +1722,11 @@
void ComputeGenericJniFrameSize::WalkHeader(
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
+ // First 2 parameters are always excluded for @CriticalNative.
+ if (UNLIKELY(critical_native_)) {
+ return;
+ }
+
// JNIEnv
sm->AdvancePointer(nullptr);
@@ -1778,11 +1785,16 @@
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty, uint32_t shorty_len,
+ BuildGenericJniFrameVisitor(Thread* self,
+ bool is_static,
+ bool critical_native,
+ const char* shorty,
+ uint32_t shorty_len,
ArtMethod*** sp)
: QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
- jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
- ComputeGenericJniFrameSize fsc;
+ jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native),
+ sm_(&jni_call_) {
+ ComputeGenericJniFrameSize fsc(critical_native);
uintptr_t* start_gpr_reg;
uint32_t* start_fpr_reg;
uintptr_t* start_stack_arg;
@@ -1793,11 +1805,14 @@
jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
- // jni environment is always first argument
- sm_.AdvancePointer(self->GetJniEnv());
+ // First 2 parameters are always excluded for CriticalNative methods.
+ if (LIKELY(!critical_native)) {
+ // jni environment is always first argument
+ sm_.AdvancePointer(self->GetJniEnv());
- if (is_static) {
- sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
+ if (is_static) {
+ sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
+ } // else "this" reference is already handled by QuickArgumentVisitor.
}
}
@@ -1822,8 +1837,11 @@
class FillJniCall FINAL : public FillNativeCall {
public:
FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
- HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
- handle_scope_(handle_scope), cur_entry_(0) {}
+ HandleScope* handle_scope, bool critical_native)
+ : FillNativeCall(gpr_regs, fpr_regs, stack_args),
+ handle_scope_(handle_scope),
+ cur_entry_(0),
+ critical_native_(critical_native) {}
uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1839,12 +1857,17 @@
while (cur_entry_ < expected_slots) {
handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
}
- DCHECK_NE(cur_entry_, 0U);
+
+ if (!critical_native_) {
+ // Non-critical natives have at least the self class (jclass) or this (jobject).
+ DCHECK_NE(cur_entry_, 0U);
+ }
}
private:
HandleScope* handle_scope_;
size_t cur_entry_;
+ const bool critical_native_;
};
HandleScope* handle_scope_;
@@ -1924,7 +1947,12 @@
extern "C" void* artFindNativeMethod(Thread* self);
#endif
-uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
+static uint64_t artQuickGenericJniEndJNIRef(Thread* self,
+ uint32_t cookie,
+ bool fast_native ATTRIBUTE_UNUSED,
+ jobject l,
+ jobject lock) {
+ // TODO: add entrypoints for @FastNative returning objects.
if (lock != nullptr) {
return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
} else {
@@ -1932,11 +1960,19 @@
}
}
-void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
+static void artQuickGenericJniEndJNINonRef(Thread* self,
+ uint32_t cookie,
+ bool fast_native,
+ jobject lock) {
if (lock != nullptr) {
JniMethodEndSynchronized(cookie, lock, self);
+ // Ignore "fast_native" here because synchronized functions aren't very fast.
} else {
- JniMethodEnd(cookie, self);
+ if (UNLIKELY(fast_native)) {
+ JniMethodFastEnd(cookie, self);
+ } else {
+ JniMethodEnd(cookie, self);
+ }
}
}
@@ -1958,9 +1994,17 @@
DCHECK(called->IsNative()) << PrettyMethod(called, true);
uint32_t shorty_len = 0;
const char* shorty = called->GetShorty(&shorty_len);
+ bool critical_native = called->IsAnnotatedWithCriticalNative();
+ bool fast_native = called->IsAnnotatedWithFastNative();
+ bool normal_native = !critical_native && !fast_native;
// Run the visitor and update sp.
- BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
+ BuildGenericJniFrameVisitor visitor(self,
+ called->IsStatic(),
+ critical_native,
+ shorty,
+ shorty_len,
+ &sp);
{
ScopedAssertNoThreadSuspension sants(__FUNCTION__);
visitor.VisitArguments();
@@ -1973,20 +2017,30 @@
self->VerifyStack();
- // Start JNI, save the cookie.
uint32_t cookie;
- if (called->IsSynchronized()) {
- cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
- if (self->IsExceptionPending()) {
- self->PopHandleScope();
- // A negative value denotes an error.
- return GetTwoWordFailureValue();
+ uint32_t* sp32;
+ // Skip calling JniMethodStart for @CriticalNative.
+ if (LIKELY(!critical_native)) {
+ // Start JNI, save the cookie.
+ if (called->IsSynchronized()) {
+ DCHECK(normal_native) << " @FastNative and synchronize is not supported";
+ cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
+ if (self->IsExceptionPending()) {
+ self->PopHandleScope();
+ // A negative value denotes an error.
+ return GetTwoWordFailureValue();
+ }
+ } else {
+ if (fast_native) {
+ cookie = JniMethodFastStart(self);
+ } else {
+ DCHECK(normal_native);
+ cookie = JniMethodStart(self);
+ }
}
- } else {
- cookie = JniMethodStart(self);
+ sp32 = reinterpret_cast<uint32_t*>(sp);
+ *(sp32 - 1) = cookie;
}
- uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
- *(sp32 - 1) = cookie;
// Retrieve the stored native code.
void* nativeCode = called->GetEntryPointFromJni();
@@ -2007,12 +2061,15 @@
if (nativeCode == nullptr) {
DCHECK(self->IsExceptionPending()); // There should be an exception pending now.
- // End JNI, as the assembly will move to deliver the exception.
- jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
- if (shorty[0] == 'L') {
- artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
- } else {
- artQuickGenericJniEndJNINonRef(self, cookie, lock);
+ // @CriticalNative calls do not need to call back into JniMethodEnd.
+ if (LIKELY(!critical_native)) {
+ // End JNI, as the assembly will move to deliver the exception.
+ jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
+ if (shorty[0] == 'L') {
+ artQuickGenericJniEndJNIRef(self, cookie, fast_native, nullptr, lock);
+ } else {
+ artQuickGenericJniEndJNINonRef(self, cookie, fast_native, lock);
+ }
}
return GetTwoWordFailureValue();
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index cb5226b..11d4af8 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -327,9 +327,9 @@
continue;
}
- space::ImageSpace::CreateMultiImageLocations(image_file_name,
- boot_classpath,
- &image_file_names);
+ space::ImageSpace::ExtractMultiImageLocations(image_file_name,
+ boot_classpath,
+ &image_file_names);
}
} else {
LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e41c532..d17ef81 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1619,9 +1619,54 @@
<< ",name=\"" << GetName() << "\"]";
}
-void ImageSpace::CreateMultiImageLocations(const std::string& input_image_file_name,
- const std::string& boot_classpath,
- std::vector<std::string>* image_file_names) {
+std::string ImageSpace::GetMultiImageBootClassPath(
+ const std::vector<const char*>& dex_locations,
+ const std::vector<const char*>& oat_filenames,
+ const std::vector<const char*>& image_filenames) {
+ DCHECK_GT(oat_filenames.size(), 1u);
+ // If the image filename was adapted (e.g., for our tests), we need to change this here,
+ // too, but need to strip all path components (they will be re-established when loading).
+ std::ostringstream bootcp_oss;
+ bool first_bootcp = true;
+ for (size_t i = 0; i < dex_locations.size(); ++i) {
+ if (!first_bootcp) {
+ bootcp_oss << ":";
+ }
+
+ std::string dex_loc = dex_locations[i];
+ std::string image_filename = image_filenames[i];
+
+ // Use the dex_loc path, but the image_filename name (without path elements).
+ size_t dex_last_slash = dex_loc.rfind('/');
+
+ // npos is max(size_t). That makes this a bit ugly.
+ size_t image_last_slash = image_filename.rfind('/');
+ size_t image_last_at = image_filename.rfind('@');
+ size_t image_last_sep = (image_last_slash == std::string::npos)
+ ? image_last_at
+ : (image_last_at == std::string::npos)
+ ? std::string::npos
+ : std::max(image_last_slash, image_last_at);
+ // Note: whenever image_last_sep == npos, +1 overflow means using the full string.
+
+ if (dex_last_slash == std::string::npos) {
+ dex_loc = image_filename.substr(image_last_sep + 1);
+ } else {
+ dex_loc = dex_loc.substr(0, dex_last_slash + 1) +
+ image_filename.substr(image_last_sep + 1);
+ }
+
+ // Image filenames already end with .art, no need to replace.
+
+ bootcp_oss << dex_loc;
+ first_bootcp = false;
+ }
+ return bootcp_oss.str();
+}
+
+void ImageSpace::ExtractMultiImageLocations(const std::string& input_image_file_name,
+ const std::string& boot_classpath,
+ std::vector<std::string>* image_file_names) {
DCHECK(image_file_names != nullptr);
std::vector<std::string> images;
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index c407259..0ba131b 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -125,10 +125,14 @@
// Use the input image filename to adapt the names in the given boot classpath to establish
// complete locations for secondary images.
- static void CreateMultiImageLocations(const std::string& input_image_file_name,
+ static void ExtractMultiImageLocations(const std::string& input_image_file_name,
const std::string& boot_classpath,
std::vector<std::string>* image_filenames);
+ static std::string GetMultiImageBootClassPath(const std::vector<const char*>& dex_locations,
+ const std::vector<const char*>& oat_filenames,
+ const std::vector<const char*>& image_filenames);
+
// Return the end of the image which includes non-heap objects such as ArtMethods and ArtFields.
uint8_t* GetImageEnd() const {
return Begin() + GetImageHeader().GetImageSize();
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index 977ef44..08272fa 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -17,7 +17,8 @@
name: "libopenjdkjvmti_defaults",
defaults: ["art_defaults"],
host_supported: true,
- srcs: ["OpenjdkJvmTi.cc"],
+ srcs: ["OpenjdkJvmTi.cc",
+ "transform.cc"],
include_dirs: ["art/runtime"],
shared_libs: ["libnativehelper"],
}
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index d3561c1..a1a2361 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -29,15 +29,17 @@
* questions.
*/
+#include <string>
+#include <vector>
+
#include <jni.h>
+
#include "openjdkjvmti/jvmti.h"
#include "art_jvmti.h"
-#include "gc_root-inl.h"
-#include "globals.h"
#include "jni_env_ext-inl.h"
-#include "scoped_thread_state_change.h"
-#include "thread_list.h"
+#include "runtime.h"
+#include "transform.h"
// TODO Remove this at some point by annotating all the methods. It was put in to make the skeleton
// easier to create.
@@ -904,6 +906,66 @@
static jvmtiError GetJLocationFormat(jvmtiEnv* env, jvmtiJlocationFormat* format_ptr) {
return ERR(NOT_IMPLEMENTED);
}
+
+ // TODO Remove this once events are working.
+ static jvmtiError RetransformClassWithHook(jvmtiEnv* env,
+ jclass klass,
+ jvmtiEventClassFileLoadHook hook) {
+ std::vector<jclass> classes;
+ classes.push_back(klass);
+ return RetransformClassesWithHook(reinterpret_cast<ArtJvmTiEnv*>(env), classes, hook);
+ }
+
+ // TODO This will be called by the event handler for the art::ti Event Load Event
+ static jvmtiError RetransformClassesWithHook(ArtJvmTiEnv* env,
+ const std::vector<jclass>& classes,
+ jvmtiEventClassFileLoadHook hook) {
+ if (!IsValidEnv(env)) {
+ return ERR(INVALID_ENVIRONMENT);
+ }
+ for (jclass klass : classes) {
+ JNIEnv* jni_env = nullptr;
+ jobject loader = nullptr;
+ std::string name;
+ jobject protection_domain = nullptr;
+ jint data_len = 0;
+ unsigned char* dex_data = nullptr;
+ jvmtiError ret = OK;
+ std::string location;
+ if ((ret = GetTransformationData(env,
+ klass,
+ /*out*/&location,
+ /*out*/&jni_env,
+ /*out*/&loader,
+ /*out*/&name,
+ /*out*/&protection_domain,
+ /*out*/&data_len,
+ /*out*/&dex_data)) != OK) {
+ // TODO Do something more here? Maybe give log statements?
+ return ret;
+ }
+ jint new_data_len = 0;
+ unsigned char* new_dex_data = nullptr;
+ hook(env,
+ jni_env,
+ klass,
+ loader,
+ name.c_str(),
+ protection_domain,
+ data_len,
+ dex_data,
+ /*out*/&new_data_len,
+ /*out*/&new_dex_data);
+ // Check if anything actually changed.
+ if ((new_data_len != 0 || new_dex_data != nullptr) && new_dex_data != dex_data) {
+ MoveTransformedFileIntoRuntime(klass, std::move(location), new_data_len, new_dex_data);
+ env->Deallocate(new_dex_data);
+ }
+ // Deallocate the old dex data.
+ env->Deallocate(dex_data);
+ }
+ return OK;
+ }
};
static bool IsJvmtiVersion(jint version) {
@@ -942,7 +1004,10 @@
// The actual struct holding all of the entrypoints into the jvmti interface.
const jvmtiInterface_1 gJvmtiInterface = {
- nullptr, // reserved1
+ // SPECIAL FUNCTION: RetransformClassWithHook Is normally reserved1
+ // TODO Remove once we have events working.
+ reinterpret_cast<void*>(JvmtiFunctions::RetransformClassWithHook),
+ // nullptr, // reserved1
JvmtiFunctions::SetEventNotificationMode,
nullptr, // reserved3
JvmtiFunctions::GetAllThreads,
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
new file mode 100644
index 0000000..a0d79f3
--- /dev/null
+++ b/runtime/openjdkjvmti/transform.cc
@@ -0,0 +1,362 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "transform.h"
+
+#include "class_linker.h"
+#include "dex_file.h"
+#include "gc_root-inl.h"
+#include "globals.h"
+#include "jni_env_ext-inl.h"
+#include "jvmti.h"
+#include "linear_alloc.h"
+#include "mem_map.h"
+#include "mirror/array.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader-inl.h"
+#include "mirror/string-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread_list.h"
+#include "transform.h"
+#include "utf.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
+
+namespace openjdkjvmti {
+
+static bool ReadChecksum(jint data_len, const unsigned char* dex, /*out*/uint32_t* res) {
+ if (data_len < static_cast<jint>(sizeof(art::DexFile::Header))) {
+ return false;
+ }
+ *res = reinterpret_cast<const art::DexFile::Header*>(dex)->checksum_;
+ return true;
+}
+
+static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
+ jint data_len,
+ unsigned char* dex_data) {
+ std::string error_msg;
+ std::unique_ptr<art::MemMap> map(art::MemMap::MapAnonymous(
+ art::StringPrintf("%s-transformed", original_location.c_str()).c_str(),
+ nullptr,
+ data_len,
+ PROT_READ|PROT_WRITE,
+ /*low_4gb*/false,
+ /*reuse*/false,
+ &error_msg));
+ if (map == nullptr) {
+ return map;
+ }
+ memcpy(map->Begin(), dex_data, data_len);
+ map->Protect(PROT_READ);
+ return map;
+}
+
+static void InvalidateExistingMethods(art::Thread* self,
+ art::Handle<art::mirror::Class> klass,
+ art::Handle<art::mirror::DexCache> cache,
+ const art::DexFile* dex_file)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // Create new DexCache with new DexFile.
+ // reset dex_class_def_idx_
+ // for each method reset entry_point_from_quick_compiled_code_ to bridge
+ // for each method reset dex_code_item_offset_
+ // for each method reset dex_method_index_
+ // for each method set dex_cache_resolved_methods_ to new DexCache
+ // for each method set dex_cache_resolved_types_ to new DexCache
+ auto* runtime = art::Runtime::Current();
+ art::ClassLinker* linker = runtime->GetClassLinker();
+ art::PointerSize image_pointer_size = linker->GetImagePointerSize();
+ std::string descriptor_storage;
+ const char* descriptor = klass->GetDescriptor(&descriptor_storage);
+ // Get the new class def
+ const art::DexFile::ClassDef* class_def = art::OatFile::OatDexFile::FindClassDef(
+ *dex_file, descriptor, art::ComputeModifiedUtf8Hash(descriptor));
+ CHECK(class_def != nullptr);
+ const art::DexFile::TypeId& declaring_class_id = dex_file->GetTypeId(class_def->class_idx_);
+ art::StackHandleScope<6> hs(self);
+ const art::DexFile& old_dex_file = klass->GetDexFile();
+ for (art::ArtMethod& method : klass->GetMethods(image_pointer_size)) {
+ // Find the code_item for the method then find the dex_method_index and dex_code_item_offset to
+ // set.
+ const art::DexFile::StringId* new_name_id = dex_file->FindStringId(method.GetName());
+ uint16_t method_return_idx =
+ dex_file->GetIndexForTypeId(*dex_file->FindTypeId(method.GetReturnTypeDescriptor()));
+ const auto* old_type_list = method.GetParameterTypeList();
+ std::vector<uint16_t> new_type_list;
+ for (uint32_t i = 0; old_type_list != nullptr && i < old_type_list->Size(); i++) {
+ new_type_list.push_back(
+ dex_file->GetIndexForTypeId(
+ *dex_file->FindTypeId(
+ old_dex_file.GetTypeDescriptor(
+ old_dex_file.GetTypeId(
+ old_type_list->GetTypeItem(i).type_idx_)))));
+ }
+ const art::DexFile::ProtoId* proto_id = dex_file->FindProtoId(method_return_idx,
+ new_type_list);
+ CHECK(proto_id != nullptr || old_type_list == nullptr);
+ const art::DexFile::MethodId* method_id = dex_file->FindMethodId(declaring_class_id,
+ *new_name_id,
+ *proto_id);
+ CHECK(method_id != nullptr);
+ uint32_t dex_method_idx = dex_file->GetIndexForMethodId(*method_id);
+ method.SetDexMethodIndex(dex_method_idx);
+ linker->SetEntryPointsToInterpreter(&method);
+ method.SetCodeItemOffset(dex_file->FindCodeItemOffset(*class_def, dex_method_idx));
+ method.SetDexCacheResolvedMethods(cache->GetResolvedMethods(), image_pointer_size);
+ method.SetDexCacheResolvedTypes(cache->GetResolvedTypes(), image_pointer_size);
+ }
+
+ // Update the class fields.
+ // Need to update class last since the ArtMethod gets its DexFile from the class (which is needed
+ // to call GetReturnTypeDescriptor and GetParameterTypeList above).
+ klass->SetDexCache(cache.Get());
+ klass->SetDexCacheStrings(cache->GetStrings());
+ klass->SetDexClassDefIndex(dex_file->GetIndexForClassDef(*class_def));
+ klass->SetDexTypeIndex(dex_file->GetIndexForTypeId(*dex_file->FindTypeId(descriptor)));
+}
+
+// Adds the dex file.
+static art::mirror::LongArray* InsertDexFileIntoArray(art::Thread* self,
+ const art::DexFile* dex,
+ art::Handle<art::mirror::LongArray>& orig)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::StackHandleScope<1> hs(self);
+ CHECK_GE(orig->GetLength(), 1);
+ art::Handle<art::mirror::LongArray> ret(
+ hs.NewHandle(art::mirror::LongArray::Alloc(self, orig->GetLength() + 1)));
+ CHECK(ret.Get() != nullptr);
+ // Copy the oat-dex.
+ // TODO Should I clear the oatdex element?
+ ret->SetWithoutChecks<false>(0, orig->GetWithoutChecks(0));
+ ret->SetWithoutChecks<false>(1, static_cast<int64_t>(reinterpret_cast<intptr_t>(dex)));
+ ret->Memcpy(2, orig.Get(), 1, orig->GetLength() - 1);
+ return ret.Get();
+}
+
+// TODO Handle all types of class loaders.
+static bool FindDalvikSystemDexFileAndLoaderForClass(
+ art::Handle<art::mirror::Class> klass,
+ /*out*/art::mirror::Object** dex_file,
+ /*out*/art::mirror::ClassLoader** loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ const char* dex_path_list_element_array_name = "[Ldalvik/system/DexPathList$Element;";
+ const char* dex_path_list_element_name = "Ldalvik/system/DexPathList$Element;";
+ const char* dex_file_name = "Ldalvik/system/DexFile;";
+ const char* dex_path_list_name = "Ldalvik/system/DexPathList;";
+ const char* dex_class_loader_name = "Ldalvik/system/BaseDexClassLoader;";
+
+ art::Thread* self = art::Thread::Current();
+ CHECK(!self->IsExceptionPending());
+ art::StackHandleScope<11> hs(self);
+ art::ClassLinker* class_linker = art::Runtime::Current()->GetClassLinker();
+
+ art::Handle<art::mirror::ClassLoader> null_loader(hs.NewHandle<art::mirror::ClassLoader>(
+ nullptr));
+ art::Handle<art::mirror::Class> base_dex_loader_class(hs.NewHandle(class_linker->FindClass(
+ self, dex_class_loader_name, null_loader)));
+
+ art::ArtField* path_list_field = base_dex_loader_class->FindDeclaredInstanceField(
+ "pathList", dex_path_list_name);
+ CHECK(path_list_field != nullptr);
+
+ art::ArtField* dex_path_list_element_field =
+ class_linker->FindClass(self, dex_path_list_name, null_loader)
+ ->FindDeclaredInstanceField("dexElements", dex_path_list_element_array_name);
+ CHECK(dex_path_list_element_field != nullptr);
+
+ art::ArtField* element_dex_file_field =
+ class_linker->FindClass(self, dex_path_list_element_name, null_loader)
+ ->FindDeclaredInstanceField("dexFile", dex_file_name);
+ CHECK(element_dex_file_field != nullptr);
+
+ art::Handle<art::mirror::ClassLoader> h_class_loader(hs.NewHandle(klass->GetClassLoader()));
+ art::Handle<art::mirror::Class> loader_class(hs.NewHandle(h_class_loader->GetClass()));
+ // Check if loader is a BaseDexClassLoader
+ if (!loader_class->IsSubClass(base_dex_loader_class.Get())) {
+ LOG(art::ERROR) << "The classloader is not a BaseDexClassLoader which is currently the only "
+ << "supported class loader type!";
+ return false;
+ }
+ art::Handle<art::mirror::Object> path_list(
+ hs.NewHandle(path_list_field->GetObject(h_class_loader.Get())));
+ CHECK(path_list.Get() != nullptr);
+ CHECK(!self->IsExceptionPending());
+ art::Handle<art::mirror::ObjectArray<art::mirror::Object>> dex_elements_list(
+ hs.NewHandle(art::down_cast<art::mirror::ObjectArray<art::mirror::Object>*>(
+ dex_path_list_element_field->GetObject(path_list.Get()))));
+ CHECK(!self->IsExceptionPending());
+ CHECK(dex_elements_list.Get() != nullptr);
+ size_t num_elements = dex_elements_list->GetLength();
+ art::MutableHandle<art::mirror::Object> current_element(
+ hs.NewHandle<art::mirror::Object>(nullptr));
+ art::MutableHandle<art::mirror::Object> first_dex_file(
+ hs.NewHandle<art::mirror::Object>(nullptr));
+ for (size_t i = 0; i < num_elements; i++) {
+ current_element.Assign(dex_elements_list->Get(i));
+ CHECK(current_element.Get() != nullptr);
+ CHECK(!self->IsExceptionPending());
+ CHECK(dex_elements_list.Get() != nullptr);
+ CHECK_EQ(current_element->GetClass(), class_linker->FindClass(self,
+ dex_path_list_element_name,
+ null_loader));
+ // TODO It would be cleaner to put the art::DexFile into the dalvik.system.DexFile the class
+ // comes from but it is more annoying because we would need to find this class. It is not
+ // necessary for proper function since we just need to be in front of the classes old dex file
+ // in the path.
+ first_dex_file.Assign(element_dex_file_field->GetObject(current_element.Get()));
+ if (first_dex_file.Get() != nullptr) {
+ *dex_file = first_dex_file.Get();
+ *loader = h_class_loader.Get();
+ return true;
+ }
+ }
+ return false;
+}
+
+// Gets the data surrounding the given class.
+jvmtiError GetTransformationData(ArtJvmTiEnv* env,
+ jclass klass,
+ /*out*/std::string* location,
+ /*out*/JNIEnv** jni_env_ptr,
+ /*out*/jobject* loader,
+ /*out*/std::string* name,
+ /*out*/jobject* protection_domain,
+ /*out*/jint* data_len,
+ /*out*/unsigned char** dex_data) {
+ jint ret = env->art_vm->GetEnv(reinterpret_cast<void**>(jni_env_ptr), JNI_VERSION_1_1);
+ if (ret != JNI_OK) {
+ // TODO Different error might be better?
+ return ERR(INTERNAL);
+ }
+ JNIEnv* jni_env = *jni_env_ptr;
+ art::ScopedObjectAccess soa(jni_env);
+ art::StackHandleScope<3> hs(art::Thread::Current());
+ art::Handle<art::mirror::Class> hs_klass(hs.NewHandle(soa.Decode<art::mirror::Class*>(klass)));
+ *loader = soa.AddLocalReference<jobject>(hs_klass->GetClassLoader());
+ *name = art::mirror::Class::ComputeName(hs_klass)->ToModifiedUtf8();
+ // TODO is this always null?
+ *protection_domain = nullptr;
+ const art::DexFile& dex = hs_klass->GetDexFile();
+ *location = dex.GetLocation();
+ *data_len = static_cast<jint>(dex.Size());
+ // TODO We should maybe change env->Allocate to allow us to mprotect this memory and stop writes.
+ jvmtiError alloc_error = env->Allocate(*data_len, dex_data);
+ if (alloc_error != OK) {
+ return alloc_error;
+ }
+ // Copy the data into a temporary buffer.
+ memcpy(reinterpret_cast<void*>(*dex_data),
+ reinterpret_cast<const void*>(dex.Begin()),
+ *data_len);
+ return OK;
+}
+
+// Install the new dex file.
+// TODO do error checks for bad state (method in a stack, changes to number of methods/fields/etc).
+jvmtiError MoveTransformedFileIntoRuntime(jclass jklass,
+ std::string original_location,
+ jint data_len,
+ unsigned char* dex_data) {
+ const char* dex_file_name = "Ldalvik/system/DexFile;";
+ art::Thread* self = art::Thread::Current();
+ art::Runtime* runtime = art::Runtime::Current();
+ art::ThreadList* threads = runtime->GetThreadList();
+ art::ClassLinker* class_linker = runtime->GetClassLinker();
+ uint32_t checksum = 0;
+ if (!ReadChecksum(data_len, dex_data, &checksum)) {
+ return ERR(INVALID_CLASS_FORMAT);
+ }
+
+ std::unique_ptr<art::MemMap> map(MoveDataToMemMap(original_location, data_len, dex_data));
+ if (map.get() == nullptr) {
+ return ERR(INTERNAL);
+ }
+ std::string error_msg;
+ // Load the new dex_data in memory (mmap it, etc)
+ std::unique_ptr<const art::DexFile> new_dex_file = art::DexFile::Open(map->GetName(),
+ checksum,
+ std::move(map),
+ /*verify*/ true,
+ /*verify_checksum*/ true,
+ &error_msg);
+ CHECK(new_dex_file.get() != nullptr) << "Unable to load dex file! " << error_msg;
+
+ // Get mutator lock. We need the lifetimes of these variables (hs, the classes, etc.) to be longer
+ // then current lock (since there isn't upgrading of the lock) so we don't use soa.
+ art::ThreadState old_state = self->TransitionFromSuspendedToRunnable();
+ // This scope is needed to make sure that the HandleScope dies with mutator_lock_ since we need to
+ // upgrade the mutator_lock during the execution.
+ {
+ art::StackHandleScope<11> hs(self);
+ art::Handle<art::mirror::ClassLoader> null_loader(
+ hs.NewHandle<art::mirror::ClassLoader>(nullptr));
+ CHECK(null_loader.Get() == nullptr);
+ art::ArtField* dex_file_cookie_field = class_linker->
+ FindClass(self, dex_file_name, null_loader)->
+ FindDeclaredInstanceField("mCookie", "Ljava/lang/Object;");
+ art::ArtField* dex_file_internal_cookie_field =
+ class_linker->FindClass(self, dex_file_name, null_loader)
+ ->FindDeclaredInstanceField("mInternalCookie", "Ljava/lang/Object;");
+ CHECK(dex_file_cookie_field != nullptr);
+ art::Handle<art::mirror::Class> klass(
+ hs.NewHandle(art::down_cast<art::mirror::Class*>(self->DecodeJObject(jklass))));
+ art::mirror::Object* dex_file_ptr = nullptr;
+ art::mirror::ClassLoader* class_loader_ptr = nullptr;
+ // Find dalvik.system.DexFile that represents the dex file we are changing.
+ if (!FindDalvikSystemDexFileAndLoaderForClass(klass, &dex_file_ptr, &class_loader_ptr)) {
+ self->TransitionFromRunnableToSuspended(old_state);
+ LOG(art::ERROR) << "Could not find DexFile.";
+ return ERR(INTERNAL);
+ }
+ art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle(dex_file_ptr));
+ art::Handle<art::mirror::ClassLoader> class_loader(hs.NewHandle(class_loader_ptr));
+ art::Handle<art::mirror::LongArray> art_dex_array(
+ hs.NewHandle<art::mirror::LongArray>(
+ dex_file_cookie_field->GetObject(dex_file_obj.Get())->AsLongArray()));
+ art::Handle<art::mirror::LongArray> new_art_dex_array(
+ hs.NewHandle<art::mirror::LongArray>(
+ InsertDexFileIntoArray(self, new_dex_file.get(), art_dex_array)));
+ art::Handle<art::mirror::DexCache> cache(
+ hs.NewHandle(class_linker->RegisterDexFile(*new_dex_file.get(), class_loader.Get())));
+ self->TransitionFromRunnableToSuspended(old_state);
+
+ threads->SuspendAll("moving dex file into runtime", /*long_suspend*/true);
+ // Change the mCookie field. Old value will be GC'd as normal.
+ dex_file_cookie_field->SetObject<false>(dex_file_obj.Get(), new_art_dex_array.Get());
+ dex_file_internal_cookie_field->SetObject<false>(dex_file_obj.Get(), new_art_dex_array.Get());
+ // Invalidate existing methods.
+ InvalidateExistingMethods(self, klass, cache, new_dex_file.release());
+ }
+ threads->ResumeAll();
+ return OK;
+}
+
+} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/transform.h b/runtime/openjdkjvmti/transform.h
new file mode 100644
index 0000000..85bcb00
--- /dev/null
+++ b/runtime/openjdkjvmti/transform.h
@@ -0,0 +1,64 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_TRANSFORM_H_
+#define ART_RUNTIME_OPENJDKJVMTI_TRANSFORM_H_
+
+#include <string>
+
+#include <jni.h>
+
+#include "art_jvmti.h"
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+// Gets the data surrounding the given class.
+jvmtiError GetTransformationData(ArtJvmTiEnv* env,
+ jclass klass,
+ /*out*/std::string* location,
+ /*out*/JNIEnv** jni_env_ptr,
+ /*out*/jobject* loader,
+ /*out*/std::string* name,
+ /*out*/jobject* protection_domain,
+ /*out*/jint* data_len,
+ /*out*/unsigned char** dex_data);
+
+// Install the new dex file.
+jvmtiError MoveTransformedFileIntoRuntime(jclass jklass,
+ std::string original_location,
+ jint data_len,
+ unsigned char* dex_data);
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_TRANSFORM_H_
+
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 15e3b1c..2be3b52 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -856,9 +856,9 @@
const OatHeader& boot_oat_header = oat_file->GetOatHeader();
const char* boot_cp = boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
if (boot_cp != nullptr) {
- gc::space::ImageSpace::CreateMultiImageLocations(image_locations[0],
- boot_cp,
- &image_locations);
+ gc::space::ImageSpace::ExtractMultiImageLocations(image_locations[0],
+ boot_cp,
+ &image_locations);
}
}
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 6f10aaa..b52e2f2 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -52,6 +52,77 @@
namespace art {
+static const uint8_t kBase64Map[256] = {
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255,
+ 255, 254, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, // NOLINT
+ 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, // NOLINT
+ 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, // NOLINT
+ 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, // NOLINT
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255
+};
+
+uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
+ std::vector<uint8_t> tmp;
+ uint32_t t = 0, y = 0;
+ int g = 3;
+ for (size_t i = 0; src[i] != '\0'; ++i) {
+ uint8_t c = kBase64Map[src[i] & 0xFF];
+ if (c == 255) continue;
+ // the final = symbols are read and used to trim the remaining bytes
+ if (c == 254) {
+ c = 0;
+ // prevent g < 0 which would potentially allow an overflow later
+ if (--g < 0) {
+ *dst_size = 0;
+ return nullptr;
+ }
+ } else if (g != 3) {
+ // we only allow = to be at the end
+ *dst_size = 0;
+ return nullptr;
+ }
+ t = (t << 6) | c;
+ if (++y == 4) {
+ tmp.push_back((t >> 16) & 255);
+ if (g > 1) {
+ tmp.push_back((t >> 8) & 255);
+ }
+ if (g > 2) {
+ tmp.push_back(t & 255);
+ }
+ y = t = 0;
+ }
+ }
+ if (y != 0) {
+ *dst_size = 0;
+ return nullptr;
+ }
+ std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
+  if (dst_size != nullptr) {
+    *dst_size = tmp.size();
+  }
+  // NOTE: the error paths above already store through dst_size
+  // unconditionally, so callers must always pass a non-null pointer.
+ std::copy(tmp.begin(), tmp.end(), dst.get());
+ return dst.release();
+}
+
pid_t GetTid() {
#if defined(__APPLE__)
uint64_t owner;
diff --git a/runtime/utils.h b/runtime/utils.h
index f3284e8..e65b947 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -116,6 +116,8 @@
return static_cast<typename std::make_unsigned<T>::type>(x);
}
+uint8_t* DecodeBase64(const char* src, size_t* dst_size);
+
std::string PrintableChar(uint16_t ch);
// Returns an ASCII string corresponding to the given UTF-8 string.
diff --git a/test/005-annotations/build b/test/005-annotations/build
index 216843d..8b9f550 100644
--- a/test/005-annotations/build
+++ b/test/005-annotations/build
@@ -30,7 +30,8 @@
if [ ${USE_JACK} = "true" ]; then
jar cf classes.jill.jar -C classes .
- ${JACK} --import classes.jill.jar --output-dex .
+ # Jack needs to emit annotations with CLASS retention.
+ ${JACK} -D jack.dex.annotation.class-retention=true --import classes.jill.jar --output-dex .
else
if [ ${NEED_DEX} = "true" ]; then
${DX} -JXmx256m --debug --dex --output=classes.dex classes
diff --git a/test/458-checker-instruct-simplification/src/Main.java b/test/458-checker-instruct-simplification/src/Main.java
index 5b14735..e71a0e1 100644
--- a/test/458-checker-instruct-simplification/src/Main.java
+++ b/test/458-checker-instruct-simplification/src/Main.java
@@ -1178,16 +1178,28 @@
* remove the second.
*/
+ /// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Result:z\d+>> InvokeStaticOrDirect
+ /// CHECK-DAG: <<NotResult:i\d+>> Xor [<<Result>>,<<Const1>>]
+ /// CHECK-DAG: Return [<<NotResult>>]
+
+ /// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:z\d+>> InvokeStaticOrDirect
+ /// CHECK-DAG: <<NotResult:z\d+>> BooleanNot [<<Result>>]
+ /// CHECK-DAG: Return [<<NotResult>>]
+
/// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier$after_bce (before)
/// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<NotArg:i\d+>> Select [<<Const1>>,<<Const0>>,<<Arg>>]
- /// CHECK-DAG: <<NotNotArg:i\d+>> Select [<<Const1>>,<<Const0>>,<<NotArg>>]
+ /// CHECK-DAG: <<NotArg:z\d+>> BooleanNot [<<Arg>>]
+ /// CHECK-DAG: <<NotNotArg:z\d+>> BooleanNot [<<NotArg>>]
/// CHECK-DAG: Return [<<NotNotArg>>]
/// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ /// CHECK-DAG: <<NotArg:z\d+>> BooleanNot [<<Arg>>]
/// CHECK-DAG: Return [<<Arg>>]
public static boolean NegateValue(boolean arg) {
diff --git a/test/463-checker-boolean-simplifier/smali/BooleanNotDx.smali b/test/463-checker-boolean-simplifier/smali/BooleanNotDx.smali
new file mode 100644
index 0000000..765d0eb
--- /dev/null
+++ b/test/463-checker-boolean-simplifier/smali/BooleanNotDx.smali
@@ -0,0 +1,65 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LBooleanNotSmali;
+.super Ljava/lang/Object;
+
+#
+# Elementary test negating a boolean. Verifies that blocks are merged and
+# empty branches removed.
+#
+
+## CHECK-START: boolean BooleanNotSmali.BooleanNot(boolean) select_generator (before)
+## CHECK-DAG: <<Param:z\d+>> ParameterValue
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: If [<<Param>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<Const0>>,<<Const1>>]
+## CHECK-DAG: Return [<<Phi>>]
+
+## CHECK-START: boolean BooleanNotSmali.BooleanNot(boolean) select_generator (before)
+## CHECK: Goto
+## CHECK: Goto
+## CHECK: Goto
+## CHECK-NOT: Goto
+
+## CHECK-START: boolean BooleanNotSmali.BooleanNot(boolean) select_generator (after)
+## CHECK-DAG: <<Param:z\d+>> ParameterValue
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: <<NotParam:i\d+>> Select [<<Const1>>,<<Const0>>,<<Param>>]
+## CHECK-DAG: Return [<<NotParam>>]
+
+## CHECK-START: boolean BooleanNotSmali.BooleanNot(boolean) select_generator (after)
+## CHECK-NOT: If
+## CHECK-NOT: Phi
+
+## CHECK-START: boolean BooleanNotSmali.BooleanNot(boolean) select_generator (after)
+## CHECK: Goto
+## CHECK-NOT: Goto
+
+.method public static BooleanNot(Z)Z
+ .registers 2
+
+ if-eqz v1, :true_start
+ const/4 v0, 0x0
+
+:return_start
+ return v0
+
+:true_start
+ const/4 v0, 0x1
+ goto :return_start
+
+.end method
diff --git a/test/463-checker-boolean-simplifier/src/Main.java b/test/463-checker-boolean-simplifier/src/Main.java
index f0fe1b1..9368488 100644
--- a/test/463-checker-boolean-simplifier/src/Main.java
+++ b/test/463-checker-boolean-simplifier/src/Main.java
@@ -32,42 +32,14 @@
}
}
- /*
- * Elementary test negating a boolean. Verifies that blocks are merged and
- * empty branches removed.
- */
-
- /// CHECK-START: boolean Main.BooleanNot(boolean) select_generator (before)
- /// CHECK-DAG: <<Param:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: If [<<Param>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Const0>>,<<Const1>>]
- /// CHECK-DAG: Return [<<Phi>>]
-
- /// CHECK-START: boolean Main.BooleanNot(boolean) select_generator (before)
- /// CHECK: Goto
- /// CHECK: Goto
- /// CHECK: Goto
- /// CHECK-NOT: Goto
-
- /// CHECK-START: boolean Main.BooleanNot(boolean) select_generator (after)
- /// CHECK-DAG: <<Param:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<NotParam:i\d+>> Select [<<Const1>>,<<Const0>>,<<Param>>]
- /// CHECK-DAG: Return [<<NotParam>>]
-
- /// CHECK-START: boolean Main.BooleanNot(boolean) select_generator (after)
- /// CHECK-NOT: If
- /// CHECK-NOT: Phi
-
- /// CHECK-START: boolean Main.BooleanNot(boolean) select_generator (after)
- /// CHECK: Goto
- /// CHECK-NOT: Goto
-
- public static boolean BooleanNot(boolean x) {
- return !x;
+ // Invoke a method written in smali that implements the boolean ! operator. This method
+ // uses the if/else pattern generated by dx (while Jack generates a different pattern).
+ // Since this method is in a smali-generated class, we invoke it through reflection.
+ public static boolean SmaliBooleanNot(boolean x) throws Exception {
+ Class<?> c = Class.forName("BooleanNotSmali");
+ java.lang.reflect.Method method = c.getMethod("BooleanNot", boolean.class);
+ Object retValue = method.invoke(null, new Object[] { Boolean.valueOf(x) });
+ return ((Boolean) retValue).booleanValue();
}
/*
@@ -357,9 +329,9 @@
return x ? 42 : (write_field = 43);
}
- public static void main(String[] args) {
- assertBoolEquals(false, BooleanNot(true));
- assertBoolEquals(true, BooleanNot(false));
+ public static void main(String[] args) throws Exception {
+ assertBoolEquals(false, SmaliBooleanNot(true));
+ assertBoolEquals(true, SmaliBooleanNot(false));
assertBoolEquals(true, GreaterThan(10, 5));
assertBoolEquals(false, GreaterThan(10, 10));
assertBoolEquals(false, GreaterThan(5, 10));
diff --git a/test/565-checker-doublenegbitwise/src/Main.java b/test/565-checker-doublenegbitwise/src/Main.java
index 811c280..5ccc648 100644
--- a/test/565-checker-doublenegbitwise/src/Main.java
+++ b/test/565-checker-doublenegbitwise/src/Main.java
@@ -70,20 +70,19 @@
* same pass.
*/
- /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier$after_bce (before)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier (before)
/// CHECK: <<P1:z\d+>> ParameterValue
/// CHECK: <<P2:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK: <<Select1:i\d+>> Select [<<Const1>>,<<Const0>>,<<P1>>]
- /// CHECK: <<Select2:i\d+>> Select [<<Const1>>,<<Const0>>,<<P2>>]
- /// CHECK: <<And:i\d+>> And [<<Select2>>,<<Select1>>]
+ /// CHECK-DAG: <<NotP1:i\d+>> Xor [<<P1>>,<<Const1>>]
+ /// CHECK-DAG: <<NotP2:i\d+>> Xor [<<P2>>,<<Const1>>]
+ /// CHECK: <<And:i\d+>> And [<<NotP1>>,<<NotP2>>]
/// CHECK: Return [<<And>>]
- /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier$after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier (after)
/// CHECK: <<Cond1:z\d+>> ParameterValue
/// CHECK: <<Cond2:z\d+>> ParameterValue
- /// CHECK: <<Or:i\d+>> Or [<<Cond2>>,<<Cond1>>]
+ /// CHECK: <<Or:i\d+>> Or [<<Cond1>>,<<Cond2>>]
/// CHECK: <<BooleanNot:z\d+>> BooleanNot [<<Or>>]
/// CHECK: Return [<<BooleanNot>>]
@@ -138,20 +137,19 @@
* same pass.
*/
- /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier$after_bce (before)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier (before)
/// CHECK: <<P1:z\d+>> ParameterValue
/// CHECK: <<P2:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK: <<Select1:i\d+>> Select [<<Const1>>,<<Const0>>,<<P1>>]
- /// CHECK: <<Select2:i\d+>> Select [<<Const1>>,<<Const0>>,<<P2>>]
- /// CHECK: <<Or:i\d+>> Or [<<Select2>>,<<Select1>>]
+ /// CHECK: <<NotP1:i\d+>> Xor [<<P1>>,<<Const1>>]
+ /// CHECK: <<NotP2:i\d+>> Xor [<<P2>>,<<Const1>>]
+ /// CHECK: <<Or:i\d+>> Or [<<NotP1>>,<<NotP2>>]
/// CHECK: Return [<<Or>>]
- /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier$after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier (after)
/// CHECK: <<Cond1:z\d+>> ParameterValue
/// CHECK: <<Cond2:z\d+>> ParameterValue
- /// CHECK: <<And:i\d+>> And [<<Cond2>>,<<Cond1>>]
+ /// CHECK: <<And:i\d+>> And [<<Cond1>>,<<Cond2>>]
/// CHECK: <<BooleanNot:z\d+>> BooleanNot [<<And>>]
/// CHECK: Return [<<BooleanNot>>]
@@ -246,20 +244,19 @@
* same pass.
*/
- /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier$after_bce (before)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier (before)
/// CHECK: <<P1:z\d+>> ParameterValue
/// CHECK: <<P2:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK: <<Select1:i\d+>> Select [<<Const1>>,<<Const0>>,<<P1>>]
- /// CHECK: <<Select2:i\d+>> Select [<<Const1>>,<<Const0>>,<<P2>>]
- /// CHECK: <<Xor:i\d+>> Xor [<<Select2>>,<<Select1>>]
+ /// CHECK: <<NotP1:i\d+>> Xor [<<P1>>,<<Const1>>]
+ /// CHECK: <<NotP2:i\d+>> Xor [<<P2>>,<<Const1>>]
+ /// CHECK: <<Xor:i\d+>> Xor [<<NotP1>>,<<NotP2>>]
/// CHECK: Return [<<Xor>>]
- /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier$after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier (after)
/// CHECK: <<Cond1:z\d+>> ParameterValue
/// CHECK: <<Cond2:z\d+>> ParameterValue
- /// CHECK: <<Xor:i\d+>> Xor [<<Cond2>>,<<Cond1>>]
+ /// CHECK: <<Xor:i\d+>> Xor [<<Cond1>>,<<Cond2>>]
/// CHECK: Return [<<Xor>>]
/// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier$after_bce (after)
diff --git a/tools/javafuzz/Android.mk b/test/902-hello-transformation/build
old mode 100644
new mode 100755
similarity index 62%
copy from tools/javafuzz/Android.mk
copy to test/902-hello-transformation/build
index 63db57a..898e2e5
--- a/tools/javafuzz/Android.mk
+++ b/test/902-hello-transformation/build
@@ -1,4 +1,6 @@
-# Copyright (C) 2016 The Android Open Source Project
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,14 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Java fuzzer tool.
-
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := javafuzz.cc
-LOCAL_CFLAGS += -O0 -g -Wall
-LOCAL_MODULE_HOST_OS := darwin linux windows
-LOCAL_MODULE := javafuzz
-include $(BUILD_HOST_EXECUTABLE)
+./default-build "$@" --experimental agents
diff --git a/test/902-hello-transformation/expected.txt b/test/902-hello-transformation/expected.txt
new file mode 100644
index 0000000..e86e814
--- /dev/null
+++ b/test/902-hello-transformation/expected.txt
@@ -0,0 +1,3 @@
+Hello
+modifying class 'Transform'
+Goodbye
diff --git a/test/902-hello-transformation/info.txt b/test/902-hello-transformation/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/902-hello-transformation/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/902-hello-transformation/run b/test/902-hello-transformation/run
new file mode 100755
index 0000000..204e4cc
--- /dev/null
+++ b/test/902-hello-transformation/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+else
+ other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --runtime-option -agentpath:${agent}=902-hello-transformation,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/902-hello-transformation/src/Main.java b/test/902-hello-transformation/src/Main.java
new file mode 100644
index 0000000..204b6e7
--- /dev/null
+++ b/test/902-hello-transformation/src/Main.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ System.loadLibrary(args[1]);
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi();
+ doClassTransformation(Transform.class);
+ t.sayHi();
+ }
+
+ // Transforms the class
+  private static native void doClassTransformation(Class<?> target);
+}
diff --git a/test/902-hello-transformation/src/Transform.java b/test/902-hello-transformation/src/Transform.java
new file mode 100644
index 0000000..dc0a0c4
--- /dev/null
+++ b/test/902-hello-transformation/src/Transform.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ public void sayHi() {
+ System.out.println("Hello");
+ }
+}
diff --git a/test/902-hello-transformation/transform.cc b/test/902-hello-transformation/transform.cc
new file mode 100644
index 0000000..e0d623e
--- /dev/null
+++ b/test/902-hello-transformation/transform.cc
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <memory>
+
+#include "art_method-inl.h"
+#include "base/logging.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "utils.h"
+
+namespace art {
+namespace Test902HelloTransformation {
+
+static bool RuntimeIsJvm = false;
+
+jvmtiEnv* jvmti_env;
+bool IsJVM() {
+ return RuntimeIsJvm;
+}
+
+// base64 encoded class/dex file for
+//
+// class Transform {
+// public void sayHi() {
+// System.out.println("Goodbye");
+// }
+// }
+const char* class_file_base64 =
+ "yv66vgAAADQAHAoABgAOCQAPABAIABEKABIAEwcAFAcAFQEABjxpbml0PgEAAygpVgEABENvZGUB"
+ "AA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwA"
+ "BwAIBwAWDAAXABgBAAdHb29kYnllBwAZDAAaABsBAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2Jq"
+ "ZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2ph"
+ "dmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAABQAG"
+ "AAAAAAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAEQABAAsACAAB"
+ "AAkAAAAlAAIAAQAAAAmyAAISA7YABLEAAAABAAoAAAAKAAIAAAATAAgAFAABAAwAAAACAA0=";
+
+const char* dex_file_base64 =
+ "ZGV4CjAzNQCLXSBQ5FiS3f16krSYZFF8xYZtFVp0GRXMAgAAcAAAAHhWNBIAAAAAAAAAACwCAAAO"
+ "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACsAQAAIAEAAGIB"
+ "AABqAQAAcwEAAIABAACXAQAAqwEAAL8BAADTAQAA4wEAAOYBAADqAQAA/gEAAAMCAAAMAgAAAgAA"
+ "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA"
+ "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAAB4CAAAA"
+ "AAAAAQABAAEAAAATAgAABAAAAHAQAwAAAA4AAwABAAIAAAAYAgAACQAAAGIAAAAbAQEAAABuIAIA"
+ "EAAOAAAAAQAAAAMABjxpbml0PgAHR29vZGJ5ZQALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50"
+ "U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xh"
+ "bmcvU3lzdGVtOwAOVHJhbnNmb3JtLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTMuMzYAA291"
+ "dAAHcHJpbnRsbgAFc2F5SGkAEQAHDgATAAcOhQAAAAEBAICABKACAQG4Ag0AAAAAAAAAAQAAAAAA"
+ "AAABAAAADgAAAHAAAAACAAAABgAAAKgAAAADAAAAAgAAAMAAAAAEAAAAAQAAANgAAAAFAAAABAAA"
+ "AOAAAAAGAAAAAQAAAAABAAABIAAAAgAAACABAAABEAAAAQAAAFwBAAACIAAADgAAAGIBAAADIAAA"
+ "AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=";
+
+static void JNICALL transformationHook(jvmtiEnv *jvmtienv,
+ JNIEnv* jni_env ATTRIBUTE_UNUSED,
+ jclass class_being_redefined ATTRIBUTE_UNUSED,
+ jobject loader ATTRIBUTE_UNUSED,
+ const char* name,
+ jobject protection_domain ATTRIBUTE_UNUSED,
+ jint class_data_len ATTRIBUTE_UNUSED,
+ const unsigned char* class_data ATTRIBUTE_UNUSED,
+ jint* new_class_data_len,
+ unsigned char** new_class_data) {
+ if (strcmp("Transform", name)) {
+ return;
+ }
+ printf("modifying class '%s'\n", name);
+ bool is_jvm = IsJVM();
+ size_t decode_len = 0;
+ unsigned char* new_data;
+ std::unique_ptr<uint8_t[]> file_data(
+ DecodeBase64((is_jvm) ? class_file_base64 : dex_file_base64, &decode_len));
+ jvmtiError ret = JVMTI_ERROR_NONE;
+ if ((ret = jvmtienv->Allocate(static_cast<jlong>(decode_len), &new_data)) != JVMTI_ERROR_NONE) {
+ printf("Unable to allocate buffer!\n");
+ return;
+ }
+ memcpy(new_data, file_data.get(), decode_len);
+ *new_class_data_len = static_cast<jint>(decode_len);
+ *new_class_data = new_data;
+ return;
+}
+
+using RetransformWithHookFunction = jvmtiError (*)(jvmtiEnv*, jclass, jvmtiEventClassFileLoadHook);
+static void DoClassTransformation(jvmtiEnv* jvmtienv, JNIEnv* jnienv, jclass target) {
+ if (IsJVM()) {
+ UNUSED(jnienv);
+ jvmtienv->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, nullptr);
+ jvmtiError ret = jvmtienv->RetransformClasses(1, &target);
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmtienv->GetErrorName(ret, &err);
+ printf("Error transforming: %s\n", err);
+ }
+ } else {
+ RetransformWithHookFunction f =
+ reinterpret_cast<RetransformWithHookFunction>(jvmtienv->functions->reserved1);
+ if (f(jvmtienv, target, transformationHook) != JVMTI_ERROR_NONE) {
+      printf("Failed to transform class!\n");
+ return;
+ }
+ }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_doClassTransformation(JNIEnv* env,
+ jclass,
+ jclass target) {
+ JavaVM* vm;
+ if (env->GetJavaVM(&vm)) {
+ printf("Unable to get javaVM!\n");
+ return;
+ }
+ DoClassTransformation(jvmti_env, env, target);
+}
+
+// Don't do anything
+jint OnLoad(JavaVM* vm,
+ char* options,
+ void* reserved ATTRIBUTE_UNUSED) {
+ jvmtiCapabilities caps;
+ RuntimeIsJvm = (strcmp("jvm", options) == 0);
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ if (IsJVM()) {
+ jvmti_env->GetPotentialCapabilities(&caps);
+ jvmti_env->AddCapabilities(&caps);
+ jvmtiEventCallbacks cbs;
+ memset(&cbs, 0, sizeof(cbs));
+ cbs.ClassFileLoadHook = transformationHook;
+ jvmti_env->SetEventCallbacks(&cbs, sizeof(jvmtiEventCallbacks));
+ }
+ return 0;
+}
+
+} // namespace Test902HelloTransformation
+} // namespace art
+
diff --git a/test/902-hello-transformation/transform.h b/test/902-hello-transformation/transform.h
new file mode 100644
index 0000000..661058d
--- /dev/null
+++ b/test/902-hello-transformation/transform.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_902_HELLO_TRANSFORMATION_TRANSFORM_H_
+#define ART_TEST_902_HELLO_TRANSFORMATION_TRANSFORM_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test902HelloTransformation {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test902HelloTransformation
+} // namespace art
+
+#endif // ART_TEST_902_HELLO_TRANSFORMATION_TRANSFORM_H_
diff --git a/test/Android.bp b/test/Android.bp
index ff408f4..2d61000 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -40,6 +40,9 @@
enabled: false,
},
},
+ cflags: [
+ "-Wno-frame-larger-than=",
+ ],
}
art_cc_defaults {
@@ -239,6 +242,7 @@
srcs: [
"ti-agent/common_load.cc",
"901-hello-ti-agent/basics.cc",
+ "902-hello-transformation/transform.cc",
],
shared_libs: [
"libart",
@@ -255,6 +259,7 @@
srcs: [
"ti-agent/common_load.cc",
"901-hello-ti-agent/basics.cc",
+ "902-hello-transformation/transform.cc",
],
shared_libs: [
"libartd",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 3bcea18..0497735 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -346,9 +346,7 @@
TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
# Temporarily disable some broken tests when forcing access checks in interpreter b/22414682
-# 004-JniTest is disabled because @CriticalNative is unsupported by generic JNI b/31400248
TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS := \
- 004-JniTest \
137-cfi
ifneq (,$(filter interp-ac,$(COMPILER_TYPES)))
@@ -407,11 +405,9 @@
# All these tests check that we have sane behavior if we don't have a patchoat or dex2oat.
# Therefore we shouldn't run them in situations where we actually don't have these since they
# explicitly test for them. These all also assume we have an image.
-# 004-JniTest is disabled because @CriticalNative is unsupported by generic JNI b/31400248
# 147-stripped-dex-fallback is disabled because it requires --prebuild.
# 554-jit-profile-file is disabled because it needs a primary oat file to know what it should save.
TEST_ART_BROKEN_FALLBACK_RUN_TESTS := \
- 004-JniTest \
116-nodex2oat \
117-nopatchoat \
118-noimage-dex2oat \
@@ -485,9 +481,7 @@
# Known broken tests for the JIT.
# CFI unwinding expects managed frames, and the test does not iterate enough to even compile. JIT
# also uses Generic JNI instead of the JNI compiler.
-# 004-JniTest is disabled because @CriticalNative is unsupported by generic JNI b/31400248
TEST_ART_BROKEN_JIT_RUN_TESTS := \
- 004-JniTest \
137-cfi
ifneq (,$(filter jit,$(COMPILER_TYPES)))
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index a445f4d..c51cb0d 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -310,8 +310,9 @@
fi
if [ "$USE_JVM" = "y" ]; then
+ export LD_LIBRARY_PATH=${ANDROID_HOST_OUT}/lib64
# Xmx is necessary since we don't pass down the ART flags to JVM.
- cmdline="${JAVA} ${DEBUGGER_OPTS} ${JVM_VERIFY_ARG} -Xmx256m -classpath classes ${FLAGS} $MAIN $@"
+ cmdline="${JAVA} ${DEBUGGER_OPTS} ${JVM_VERIFY_ARG} -Xmx256m -classpath classes ${FLAGS} $MAIN $@ ${ARGS}"
if [ "$DEV_MODE" = "y" ]; then
echo $cmdline
fi
diff --git a/test/run-test b/test/run-test
index ae53f9e..250263a 100755
--- a/test/run-test
+++ b/test/run-test
@@ -743,9 +743,7 @@
fi
fi
-if [ "$runtime" != "jvm" ]; then
run_args="${run_args} --testlib ${testlib}"
-fi
# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and ART output to 2MB.
build_file_size_limit=2048
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index ed280e4..53bb153 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -24,6 +24,7 @@
#include "base/macros.h"
#include "901-hello-ti-agent/basics.h"
+#include "902-hello-transformation/transform.h"
namespace art {
@@ -39,6 +40,7 @@
// A list of all the agents we have for testing.
AgentLib agents[] = {
{ "901-hello-ti-agent", Test901HelloTi::OnLoad, nullptr },
+ { "902-hello-transformation", Test902HelloTransformation::OnLoad, nullptr },
};
static AgentLib* FindAgent(char* name) {
diff --git a/tools/javafuzz/Android.mk b/tools/jfuzz/Android.mk
similarity index 90%
rename from tools/javafuzz/Android.mk
rename to tools/jfuzz/Android.mk
index 63db57a..c7002d6 100644
--- a/tools/javafuzz/Android.mk
+++ b/tools/jfuzz/Android.mk
@@ -12,14 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Java fuzzer tool.
+# Fuzzer tool.
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := javafuzz.cc
+LOCAL_SRC_FILES := jfuzz.cc
LOCAL_CFLAGS += -O0 -g -Wall
LOCAL_MODULE_HOST_OS := darwin linux windows
-LOCAL_MODULE := javafuzz
+LOCAL_MODULE := jfuzz
include $(BUILD_HOST_EXECUTABLE)
diff --git a/tools/javafuzz/README.md b/tools/jfuzz/README.md
similarity index 86%
rename from tools/javafuzz/README.md
rename to tools/jfuzz/README.md
index a70e4c1..1d566a9 100644
--- a/tools/javafuzz/README.md
+++ b/tools/jfuzz/README.md
@@ -1,19 +1,19 @@
-JavaFuzz
-========
+JFuzz
+=====
-JavaFuzz is a tool for generating random Java programs with the objective
-of fuzz testing the ART infrastructure. Each randomly generated Java program
+JFuzz is a tool for generating random programs with the objective
+of fuzz testing the ART infrastructure. Each randomly generated program
can be run under various modes of execution, such as using the interpreter,
using the optimizing compiler, using an external reference implementation,
or using various target architectures. Any difference between the outputs
(**divergence**) may indicate a bug in one of the execution modes.
-JavaFuzz can be combined with DexFuzz to get multi-layered fuzz testing.
+JFuzz can be combined with DexFuzz to get multi-layered fuzz testing.
-How to run JavaFuzz
+How to run JFuzz
===================
- javafuzz [-s seed] [-d expr-depth] [-l stmt-length]
+ jfuzz [-s seed] [-d expr-depth] [-l stmt-length]
[-i if-nest] [-n loop-nest]
where
@@ -29,17 +29,17 @@
-n : defines a fuzzing nest for for/while/do-while loops
(higher values yield deeper nested loops)
-The current version of JavaFuzz sends all output to stdout, and uses
+The current version of JFuzz sends all output to stdout, and uses
a fixed testing class named Test. So a typical test run looks as follows.
- javafuzz > Test.java
+ jfuzz > Test.java
jack -cp ${JACK_CLASSPATH} --output-dex . Test.java
art -classpath classes.dex Test
-How to start JavaFuzz testing
+How to start JFuzz testing
=============================
- run_java_fuzz_test.py
+ run_jfuzz_test.py
[--num_tests=NUM_TESTS]
[--device=DEVICE]
[--mode1=MODE] [--mode2=MODE]
@@ -56,8 +56,8 @@
tint = Art interpreter on target
topt = Art optimizing on target
-How to start Java/DexFuzz testing (multi-layered)
-=================================================
+How to start J/DexFuzz testing (multi-layered)
+==============================================
run_dex_fuzz_test.py
[--num_tests=NUM_TESTS]
@@ -67,7 +67,7 @@
where
--num_tests : number of tests to run (10000 by default)
- --num_inputs: number of JavaFuzz programs to generate
+ --num_inputs: number of JFuzz programs to generate
--device : target device serial number (passed to adb -s)
Background
diff --git a/tools/javafuzz/__init__.py b/tools/jfuzz/__init__.py
similarity index 100%
rename from tools/javafuzz/__init__.py
rename to tools/jfuzz/__init__.py
diff --git a/tools/javafuzz/javafuzz.cc b/tools/jfuzz/jfuzz.cc
similarity index 96%
rename from tools/javafuzz/javafuzz.cc
rename to tools/jfuzz/jfuzz.cc
index 161ae0a..125b56a 100644
--- a/tools/javafuzz/javafuzz.cc
+++ b/tools/jfuzz/jfuzz.cc
@@ -26,7 +26,7 @@
namespace {
/*
- * Java operators.
+ * Operators.
*/
#define EMIT(x) fputs((x)[random0(sizeof(x)/sizeof(const char*))], out_);
@@ -49,33 +49,33 @@
static constexpr const char* kRelOps[] = { "==", "!=", ">", ">=", "<", "<=" };
/*
- * Version of JavaFuzz. Increase this each time changes are made to the program
- * to preserve the property that a given version of JavaFuzz yields the same
- * fuzzed Java program for a deterministic random seed.
+ * Version of JFuzz. Increase this each time changes are made to the program
+ * to preserve the property that a given version of JFuzz yields the same
+ * fuzzed program for a deterministic random seed.
*/
const char* VERSION = "1.1";
static const uint32_t MAX_DIMS[11] = { 0, 1000, 32, 10, 6, 4, 3, 3, 2, 2, 2 };
/**
- * A class that generates a random Java program that compiles correctly. The program
+ * A class that generates a random program that compiles correctly. The program
* is generated using rules that generate various programming constructs. Each rule
* has a fixed probability to "fire". Running a generated program yields deterministic
* output, making it suited to test various modes of execution (e.g an interpreter vs.
* an compiler or two different run times) for divergences.
*
- * TODO: Due to the original scope of this project, the generated Java program is heavy
- * on loops, arrays, and basic operations; fuzzing other aspects of Java programs,
- * like elaborate typing, class hierarchies, and interfaces is still TBD.
+ * TODO: Due to the original scope of this project, the generated program is heavy
+ * on loops, arrays, and basic operations; fuzzing other aspects, like elaborate
+ * typing, class hierarchies, and interfaces is still TBD.
*/
-class JavaFuzz {
+class JFuzz {
public:
- JavaFuzz(FILE* out,
- uint32_t seed,
- uint32_t expr_depth,
- uint32_t stmt_length,
- uint32_t if_nest,
- uint32_t loop_nest)
+ JFuzz(FILE* out,
+ uint32_t seed,
+ uint32_t expr_depth,
+ uint32_t stmt_length,
+ uint32_t if_nest,
+ uint32_t loop_nest)
: out_(out),
fuzz_random_engine_(seed),
fuzz_seed_(seed),
@@ -100,7 +100,7 @@
float_local_(0),
double_local_(0) { }
- ~JavaFuzz() { }
+ ~JFuzz() { }
void emitProgram() {
emitHeader();
@@ -978,10 +978,10 @@
// Emit program header. Emit command line options in the comments.
void emitHeader() {
- fputs("\n/**\n * AOSP Java Fuzz Tester.\n", out_);
- fputs(" * Automatically generated Java program.\n", out_);
+ fputs("\n/**\n * AOSP JFuzz Tester.\n", out_);
+ fputs(" * Automatically generated program.\n", out_);
fprintf(out_,
- " * javafuzz -s %u -d %u -l %u -i %u -n %u (version %s)\n */\n\n",
+ " * jfuzz -s %u -d %u -l %u -i %u -n %u (version %s)\n */\n\n",
fuzz_seed_,
fuzz_expr_depth_,
fuzz_stmt_length_,
@@ -1101,8 +1101,8 @@
// Seed global random generator.
srand(seed);
- // Generate fuzzed Java program.
- JavaFuzz fuzz(stdout, seed, expr_depth, stmt_length, if_nest, loop_nest);
+ // Generate fuzzed program.
+ JFuzz fuzz(stdout, seed, expr_depth, stmt_length, if_nest, loop_nest);
fuzz.emitProgram();
return 0;
}
diff --git a/tools/javafuzz/run_dex_fuzz_test.py b/tools/jfuzz/run_dex_fuzz_test.py
similarity index 87%
rename from tools/javafuzz/run_dex_fuzz_test.py
rename to tools/jfuzz/run_dex_fuzz_test.py
index ff87aa4..56cdf02 100755
--- a/tools/javafuzz/run_dex_fuzz_test.py
+++ b/tools/jfuzz/run_dex_fuzz_test.py
@@ -37,14 +37,14 @@
class DexFuzzTester(object):
- """Tester that feeds JavaFuzz programs into DexFuzz testing."""
+ """Tester that feeds JFuzz programs into DexFuzz testing."""
def __init__(self, num_tests, num_inputs, device):
"""Constructor for the tester.
Args:
num_tests: int, number of tests to run
- num_inputs: int, number of JavaFuzz programs to generate
+ num_inputs: int, number of JFuzz programs to generate
device: string, target device serial number (or None)
"""
self._num_tests = num_tests
@@ -77,20 +77,20 @@
# TODO: detect divergences or shutil.rmtree(self._results_dir)
def Run(self):
- """Feeds JavaFuzz programs into DexFuzz testing."""
+ """Feeds JFuzz programs into DexFuzz testing."""
print()
- print('**\n**** JavaFuzz Testing\n**')
+ print('**\n**** JFuzz Testing\n**')
print()
print('#Tests :', self._num_tests)
print('Device :', self._device)
print('Directory :', self._results_dir)
print()
- self.GenerateJavaFuzzPrograms()
+ self.GenerateJFuzzPrograms()
self.RunDexFuzz()
- def GenerateJavaFuzzPrograms(self):
- """Generates JavaFuzzPrograms.
+ def GenerateJFuzzPrograms(self):
+ """Generates JFuzz programs.
Raises:
FatalError: error when generation fails
@@ -98,8 +98,8 @@
os.chdir(self._inputs_dir)
for i in range(1, self._num_inputs + 1):
jack_args = ['-cp', GetJackClassPath(), '--output-dex', '.', 'Test.java']
- if RunCommand(['javafuzz'], out='Test.java', err=None) != RetCode.SUCCESS:
- raise FatalError('Unexpected error while running JavaFuzz')
+ if RunCommand(['jfuzz'], out='Test.java', err=None) != RetCode.SUCCESS:
+ raise FatalError('Unexpected error while running JFuzz')
if RunCommand(['jack'] + jack_args, out=None, err='jackerr.txt',
timeout=30) != RetCode.SUCCESS:
raise FatalError('Unexpected error while running Jack')
@@ -128,7 +128,7 @@
parser.add_argument('--num_tests', default=10000,
type=int, help='number of tests to run')
parser.add_argument('--num_inputs', default=50,
- type=int, help='number of JavaFuzz program to generate')
+ type=int, help='number of JFuzz programs to generate')
parser.add_argument('--device', help='target device serial number')
args = parser.parse_args()
# Run the DexFuzz tester.
diff --git a/tools/javafuzz/run_java_fuzz_test.py b/tools/jfuzz/run_jfuzz_test.py
similarity index 92%
rename from tools/javafuzz/run_java_fuzz_test.py
rename to tools/jfuzz/run_jfuzz_test.py
index 6cf3e85..cf2364b 100755
--- a/tools/javafuzz/run_java_fuzz_test.py
+++ b/tools/jfuzz/run_jfuzz_test.py
@@ -208,7 +208,7 @@
device: string, target device serial number (or None)
extra_args: list of strings, extra arguments for dalvikvm
"""
- self._test_env = DeviceTestEnv('javafuzz_', specific_device=device)
+ self._test_env = DeviceTestEnv('jfuzz_', specific_device=device)
self._dalvik_cmd = ['dalvikvm']
if extra_args is not None:
self._dalvik_cmd += extra_args
@@ -299,8 +299,8 @@
#
-class JavaFuzzTester(object):
- """Tester that runs JavaFuzz many times and report divergences."""
+class JFuzzTester(object):
+ """Tester that runs JFuzz many times and report divergences."""
def __init__(self, num_tests, device, mode1, mode2):
"""Constructor for the tester.
@@ -317,7 +317,7 @@
self._runner2 = GetExecutionModeRunner(device, mode2)
self._save_dir = None
self._results_dir = None
- self._javafuzz_dir = None
+ self._jfuzz_dir = None
# Statistics.
self._test = 0
self._num_success = 0
@@ -334,23 +334,23 @@
"""
self._save_dir = os.getcwd()
self._results_dir = mkdtemp(dir='/tmp/')
- self._javafuzz_dir = mkdtemp(dir=self._results_dir)
- if self._results_dir is None or self._javafuzz_dir is None:
+ self._jfuzz_dir = mkdtemp(dir=self._results_dir)
+ if self._results_dir is None or self._jfuzz_dir is None:
raise FatalError('Cannot obtain temp directory')
- os.chdir(self._javafuzz_dir)
+ os.chdir(self._jfuzz_dir)
return self
def __exit__(self, etype, evalue, etraceback):
"""On exit, re-enters previously saved current directory and cleans up."""
os.chdir(self._save_dir)
- shutil.rmtree(self._javafuzz_dir)
+ shutil.rmtree(self._jfuzz_dir)
if self._num_divergences == 0:
shutil.rmtree(self._results_dir)
def Run(self):
- """Runs JavaFuzz many times and report divergences."""
+ """Runs JFuzz many times and report divergences."""
print()
- print('**\n**** JavaFuzz Testing\n**')
+ print('**\n**** JFuzz Testing\n**')
print()
print('#Tests :', self._num_tests)
print('Device :', self._device)
@@ -360,7 +360,7 @@
print()
self.ShowStats()
for self._test in range(1, self._num_tests + 1):
- self.RunJavaFuzzTest()
+ self.RunJFuzzTest()
self.ShowStats()
if self._num_divergences == 0:
print('\n\nsuccess (no divergences)\n')
@@ -378,8 +378,8 @@
end='')
sys.stdout.flush()
- def RunJavaFuzzTest(self):
- """Runs a single JavaFuzz test, comparing two execution modes."""
+ def RunJFuzzTest(self):
+ """Runs a single JFuzz test, comparing two execution modes."""
self.ConstructTest()
retc1 = self._runner1.CompileAndRunTest()
retc2 = self._runner2.CompileAndRunTest()
@@ -387,13 +387,13 @@
self.CleanupTest()
def ConstructTest(self):
- """Use JavaFuzz to generate next Test.java test.
+ """Use JFuzz to generate next Test.java test.
Raises:
- FatalError: error when javafuzz fails
+ FatalError: error when jfuzz fails
"""
- if RunCommand(['javafuzz'], out='Test.java', err=None) != RetCode.SUCCESS:
- raise FatalError('Unexpected error while running JavaFuzz')
+ if RunCommand(['jfuzz'], out='Test.java', err=None) != RetCode.SUCCESS:
+ raise FatalError('Unexpected error while running JFuzz')
def CheckForDivergence(self, retc1, retc2):
"""Checks for divergences and updates statistics.
@@ -477,8 +477,8 @@
def CleanupTest(self):
"""Cleans up after a single test run."""
- for file_name in os.listdir(self._javafuzz_dir):
- file_path = os.path.join(self._javafuzz_dir, file_name)
+ for file_name in os.listdir(self._jfuzz_dir):
+ file_path = os.path.join(self._jfuzz_dir, file_name)
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
@@ -498,8 +498,8 @@
args = parser.parse_args()
if args.mode1 == args.mode2:
raise FatalError('Identical execution modes given')
- # Run the JavaFuzz tester.
- with JavaFuzzTester(args.num_tests, args.device,
+ # Run the JFuzz tester.
+ with JFuzzTester(args.num_tests, args.device,
args.mode1, args.mode2) as fuzzer:
fuzzer.Run()