Merge "Mterp/arm: Add CFI directives."
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 669d8cd..9d39bf2 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -103,6 +103,17 @@
return (it != verified_methods_.end()) ? it->second : nullptr;
}
+void VerificationResults::CreateVerifiedMethodFor(MethodReference ref) {
+ // This method should only be called for classes verified at compile time,
+ // which have no verifier errors and no methods that we know will throw
+ // at runtime.
+ AtomicMap::InsertResult result = atomic_verified_methods_.Insert(
+ ref,
+ /*expected*/ nullptr,
+ new VerifiedMethod(/* encountered_error_types */ 0, /* has_runtime_throw */ false));
+ DCHECK_EQ(result, AtomicMap::kInsertResultSuccess);
+}
+
void VerificationResults::AddRejectedClass(ClassReference ref) {
{
WriterMutexLock mu(Thread::Current(), rejected_classes_lock_);
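
The Insert call in CreateVerifiedMethodFor above relies on insert-if-absent semantics: the new VerifiedMethod is stored only when the key currently holds the expected value (nullptr), which is what the DCHECK_EQ on the result asserts. A minimal sketch of that idiom, using a mutex-guarded std::unordered_map rather than ART's AtomicMap (the names below are illustrative, not the real API):

#include <mutex>
#include <unordered_map>

enum class InsertResult { kSuccess, kCasFailed };

// Stand-in for an insert-if-absent map; the real AtomicMap is lock-free.
template <typename Key, typename Value>
class GuardedMap {
 public:
  InsertResult Insert(const Key& key, Value expected, Value desired) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = map_.find(key);
    // Value() models "absent", e.g. nullptr for pointer values.
    Value current = (it != map_.end()) ? it->second : Value();
    if (current != expected) {
      return InsertResult::kCasFailed;
    }
    map_[key] = desired;
    return InsertResult::kSuccess;
  }

 private:
  std::mutex mutex_;
  std::unordered_map<Key, Value> map_;
};
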
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index ea38f4d..ab735c1 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -47,6 +47,9 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!verified_methods_lock_);
+ void CreateVerifiedMethodFor(MethodReference ref)
+ REQUIRES(!verified_methods_lock_);
+
const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
REQUIRES(!verified_methods_lock_);
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 04331e5..ce53417 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -32,6 +32,8 @@
class VerifiedMethod {
public:
+ VerifiedMethod(uint32_t encountered_error_types, bool has_runtime_throw);
+
// Cast elision set type.
// Since we're adding the dex PCs to the set in increasing order, a sorted vector
// is better for performance (not just memory usage), especially for large sets.
@@ -80,8 +82,6 @@
}
private:
- VerifiedMethod(uint32_t encountered_error_types, bool has_runtime_throw);
-
/*
* Generate the GC map for a method that has just been verified (i.e. we're doing this as part of
* verification). For type-precise determination we have all the data we need, so we just need to
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6b62110..a2bab80 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2005,6 +2005,35 @@
}
}
+static void PopulateVerifiedMethods(const DexFile& dex_file,
+ uint32_t class_def_index,
+ VerificationResults* verification_results) {
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
+ if (class_data == nullptr) {
+ return;
+ }
+ ClassDataItemIterator it(dex_file, class_data);
+ // Skip fields
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ while (it.HasNextInstanceField()) {
+ it.Next();
+ }
+
+ while (it.HasNextDirectMethod()) {
+ verification_results->CreateVerifiedMethodFor(MethodReference(&dex_file, it.GetMemberIndex()));
+ it.Next();
+ }
+
+ while (it.HasNextVirtualMethod()) {
+ verification_results->CreateVerifiedMethodFor(MethodReference(&dex_file, it.GetMemberIndex()));
+ it.Next();
+ }
+ DCHECK(!it.HasNext());
+}
+
void CompilerDriver::Verify(jobject jclass_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
@@ -2041,6 +2070,13 @@
} else if (set.find(class_def.class_idx_) == set.end()) {
ObjectLock<mirror::Class> lock(soa.Self(), cls);
mirror::Class::SetStatus(cls, mirror::Class::kStatusVerified, soa.Self());
+ // Create a `VerifiedMethod` for each method; the compiler expects one for
+ // quickening or compiling.
+ // Note that this means:
+ // - We're only going to compile methods that did verify.
+ // - Quickening will not do checkcast elision.
+ // TODO(ngeoffray): Reconsider this once we refactor compiler filters.
+ PopulateVerifiedMethods(*dex_file, i, verification_results_);
}
}
}
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 5629dff..9bbe595 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -29,6 +29,8 @@
#include "elf_writer_quick.h"
#include "gc/space/image_space.h"
#include "image_writer.h"
+#include "linker/buffered_output_stream.h"
+#include "linker/file_output_stream.h"
#include "linker/multi_oat_relative_patcher.h"
#include "lock_word.h"
#include "mirror/object-inl.h"
@@ -256,6 +258,16 @@
bool image_space_ok = writer->PrepareImageAddressSpace();
ASSERT_TRUE(image_space_ok);
+ if (kIsVdexEnabled) {
+ for (size_t i = 0, size = vdex_files.size(); i != size; ++i) {
+ std::unique_ptr<BufferedOutputStream> vdex_out(
+ MakeUnique<BufferedOutputStream>(
+ MakeUnique<FileOutputStream>(vdex_files[i].GetFile())));
+ oat_writers[i]->WriteVerifierDeps(vdex_out.get(), nullptr);
+ oat_writers[i]->WriteChecksumsAndVdexHeader(vdex_out.get());
+ }
+ }
+
for (size_t i = 0, size = oat_files.size(); i != size; ++i) {
linker::MultiOatRelativePatcher patcher(driver->GetInstructionSet(),
driver->GetInstructionSetFeatures());
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 6aa5642..7bb2bb7 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -433,7 +433,7 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
Thread* const self = Thread::Current();
- ReaderMutexLock mu(self, *class_linker->DexLock());
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
ObjPtr<mirror::DexCache> dex_cache =
ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
@@ -838,73 +838,21 @@
return true;
}
-class ImageWriter::PruneClassesVisitor : public ClassVisitor {
+class ImageWriter::NonImageClassesVisitor : public ClassVisitor {
public:
- PruneClassesVisitor(ImageWriter* image_writer, ObjPtr<mirror::ClassLoader> class_loader)
- : image_writer_(image_writer),
- class_loader_(class_loader),
- classes_to_prune_(),
- defined_class_count_(0u) { }
+ explicit NonImageClassesVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (!image_writer_->KeepClass(klass.Ptr())) {
classes_to_prune_.insert(klass.Ptr());
- if (klass->GetClassLoader() == class_loader_) {
- ++defined_class_count_;
- }
}
return true;
}
- size_t Prune() REQUIRES_SHARED(Locks::mutator_lock_) {
- ClassTable* class_table =
- Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(class_loader_);
- for (mirror::Class* klass : classes_to_prune_) {
- std::string storage;
- const char* descriptor = klass->GetDescriptor(&storage);
- bool result = class_table->Remove(descriptor);
- DCHECK(result);
- DCHECK(!class_table->Remove(descriptor)) << descriptor;
- }
- return defined_class_count_;
- }
-
- private:
- ImageWriter* const image_writer_;
- const ObjPtr<mirror::ClassLoader> class_loader_;
std::unordered_set<mirror::Class*> classes_to_prune_;
- size_t defined_class_count_;
-};
-
-class ImageWriter::PruneClassLoaderClassesVisitor : public ClassLoaderVisitor {
- public:
- explicit PruneClassLoaderClassesVisitor(ImageWriter* image_writer)
- : image_writer_(image_writer), removed_class_count_(0) {}
-
- virtual void Visit(ObjPtr<mirror::ClassLoader> class_loader) OVERRIDE
- REQUIRES_SHARED(Locks::mutator_lock_) {
- PruneClassesVisitor classes_visitor(image_writer_, class_loader);
- ClassTable* class_table =
- Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(class_loader);
- class_table->Visit(classes_visitor);
- removed_class_count_ += classes_visitor.Prune();
- }
-
- size_t GetRemovedClassCount() const {
- return removed_class_count_;
- }
-
- private:
ImageWriter* const image_writer_;
- size_t removed_class_count_;
};
-void ImageWriter::VisitClassLoaders(ClassLoaderVisitor* visitor) {
- ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- visitor->Visit(nullptr); // Visit boot class loader.
- Runtime::Current()->GetClassLinker()->VisitClassLoaders(visitor);
-}
-
void ImageWriter::PruneNonImageClasses() {
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
@@ -914,11 +862,21 @@
// path dex caches.
class_linker->ClearClassTableStrongRoots();
+ // Make a list of classes we would like to prune.
+ NonImageClassesVisitor visitor(this);
+ class_linker->VisitClasses(&visitor);
+
// Remove the undesired classes from the class roots.
- {
- PruneClassLoaderClassesVisitor class_loader_visitor(this);
- VisitClassLoaders(&class_loader_visitor);
- VLOG(compiler) << "Pruned " << class_loader_visitor.GetRemovedClassCount() << " classes";
+ VLOG(compiler) << "Pruning " << visitor.classes_to_prune_.size() << " classes";
+ for (mirror::Class* klass : visitor.classes_to_prune_) {
+ std::string temp;
+ const char* name = klass->GetDescriptor(&temp);
+ VLOG(compiler) << "Pruning class " << name;
+ if (!compile_app_image_) {
+ DCHECK(IsBootClassLoaderClass(klass));
+ }
+ bool result = class_linker->RemoveClass(name, klass->GetClassLoader());
+ DCHECK(result);
}
// Clear references to removed classes from the DexCaches.
@@ -926,7 +884,7 @@
ScopedAssertNoThreadSuspension sa(__FUNCTION__);
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); // For ClassInClassTable
- ReaderMutexLock mu2(self, *class_linker->DexLock());
+ ReaderMutexLock mu2(self, *Locks::dex_lock_);
for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
if (self->IsJWeakCleared(data.weak_root)) {
continue;
@@ -974,8 +932,7 @@
class_linker->DropFindArrayClassCache();
// Clear to save RAM.
- // FIXME: This has been temporarily removed to provide extra debugging output. Bug 33231647.
- // prune_class_memo_.clear();
+ prune_class_memo_.clear();
}
void ImageWriter::CheckNonImageClassesRemoved() {
@@ -1056,7 +1013,7 @@
// caches. We check that the number of dex caches does not change.
size_t dex_cache_count = 0;
{
- ReaderMutexLock mu(self, *class_linker->DexLock());
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
// Count number of dex caches not in the boot image.
for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
ObjPtr<mirror::DexCache> dex_cache =
@@ -1074,7 +1031,7 @@
hs.NewHandle(ObjectArray<Object>::Alloc(self, object_array_class.Get(), dex_cache_count)));
CHECK(dex_caches.Get() != nullptr) << "Failed to allocate a dex cache array.";
{
- ReaderMutexLock mu(self, *class_linker->DexLock());
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
size_t non_image_dex_caches = 0;
// Re-count number of non image dex caches.
for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
@@ -1147,69 +1104,7 @@
DCHECK_NE(as_klass->GetStatus(), mirror::Class::kStatusError);
if (compile_app_image_) {
// Extra sanity, no boot loader classes should be left!
- // FIXME: Remove the extra logging. Bug 33231647.
- struct Dumper {
- std::string ImageRanges() const {
- std::ostringstream oss;
- const char* separator = "";
- gc::Heap* const heap = Runtime::Current()->GetHeap();
- for (gc::space::ImageSpace* boot_image_space : heap->GetBootImageSpaces()) {
- const uint8_t* image_begin = boot_image_space->Begin();
- // Real image end including ArtMethods and ArtField sections.
- const uint8_t* image_end =
- image_begin + boot_image_space->GetImageHeader().GetImageSize();
- oss << separator << static_cast<const void*>(image_begin)
- << "-" << static_cast<const void*>(image_end);
- separator = ":";
- }
- return oss.str();
- }
- std::string PruneMemo(ImageWriter* writer,
- mirror::Class* klass,
- const std::unordered_map<mirror::Class*, bool>& map) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- auto it = map.find(klass);
- if (it == map.end()) {
- return writer->PruneAppImageClass(klass) ? "missing/true" : "missing/false";
- } else {
- return it->second ? "true" : "false";
- }
- }
- std::string ClassLoaders(ImageWriter* writer, mirror::Class* klass) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- struct DumpVisitor : ClassLoaderVisitor {
- explicit DumpVisitor(mirror::Class* the_klass)
- : klass_(the_klass),
- storage_(),
- descriptor_(the_klass->GetDescriptor(&storage_)) { }
- void Visit(ObjPtr<mirror::ClassLoader> class_loader) OVERRIDE
- REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) {
- ClassTable* class_table =
- Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(class_loader);
- if (class_table->Contains(klass_)) {
- result_ << ";" << static_cast<const void*>(class_loader.Ptr()) << "/"
- << (class_loader == klass_->GetClassLoader() ? "defining" : "initiating")
- << "/"
- << (class_table->LookupByDescriptor(klass_) == klass_ ? "ok" : "mismatch");
- }
- }
- mirror::Class* klass_;
- std::string storage_;
- const char* descriptor_;
- std::ostringstream result_;
- };
- DumpVisitor visitor(klass);
- writer->VisitClassLoaders(&visitor);
- std::string result = visitor.result_.str();
- return result.empty() ? "<none>" : /* drop leading ';' */ result.substr(1u);
- }
- };
- CHECK(!IsBootClassLoaderClass(as_klass)) << as_klass->PrettyClass()
- << " status:" << as_klass->GetStatus()
- << " " << static_cast<const void*>(as_klass)
- << " " << Dumper().ImageRanges()
- << " prune_memo:" << Dumper().PruneMemo(this, as_klass, prune_class_memo_)
- << " loaders:" << Dumper().ClassLoaders(this, as_klass);
+ CHECK(!IsBootClassLoaderClass(as_klass)) << as_klass->PrettyClass();
}
LengthPrefixedArray<ArtField>* fields[] = {
as_klass->GetSFieldsPtr(), as_klass->GetIFieldsPtr(),
@@ -1613,10 +1508,8 @@
}
// Calculate the size of the class table.
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u);
- mirror::ClassLoader* class_loader = compile_app_image_ ? *class_loaders_.begin() : nullptr;
- DCHECK_EQ(image_info.class_table_->NumZygoteClasses(class_loader), 0u);
- if (image_info.class_table_->NumNonZygoteClasses(class_loader) != 0u) {
+ DCHECK_EQ(image_info.class_table_->NumZygoteClasses(), 0u);
+ if (image_info.class_table_->NumNonZygoteClasses() != 0u) {
image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr);
}
}
@@ -1961,10 +1854,8 @@
// above comment for intern tables.
ClassTable temp_class_table;
temp_class_table.ReadFromMemory(class_table_memory_ptr);
- CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u);
- mirror::ClassLoader* class_loader = compile_app_image_ ? *class_loaders_.begin() : nullptr;
- CHECK_EQ(temp_class_table.NumZygoteClasses(class_loader),
- table->NumNonZygoteClasses(class_loader) + table->NumZygoteClasses(class_loader));
+ CHECK_EQ(temp_class_table.NumZygoteClasses(), table->NumNonZygoteClasses() +
+ table->NumZygoteClasses());
BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(&root_visitor,
RootInfo(kRootUnknown));
temp_class_table.VisitRoots(buffered_visitor);
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index c537483..24fad46 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -50,7 +50,6 @@
} // namespace space
} // namespace gc
-class ClassLoaderVisitor;
class ClassTable;
static constexpr int kInvalidFd = -1;
@@ -374,9 +373,6 @@
void ComputeLazyFieldsForImageClasses()
REQUIRES_SHARED(Locks::mutator_lock_);
- // Visit all class loaders.
- void VisitClassLoaders(ClassLoaderVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
-
// Remove unwanted classes from various roots.
void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -592,8 +588,7 @@
class FixupVisitor;
class GetRootsVisitor;
class NativeLocationVisitor;
- class PruneClassesVisitor;
- class PruneClassLoaderClassesVisitor;
+ class NonImageClassesVisitor;
class VisitReferencesVisitor;
DISALLOW_COPY_AND_ASSIGN(ImageWriter);
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 9458576..0a778b0 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -30,6 +30,8 @@
#include "elf_writer.h"
#include "elf_writer_quick.h"
#include "entrypoints/quick/quick_entrypoints.h"
+#include "linker/buffered_output_stream.h"
+#include "linker/file_output_stream.h"
#include "linker/multi_oat_relative_patcher.h"
#include "linker/vector_output_stream.h"
#include "mirror/class-inl.h"
@@ -218,6 +220,17 @@
oat_writer.GetBssSize(),
oat_writer.GetBssRootsOffset());
+ if (kIsVdexEnabled) {
+ std::unique_ptr<BufferedOutputStream> vdex_out(
+ MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(vdex_file)));
+ if (!oat_writer.WriteVerifierDeps(vdex_out.get(), nullptr)) {
+ return false;
+ }
+ if (!oat_writer.WriteChecksumsAndVdexHeader(vdex_out.get())) {
+ return false;
+ }
+ }
+
if (!oat_writer.WriteRodata(oat_rodata)) {
return false;
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 153aff4..bebd5f5 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -300,6 +300,7 @@
oat_data_offset_(0u),
oat_header_(nullptr),
size_vdex_header_(0),
+ size_vdex_checksums_(0),
size_dex_file_alignment_(0),
size_executable_offset_alignment_(0),
size_oat_header_(0),
@@ -409,10 +410,11 @@
CreateTypeLookupTable create_type_lookup_table) {
DCHECK(write_state_ == WriteState::kAddingDexFileSources);
const uint8_t* current_dex_data = nullptr;
- for (size_t i = 0; ; ++i) {
+ for (size_t i = 0; i < vdex_file.GetHeader().GetNumberOfDexFiles(); ++i) {
current_dex_data = vdex_file.GetNextDexFileData(current_dex_data);
if (current_dex_data == nullptr) {
- break;
+ LOG(ERROR) << "Unexpected number of dex files in vdex " << location;
+ return false;
}
if (!DexFile::IsMagicValid(current_dex_data)) {
LOG(ERROR) << "Invalid magic in vdex file created from " << location;
@@ -424,7 +426,14 @@
oat_dex_files_.emplace_back(full_location,
DexFileSource(current_dex_data),
create_type_lookup_table);
+ oat_dex_files_.back().dex_file_location_checksum_ = vdex_file.GetLocationChecksum(i);
}
+
+ if (vdex_file.GetNextDexFileData(current_dex_data) != nullptr) {
+ LOG(ERROR) << "Unexpected number of dex files in vdex " << location;
+ return false;
+ }
+
if (oat_dex_files_.empty()) {
LOG(ERROR) << "No dex files in vdex file created from " << location;
return false;
@@ -488,8 +497,8 @@
// Initialize VDEX and OAT headers.
if (kIsVdexEnabled) {
- size_vdex_header_ = sizeof(VdexFile::Header);
- vdex_size_ = size_vdex_header_;
+ // Reserve space for Vdex header and checksums.
+ vdex_size_ = sizeof(VdexFile::Header) + oat_dex_files_.size() * sizeof(VdexFile::VdexChecksum);
}
size_t oat_data_offset = InitOatHeader(instruction_set,
instruction_set_features,
@@ -793,7 +802,7 @@
// Update quick method header.
DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
- uint32_t vmap_table_offset = method_header->vmap_table_offset_;
+ uint32_t vmap_table_offset = method_header->GetVmapTableOffset();
// The code offset was 0 when the mapping/vmap table offset was set, so it's set
// to 0-offset and we need to adjust it by code_offset.
uint32_t code_offset = quick_code_offset - thumb_offset;
@@ -935,7 +944,7 @@
// If vdex is enabled, we only emit the stack map of compiled code. The quickening info will
// be in the vdex file.
if (!compiled_method->GetQuickCode().empty() || !kIsVdexEnabled) {
- DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].vmap_table_offset_, 0u);
+ DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].GetVmapTableOffset(), 0u);
ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
uint32_t map_size = map.size() * sizeof(map[0]);
@@ -949,7 +958,7 @@
});
// Code offset is not initialized yet, so set the map offset to 0u-offset.
DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u);
- oat_class->method_headers_[method_offsets_index_].vmap_table_offset_ = 0u - offset;
+ oat_class->method_headers_[method_offsets_index_].SetVmapTableOffset(0u - offset);
}
}
++method_offsets_index_;
@@ -1406,7 +1415,7 @@
size_t file_offset = file_offset_;
OutputStream* out = out_;
- uint32_t map_offset = oat_class->method_headers_[method_offsets_index_].vmap_table_offset_;
+ uint32_t map_offset = oat_class->method_headers_[method_offsets_index_].GetVmapTableOffset();
uint32_t code_offset = oat_class->method_offsets_[method_offsets_index_].code_offset_;
++method_offsets_index_;
@@ -1837,6 +1846,7 @@
size_total += (x);
DO_STAT(size_vdex_header_);
+ DO_STAT(size_vdex_checksums_);
DO_STAT(size_dex_file_alignment_);
DO_STAT(size_executable_offset_alignment_);
DO_STAT(size_oat_header_);
@@ -2383,6 +2393,7 @@
// Update dex file size and resize class offsets in the OatDexFile.
// Note: For raw data, the checksum is passed directly to AddRawDexFileSource().
+ // Note: For vdex, the checksum is copied from the existing vdex file.
oat_dex_file->dex_file_size_ = header->file_size_;
oat_dex_file->class_offsets_.resize(header->class_defs_size_);
return true;
@@ -2592,11 +2603,31 @@
return true;
}
-bool OatWriter::WriteVdexHeader(OutputStream* vdex_out) {
+bool OatWriter::WriteChecksumsAndVdexHeader(OutputStream* vdex_out) {
if (!kIsVdexEnabled) {
return true;
}
- off_t actual_offset = vdex_out->Seek(0, kSeekSet);
+ // Write checksums
+ off_t actual_offset = vdex_out->Seek(sizeof(VdexFile::Header), kSeekSet);
+ if (actual_offset != sizeof(VdexFile::Header)) {
+ PLOG(ERROR) << "Failed to seek to the checksum location of vdex file. Actual: " << actual_offset
+ << " File: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ for (size_t i = 0, size = oat_dex_files_.size(); i != size; ++i) {
+ OatDexFile* oat_dex_file = &oat_dex_files_[i];
+ if (!vdex_out->WriteFully(
+ &oat_dex_file->dex_file_location_checksum_, sizeof(VdexFile::VdexChecksum))) {
+ PLOG(ERROR) << "Failed to write dex file location checksum. File: "
+ << vdex_out->GetLocation();
+ return false;
+ }
+ size_vdex_checksums_ += sizeof(VdexFile::VdexChecksum);
+ }
+
+ // Write header.
+ actual_offset = vdex_out->Seek(0, kSeekSet);
if (actual_offset != 0) {
PLOG(ERROR) << "Failed to seek to the beginning of vdex file. Actual: " << actual_offset
<< " File: " << vdex_out->GetLocation();
@@ -2610,12 +2641,15 @@
size_t verifier_deps_section_size = vdex_quickening_info_offset_ - vdex_verifier_deps_offset_;
size_t quickening_info_section_size = vdex_size_ - vdex_quickening_info_offset_;
- VdexFile::Header vdex_header(
- dex_section_size, verifier_deps_section_size, quickening_info_section_size);
+ VdexFile::Header vdex_header(oat_dex_files_.size(),
+ dex_section_size,
+ verifier_deps_section_size,
+ quickening_info_section_size);
if (!vdex_out->WriteFully(&vdex_header, sizeof(VdexFile::Header))) {
PLOG(ERROR) << "Failed to write vdex header. File: " << vdex_out->GetLocation();
return false;
}
+ size_vdex_header_ = sizeof(VdexFile::Header);
if (!vdex_out->Flush()) {
PLOG(ERROR) << "Failed to flush stream after writing to vdex file."
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 0dcf79e..da221d6 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -124,7 +124,7 @@
// - Initialize()
// - WriteVerifierDeps()
// - WriteQuickeningInfo()
- // - WriteVdexHeader()
+ // - WriteChecksumsAndVdexHeader()
// - PrepareLayout(),
// - WriteRodata(),
// - WriteCode(),
@@ -168,7 +168,7 @@
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
bool WriteQuickeningInfo(OutputStream* vdex_out);
bool WriteVerifierDeps(OutputStream* vdex_out, verifier::VerifierDeps* verifier_deps);
- bool WriteVdexHeader(OutputStream* vdex_out);
+ bool WriteChecksumsAndVdexHeader(OutputStream* vdex_out);
// Initialize the writer with the given parameters.
void Initialize(const CompilerDriver* compiler,
ImageWriter* image_writer,
@@ -387,6 +387,7 @@
// output stats
uint32_t size_vdex_header_;
+ uint32_t size_vdex_checksums_;
uint32_t size_dex_file_alignment_;
uint32_t size_executable_offset_alignment_;
uint32_t size_oat_header_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9f6b78a..fa6a522 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -304,6 +304,7 @@
SetFrameSize(RoundUp(
first_register_slot_in_slow_path_
+ maximum_safepoint_spill_size
+ + (GetGraph()->HasShouldDeoptimizeFlag() ? kShouldDeoptimizeFlagSize : 0)
+ FrameEntrySpillSize(),
kStackAlignment));
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index a5d19ab..4b11e7c 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -307,6 +307,12 @@
return POPCOUNT(GetSlowPathSpills(locations, core_registers));
}
+ size_t GetStackOffsetOfShouldDeoptimizeFlag() const {
+ DCHECK(GetGraph()->HasShouldDeoptimizeFlag());
+ DCHECK_GE(GetFrameSize(), FrameEntrySpillSize() + kShouldDeoptimizeFlagSize);
+ return GetFrameSize() - FrameEntrySpillSize() - kShouldDeoptimizeFlagSize;
+ }
+
// Record native to dex mapping for a suspend point. Required by runtime.
void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
// Check whether we have already recorded mapping at this PC.
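
A small worked example of the offset returned by GetStackOffsetOfShouldDeoptimizeFlag above, with assumed spill sizes (the real values depend on the method being compiled): the frame size computed in the SetFrameSize(RoundUp(...)) call now reserves kShouldDeoptimizeFlagSize extra bytes, and the flag slot ends up directly below the frame-entry spills.

#include <cstddef>

// Illustrative constants; the spill sizes below are made up for the example.
constexpr size_t kStackAlignment = 16;
constexpr size_t kShouldDeoptimizeFlagSize = 4;

size_t RoundUpTo(size_t value, size_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

int main() {
  size_t first_register_slot_in_slow_path = 32;  // assumption
  size_t maximum_safepoint_spill_size = 16;      // assumption
  size_t frame_entry_spill_size = 40;            // e.g. pushed core + FP registers

  // Same sum as in the SetFrameSize(RoundUp(...)) call, with the new
  // kShouldDeoptimizeFlagSize term included.
  size_t frame_size = RoundUpTo(first_register_slot_in_slow_path +
                                maximum_safepoint_spill_size +
                                kShouldDeoptimizeFlagSize +
                                frame_entry_spill_size,
                                kStackAlignment);  // 96 with these numbers

  // The flag lives directly below the entry spills.
  size_t flag_offset = frame_size - frame_entry_spill_size - kShouldDeoptimizeFlagSize;  // 52
  (void)flag_offset;
  return 0;
}
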
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 1f59816..ed6eef1 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1329,6 +1329,13 @@
__ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
__ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ mov(IP, ShifterOperand(0));
+ __ StoreToOffset(kStoreWord, IP, SP, -kShouldDeoptimizeFlagSize);
+ }
+
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, -adjust);
__ cfi().AdjustCFAOffset(adjust);
@@ -1944,6 +1951,19 @@
/* false_target */ nullptr);
}
+void LocationsBuilderARM::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ __ LoadFromOffset(kLoadWord,
+ flag->GetLocations()->Out().AsRegister<Register>(),
+ SP,
+ codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
+}
+
void LocationsBuilderARM::VisitSelect(HSelect* select) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
if (Primitive::IsFloatingPointType(select->GetType())) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index ab6a33f..6eebd69 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1273,6 +1273,12 @@
frame_size - GetCoreSpillSize());
GetAssembler()->SpillRegisters(GetFramePreservedFPRegisters(),
frame_size - FrameEntrySpillSize());
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ Register wzr = Register(VIXLRegCodeFromART(WZR), kWRegSize);
+ __ Str(wzr, MemOperand(sp, GetStackOffsetOfShouldDeoptimizeFlag()));
+ }
}
}
@@ -3235,6 +3241,17 @@
/* false_target */ nullptr);
}
+void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ __ Ldr(OutputRegister(flag),
+ MemOperand(sp, codegen_->GetStackOffsetOfShouldDeoptimizeFlag()));
+}
+
static inline bool IsConditionOnFloatingPointValues(HInstruction* condition) {
return condition->IsCondition() &&
Primitive::IsFloatingPointType(condition->InputAt(0)->GetType());
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 1ca439e..4b24ac3 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -63,9 +63,10 @@
// We expected this for both core and fpu register pairs.
return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
}
-
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kArmWordSize = static_cast<size_t>(kArmPointerSize);
+static constexpr size_t kArmBitsPerWord = kArmWordSize * kBitsPerByte;
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr size_t kArmInstrMaxSizeInBytes = 4u;
static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
#ifdef __
@@ -438,6 +439,62 @@
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARMVIXL);
};
+class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+ explicit LoadStringSlowPathARMVIXL(HLoadString* instruction)
+ : SlowPathCodeARMVIXL(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ HLoadString* load = instruction_->AsLoadString();
+ const uint32_t string_index = load->GetStringIndex().index_;
+ vixl32::Register out = OutputRegister(load);
+ vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+ constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
+
+ CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ // In the unlucky case that the `temp` is R0, we preserve the address in `out` across
+ // the kSaveEverything call (or use `out` for the address after non-kSaveEverything call).
+ bool temp_is_r0 = (temp.Is(calling_convention.GetRegisterAt(0)));
+ vixl32::Register entry_address = temp_is_r0 ? out : temp;
+ DCHECK(!entry_address.Is(calling_convention.GetRegisterAt(0)));
+ if (call_saves_everything_except_r0 && temp_is_r0) {
+ __ Mov(entry_address, temp);
+ }
+
+ __ Mov(calling_convention.GetRegisterAt(0), string_index);
+ arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+
+ // Store the resolved String to the .bss entry.
+ if (call_saves_everything_except_r0) {
+ // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
+ __ Str(r0, MemOperand(entry_address));
+ } else {
+ // For non-Baker read barrier, we need to re-calculate the address of the string entry.
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ arm_codegen->NewPcRelativeStringPatch(load->GetDexFile(), string_index);
+ arm_codegen->EmitMovwMovtPlaceholder(labels, out);
+ __ Str(r0, MemOperand(entry_address));
+ }
+
+ arm_codegen->Move32(locations->Out(), LocationFrom(r0));
+ RestoreLiveRegisters(codegen, locations);
+
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARMVIXL"; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARMVIXL);
+};
+
class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
public:
TypeCheckSlowPathARMVIXL(HInstruction* instruction, bool is_fatal)
@@ -630,9 +687,30 @@
return mask;
}
-size_t CodeGeneratorARMVIXL::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- GetAssembler()->LoadSFromOffset(vixl32::SRegister(reg_id), sp, stack_index);
- return kArmWordSize;
+// Saves the register in the stack. Returns the size taken on stack.
+size_t CodeGeneratorARMVIXL::SaveCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
+ uint32_t reg_id ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+ return 0;
+}
+
+// Restores the register from the stack. Returns the size taken on stack.
+size_t CodeGeneratorARMVIXL::RestoreCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
+ uint32_t reg_id ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+ return 0;
+}
+
+size_t CodeGeneratorARMVIXL::SaveFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
+ uint32_t reg_id ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+ return 0;
+}
+
+size_t CodeGeneratorARMVIXL::RestoreFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
+ uint32_t reg_id ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+ return 0;
}
#undef __
@@ -655,7 +733,11 @@
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
assembler_(graph->GetArena()),
- isa_features_(isa_features) {
+ isa_features_(isa_features),
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Always save the LR register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(LR));
// Give d14 and d15 as scratch registers to VIXL.
@@ -793,7 +875,7 @@
__ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
// The load must immediately precede RecordPcInfo.
AssemblerAccurateScope aas(GetVIXLAssembler(),
- kArmInstrMaxSizeInBytes,
+ vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
__ ldr(temp, MemOperand(temp));
RecordPcInfo(nullptr, 0);
@@ -853,6 +935,116 @@
__ Bind(GetLabelOf(block));
}
+Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(Primitive::Type type) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ uint32_t index = gp_index_++;
+ uint32_t stack_index = stack_index_++;
+ if (index < calling_convention.GetNumberOfRegisters()) {
+ return LocationFrom(calling_convention.GetRegisterAt(index));
+ } else {
+ return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
+ }
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t index = gp_index_;
+ uint32_t stack_index = stack_index_;
+ gp_index_ += 2;
+ stack_index_ += 2;
+ if (index + 1 < calling_convention.GetNumberOfRegisters()) {
+ if (calling_convention.GetRegisterAt(index).Is(r1)) {
+ // Skip R1, and use R2_R3 instead.
+ gp_index_++;
+ index++;
+ }
+ }
+ if (index + 1 < calling_convention.GetNumberOfRegisters()) {
+ DCHECK_EQ(calling_convention.GetRegisterAt(index).GetCode() + 1,
+ calling_convention.GetRegisterAt(index + 1).GetCode());
+
+ return LocationFrom(calling_convention.GetRegisterAt(index),
+ calling_convention.GetRegisterAt(index + 1));
+ } else {
+ return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
+ }
+ }
+
+ case Primitive::kPrimFloat: {
+ uint32_t stack_index = stack_index_++;
+ if (float_index_ % 2 == 0) {
+ float_index_ = std::max(double_index_, float_index_);
+ }
+ if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
+ return LocationFrom(calling_convention.GetFpuRegisterAt(float_index_++));
+ } else {
+ return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
+ }
+ }
+
+ case Primitive::kPrimDouble: {
+ double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
+ uint32_t stack_index = stack_index_;
+ stack_index_ += 2;
+ if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
+ uint32_t index = double_index_;
+ double_index_ += 2;
+ Location result = LocationFrom(
+ calling_convention.GetFpuRegisterAt(index),
+ calling_convention.GetFpuRegisterAt(index + 1));
+ DCHECK(ExpectedPairLayout(result));
+ return result;
+ } else {
+ return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
+ }
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected parameter type " << type;
+ break;
+ }
+ return Location::NoLocation();
+}
+
+Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(Primitive::Type type) const {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ return LocationFrom(r0);
+ }
+
+ case Primitive::kPrimFloat: {
+ return LocationFrom(s0);
+ }
+
+ case Primitive::kPrimLong: {
+ return LocationFrom(r0, r1);
+ }
+
+ case Primitive::kPrimDouble: {
+ return LocationFrom(s0, s1);
+ }
+
+ case Primitive::kPrimVoid:
+ return Location::NoLocation();
+ }
+
+ UNREACHABLE();
+}
+
+Location InvokeDexCallingConventionVisitorARMVIXL::GetMethodLocation() const {
+ return LocationFrom(kMethodRegister);
+}
+
void CodeGeneratorARMVIXL::Move32(Location destination, Location source) {
if (source.Equals(destination)) {
return;
@@ -924,10 +1116,14 @@
uint32_t dex_pc,
SlowPathCode* slow_path) {
ValidateInvokeRuntime(entrypoint, instruction, slow_path);
- GenerateInvokeRuntime(GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value());
+ __ Ldr(lr, MemOperand(tr, GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value()));
+ // Ensure the pc position is recorded immediately after the `blx` instruction.
+ // blx in T32 has only a 16-bit encoding, so a stricter (exact-size) check is used for the scope.
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+ __ blx(lr);
if (EntrypointRequiresStackMap(entrypoint)) {
- // TODO(VIXL): If necessary, use a scope to ensure we record the pc info immediately after the
- // previous instruction.
RecordPcInfo(instruction, dex_pc, slow_path);
}
}
@@ -936,11 +1132,7 @@
HInstruction* instruction,
SlowPathCode* slow_path) {
ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
- GenerateInvokeRuntime(entry_point_offset);
-}
-
-void CodeGeneratorARMVIXL::GenerateInvokeRuntime(int32_t entry_point_offset) {
- GetAssembler()->LoadFromOffset(kLoadWord, lr, tr, entry_point_offset);
+ __ Ldr(lr, MemOperand(tr, entry_point_offset));
__ Blx(lr);
}
@@ -1270,6 +1462,19 @@
/* false_target */ nullptr);
}
+void LocationsBuilderARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ GetAssembler()->LoadFromOffset(kLoadWord,
+ OutputRegister(flag),
+ sp,
+ codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
+}
+
void LocationsBuilderARMVIXL::VisitSelect(HSelect* select) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
if (Primitive::IsFloatingPointType(select->GetType())) {
@@ -1360,7 +1565,7 @@
CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
AssemblerAccurateScope aas(GetVIXLAssembler(),
- kArmInstrMaxSizeInBytes * 3u,
+ 3 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
__ ite(ARMCondition(cond->GetCondition()));
__ mov(ARMCondition(cond->GetCondition()), OutputRegister(cond), 1);
@@ -1575,7 +1780,10 @@
HandleInvoke(invoke);
- // TODO(VIXL): invoke->HasPcRelativeDexCache()
+ // For PC-relative dex cache, the invoke has an extra input: the PC-relative address base.
+ if (invoke->HasPcRelativeDexCache()) {
+ invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
+ }
}
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARMVIXL* codegen) {
@@ -1597,15 +1805,13 @@
}
LocationSummary* locations = invoke->GetLocations();
- DCHECK(locations->HasTemps());
- codegen_->GenerateStaticOrDirectCall(invoke, locations->GetTemp(0));
- // TODO(VIXL): If necessary, use a scope to ensure we record the pc info immediately after the
- // previous instruction.
+ codegen_->GenerateStaticOrDirectCall(
+ invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderARMVIXL::HandleInvoke(HInvoke* invoke) {
- InvokeDexCallingConventionVisitorARM calling_convention_visitor;
+ InvokeDexCallingConventionVisitorARMVIXL calling_convention_visitor;
CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}
@@ -1624,10 +1830,8 @@
}
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
- DCHECK(!codegen_->IsLeafMethod());
- // TODO(VIXL): If necessary, use a scope to ensure we record the pc info immediately after the
- // previous instruction.
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
}
void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
@@ -1646,10 +1850,15 @@
DCHECK(!receiver.IsStackSlot());
- // /* HeapReference<Class> */ temp = receiver->klass_
- GetAssembler()->LoadFromOffset(kLoadWord, temp, RegisterFrom(receiver), class_offset);
-
- codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // Ensure the pc position is recorded immediately after the `ldr` instruction.
+ {
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ // /* HeapReference<Class> */ temp = receiver->klass_
+ __ ldr(temp, MemOperand(RegisterFrom(receiver), class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ }
// Instead of simply (possibly) unpoisoning `temp` here, we should
// emit a read barrier for the previous class reference load.
// However this is not required in practice, as this is an
@@ -1688,15 +1897,16 @@
temps.Exclude(hidden_reg);
__ Mov(hidden_reg, invoke->GetDexMethodIndex());
}
-
{
+ // Ensure the pc position is recorded immediately after the `blx` instruction.
+ // blx in T32 has only a 16-bit encoding, so a stricter (exact-size) check is used for the scope.
AssemblerAccurateScope aas(GetVIXLAssembler(),
- kArmInstrMaxSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
// LR();
__ blx(lr);
- DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
}
}
@@ -3067,7 +3277,7 @@
__ Subs(temp, o_l, Operand::From(kArmBitsPerWord));
{
AssemblerAccurateScope guard(GetVIXLAssembler(),
- 3 * kArmInstrMaxSizeInBytes,
+ 2 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
__ it(pl);
__ lsl(pl, o_h, low, temp);
@@ -3086,7 +3296,7 @@
__ Subs(temp, o_h, Operand::From(kArmBitsPerWord));
{
AssemblerAccurateScope guard(GetVIXLAssembler(),
- 3 * kArmInstrMaxSizeInBytes,
+ 2 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
__ it(pl);
__ asr(pl, o_l, high, temp);
@@ -3103,7 +3313,7 @@
__ Subs(temp, o_h, Operand::From(kArmBitsPerWord));
{
AssemblerAccurateScope guard(GetVIXLAssembler(),
- 3 * kArmInstrMaxSizeInBytes,
+ 2 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
__ it(pl);
__ lsr(pl, o_l, high, temp);
@@ -3220,9 +3430,10 @@
MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize);
GetAssembler()->LoadFromOffset(kLoadWord, temp, tr, QUICK_ENTRY_POINT(pNewEmptyString));
GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, code_offset.Int32Value());
+ // blx in T32 has only a 16-bit encoding, so a stricter (exact-size) check is used for the scope.
AssemblerAccurateScope aas(GetVIXLAssembler(),
- kArmInstrMaxSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
__ blx(lr);
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
@@ -3462,10 +3673,16 @@
addr = temp;
}
__ Bind(&fail);
- // We need a load followed by store. (The address used in a STREX instruction must
- // be the same as the address in the most recently executed LDREX instruction.)
- __ Ldrexd(temp1, temp2, MemOperand(addr));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure the pc position is recorded immediately after the `ldrexd` instruction.
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ // We need a load followed by store. (The address used in a STREX instruction must
+ // be the same as the address in the most recently executed LDREX instruction.)
+ __ ldrexd(temp1, temp2, MemOperand(addr));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
__ Strexd(temp1, value_lo, value_hi, MemOperand(addr));
__ CompareAndBranchIfNonZero(temp1, &fail);
}
@@ -3614,6 +3831,11 @@
// Longs and doubles are handled in the switch.
if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
+ // TODO(VIXL): Here and for other calls to `MaybeRecordImplicitNullCheck` in this method, we
+ // should use a scope and the assembler to emit the store instruction to guarantee that we
+ // record the pc at the correct position. But the `Assembler` does not automatically handle
+ // unencodable offsets. Practically, everything is fine because the helper and VIXL, at the time
+ // of writing, do generate the store instruction last.
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
@@ -3788,7 +4010,6 @@
TODO_VIXL32(FATAL);
} else {
GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(out), base, offset);
- // TODO(VIXL): Scope to guarantee the position immediately after the load.
codegen_->MaybeRecordImplicitNullCheck(instruction);
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
@@ -3825,7 +4046,6 @@
__ Vmov(out_dreg, lo, hi);
} else {
GetAssembler()->LoadDFromOffset(out_dreg, base, offset);
- // TODO(VIXL): Scope to guarantee the position immediately after the load.
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
break;
@@ -3841,6 +4061,11 @@
// double fields, are handled in the previous switch statement.
} else {
// Address cases other than reference and double that may require an implicit null check.
+ // TODO(VIXL): Here and for other calls to `MaybeRecordImplicitNullCheck` in this method, we
+ // should use a scope and the assembler to emit the load instruction to guarantee that we
+ // record the pc at the correct position. But the `Assembler` does not automatically handle
+ // unencodable offsets. Practically, everything is fine because the helper and VIXL, at the time
+ // of writing, do generate the load instruction last.
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
@@ -3965,8 +4190,9 @@
}
UseScratchRegisterScope temps(GetVIXLAssembler());
+ // Ensure the pc position is recorded immediately after the `ldr` instruction.
AssemblerAccurateScope aas(GetVIXLAssembler(),
- kArmInstrMaxSizeInBytes,
+ vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
__ ldr(temps.Acquire(), MemOperand(InputRegisterAt(instruction, 0)));
RecordPcInfo(instruction, instruction->GetDexPc());
@@ -4233,6 +4459,11 @@
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
GetAssembler()->LoadFromOffset(kLoadWord, out, obj, offset);
+ // TODO(VIXL): Here and for other calls to `MaybeRecordImplicitNullCheck` in this method,
+ // we should use a scope and the assembler to emit the load instruction to guarantee that
+ // we record the pc at the correct position. But the `Assembler` does not automatically
+ // handle unencodable offsets. Practically, everything is fine because the helper and
+ // VIXL, at the time of writing, do generate the load instruction last.
codegen_->MaybeRecordImplicitNullCheck(instruction);
// If read barriers are enabled, emit read barriers other than
// Baker's using a slow path (and also unpoison the loaded
@@ -4255,7 +4486,9 @@
}
codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
temps.Release(temp);
-
+ // TODO(VIXL): Use a scope to ensure that we record the pc position immediately after the
+ // load instruction. Practically, everything is fine because the helper and VIXL, at the
+ // time of writing, do generate the load instruction last.
codegen_->MaybeRecordImplicitNullCheck(instruction);
// If read barriers are enabled, emit read barriers other than
// Baker's using a slow path (and also unpoison the loaded
@@ -4317,6 +4550,8 @@
// Potential implicit null checks, in the case of reference
// arrays, are handled in the previous switch statement.
} else if (!maybe_compressed_char_at) {
+ // TODO(VIXL): Use a scope to ensure we record the pc info immediately after
+ // the preceding load instruction.
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
}
@@ -4417,6 +4652,8 @@
codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
temps.Release(temp);
}
+ // TODO(VIXL): Use a scope to ensure we record the pc info immediately after the preceding
+ // store instruction.
codegen_->MaybeRecordImplicitNullCheck(instruction);
DCHECK(!needs_write_barrier);
DCHECK(!may_need_runtime_call_for_type_check);
@@ -4451,6 +4688,8 @@
codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
temps.Release(temp);
}
+ // TODO(VIXL): Use a scope to ensure we record the pc info immediately after the preceding
+ // store instruction.
codegen_->MaybeRecordImplicitNullCheck(instruction);
__ B(&done);
__ Bind(&non_zero);
@@ -4464,9 +4703,15 @@
// negative, in which case we would take the ArraySet slow
// path.
- // /* HeapReference<Class> */ temp1 = array->klass_
- GetAssembler()->LoadFromOffset(kLoadWord, temp1, array, class_offset);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ {
+ // Ensure we record the pc position immediately after the `ldr` instruction.
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ // /* HeapReference<Class> */ temp1 = array->klass_
+ __ ldr(temp1, MemOperand(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
GetAssembler()->MaybeUnpoisonHeapReference(temp1);
// /* HeapReference<Class> */ temp1 = temp1->component_type_
@@ -4523,6 +4768,8 @@
}
if (!may_need_runtime_call_for_type_check) {
+ // TODO(VIXL): Ensure we record the pc position immediately after the preceding store
+ // instruction.
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
@@ -4591,6 +4838,8 @@
// Objects are handled in the switch.
if (value_type != Primitive::kPrimNot) {
+ // TODO(VIXL): Ensure we record the pc position immediately after the preceding store
+ // instruction.
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
}
@@ -4606,8 +4855,13 @@
uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
vixl32::Register obj = InputRegisterAt(instruction, 0);
vixl32::Register out = OutputRegister(instruction);
- GetAssembler()->LoadFromOffset(kLoadWord, out, obj, offset);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ {
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ __ ldr(out, MemOperand(obj, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
// Mask out compression flag from String's array length.
if (mirror::kUseStringCompression && instruction->IsStringLength()) {
__ Lsr(out, out, 1u);
@@ -4985,12 +5239,37 @@
TODO_VIXL32(FATAL);
}
-// Check if the desired_class_load_kind is supported. If it is, return it,
-// otherwise return a fall-back kind that should be used instead.
HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind ATTRIBUTE_UNUSED) {
- // TODO(VIXL): Implement optimized code paths.
- return HLoadClass::LoadKind::kDexCacheViaMethod;
+ HLoadClass::LoadKind desired_class_load_kind) {
+ switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kReferrersClass:
+ break;
+ case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
+ // TODO(VIXL): Enable it back when literal pools are fixed in VIXL.
+ return HLoadClass::LoadKind::kDexCacheViaMethod;
+ case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ DCHECK(GetCompilerOptions().GetCompilePic());
+ break;
+ case HLoadClass::LoadKind::kBootImageAddress:
+ // TODO(VIXL): Enable it back when literal pools are fixed in VIXL.
+ return HLoadClass::LoadKind::kDexCacheViaMethod;
+ case HLoadClass::LoadKind::kDexCacheAddress:
+ // TODO(VIXL): Enable it back when literal pools are fixed in VIXL.
+ return HLoadClass::LoadKind::kDexCacheViaMethod;
+ case HLoadClass::LoadKind::kDexCachePcRelative:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ // We disable pc-relative load when there is an irreducible loop, as the optimization
+ // is incompatible with it.
+ // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
+ // with irreducible loops.
+ if (GetGraph()->HasIrreducibleLoops()) {
+ return HLoadClass::LoadKind::kDexCacheViaMethod;
+ }
+ break;
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ break;
+ }
+ return desired_class_load_kind;
}
void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
@@ -5004,11 +5283,15 @@
return;
}
- // TODO(VIXL): read barrier code.
- LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
+ const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
+ TODO_VIXL32(FATAL);
+ }
+
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
load_kind == HLoadClass::LoadKind::kDexCacheViaMethod ||
@@ -5030,7 +5313,9 @@
Location out_loc = locations->Out();
vixl32::Register out = OutputRegister(cls);
- // TODO(VIXL): read barrier code.
+ const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+ ? kWithoutReadBarrier
+ : kCompilerReadBarrierOption;
bool generate_null_check = false;
switch (cls->GetLoadKind()) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -5042,7 +5327,35 @@
out_loc,
current_method,
ArtMethod::DeclaringClassOffset().Int32Value(),
- kEmitCompilerReadBarrier);
+ read_barrier_option);
+ break;
+ }
+ case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
+ TODO_VIXL32(FATAL);
+ break;
+ }
+ case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ break;
+ }
+ case HLoadClass::LoadKind::kBootImageAddress: {
+ TODO_VIXL32(FATAL);
+ break;
+ }
+ case HLoadClass::LoadKind::kDexCacheAddress: {
+ TODO_VIXL32(FATAL);
+ break;
+ }
+ case HLoadClass::LoadKind::kDexCachePcRelative: {
+ vixl32::Register base_reg = InputRegisterAt(cls, 0);
+ HArmDexCacheArraysBase* base = cls->InputAt(0)->AsArmDexCacheArraysBase();
+ int32_t offset = cls->GetDexCacheElementOffset() - base->GetElementOffset();
+ // /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
+ GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, read_barrier_option);
+ generate_null_check = !cls->IsInDexCache();
break;
}
case HLoadClass::LoadKind::kDexCacheViaMethod: {
@@ -5054,7 +5367,7 @@
GetAssembler()->LoadFromOffset(kLoadWord, out, current_method, resolved_types_offset);
// /* GcRoot<mirror::Class> */ out = out[type_index]
size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset, kEmitCompilerReadBarrier);
+ GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
generate_null_check = !cls->IsInDexCache();
break;
}
@@ -5114,37 +5427,101 @@
__ Bind(slow_path->GetExitLabel());
}
-// Check if the desired_string_load_kind is supported. If it is, return it,
-// otherwise return a fall-back kind that should be used instead.
HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind ATTRIBUTE_UNUSED) {
- // TODO(VIXL): Implement optimized code paths. For now we always use the simpler fallback code.
- return HLoadString::LoadKind::kDexCacheViaMethod;
+ HLoadString::LoadKind desired_string_load_kind) {
+ switch (desired_string_load_kind) {
+ case HLoadString::LoadKind::kBootImageLinkTimeAddress:
+ // TODO(VIXL): Implement missing optimization.
+ return HLoadString::LoadKind::kDexCacheViaMethod;
+ case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
+ DCHECK(GetCompilerOptions().GetCompilePic());
+ break;
+ case HLoadString::LoadKind::kBootImageAddress:
+ // TODO(VIXL): Implement missing optimization.
+ return HLoadString::LoadKind::kDexCacheViaMethod;
+ case HLoadString::LoadKind::kBssEntry:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ break;
+ case HLoadString::LoadKind::kJitTableAddress:
+ DCHECK(Runtime::Current()->UseJitCompilation());
+ // TODO(VIXL): Implement missing optimization.
+ return HLoadString::LoadKind::kDexCacheViaMethod;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ break;
+ }
+ return desired_string_load_kind;
}
void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
- LocationSummary::CallKind call_kind = load->NeedsEnvironment()
- ? LocationSummary::kCallOnMainOnly
- : LocationSummary::kNoCall;
+ LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
-
- // TODO(VIXL): Implement optimized code paths.
- // See InstructionCodeGeneratorARMVIXL::VisitLoadString.
HLoadString::LoadKind load_kind = load->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
- locations->SetInAt(0, Location::RequiresRegister());
- // TODO(VIXL): Use InvokeRuntimeCallingConventionARMVIXL instead.
locations->SetOut(LocationFrom(r0));
} else {
locations->SetOut(Location::RequiresRegister());
+ if (load_kind == HLoadString::LoadKind::kBssEntry) {
+ if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ // Rely on the pResolveString and/or marking to save everything, including temps.
+ // Note that IP may theoretically be clobbered by saving/restoring the live register
+ // (only one thanks to the custom calling convention), so we request a different temp.
+ locations->AddTemp(Location::RequiresRegister());
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
+ // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
+ // that the kPrimNot result register is the same as the first argument register.
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
+ } else {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ }
+ }
}
}
void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
- // TODO(VIXL): Implement optimized code paths.
- // We implemented the simplest solution to get first ART tests passing, we deferred the
- // optimized path until later, we should implement it using ARM64 implementation as a
- // reference. The same related to LocationsBuilderARMVIXL::VisitLoadString.
+ LocationSummary* locations = load->GetLocations();
+ Location out_loc = locations->Out();
+ vixl32::Register out = OutputRegister(load);
+ HLoadString::LoadKind load_kind = load->GetLoadKind();
+
+ switch (load_kind) {
+ case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
+ TODO_VIXL32(FATAL);
+ break;
+ }
+ case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ return; // No dex cache slow path.
+ }
+ case HLoadString::LoadKind::kBootImageAddress: {
+ TODO_VIXL32(FATAL);
+ break;
+ }
+ case HLoadString::LoadKind::kBssEntry: {
+ DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
+ vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->EmitMovwMovtPlaceholder(labels, temp);
+ GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
+ LoadStringSlowPathARMVIXL* slow_path =
+ new (GetGraph()->GetArena()) LoadStringSlowPathARMVIXL(load);
+ codegen_->AddSlowPath(slow_path);
+ __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
+ case HLoadString::LoadKind::kJitTableAddress: {
+ TODO_VIXL32(FATAL);
+ break;
+ }
+ default:
+ break;
+ }
// TODO: Re-add the compiler code to do string dex cache lookup again.
DCHECK_EQ(load->GetLoadKind(), HLoadString::LoadKind::kDexCacheViaMethod);
@@ -5999,9 +6376,9 @@
Location root,
vixl32::Register obj,
uint32_t offset,
- bool requires_read_barrier) {
+ ReadBarrierOption read_barrier_option) {
vixl32::Register root_reg = RegisterFrom(root);
- if (requires_read_barrier) {
+ if (read_barrier_option == kWithReadBarrier) {
TODO_VIXL32(FATAL);
} else {
// Plain GC root load with no read barrier.
@@ -6062,15 +6439,51 @@
// Check if the desired_dispatch_info is supported. If it is, return it,
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
- const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info ATTRIBUTE_UNUSED,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ HInvokeStaticOrDirect* invoke) {
// TODO(VIXL): Implement optimized code paths.
- return {
- HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
- HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- 0u,
- 0u
- };
+ if (desired_dispatch_info.method_load_kind ==
+ HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup ||
+ desired_dispatch_info.code_ptr_location ==
+ HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup) {
+ return {
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ 0u,
+ 0u
+ };
+ }
+
+ HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
+ // We disable pc-relative load when there is an irreducible loop, as the optimization
+ // is incompatible with it.
+ // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
+ // with irreducible loops.
+ if (GetGraph()->HasIrreducibleLoops() &&
+ (dispatch_info.method_load_kind ==
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative)) {
+ dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
+ }
+
+ if (dispatch_info.code_ptr_location == HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative) {
+ const DexFile& outer_dex_file = GetGraph()->GetDexFile();
+ if (&outer_dex_file != invoke->GetTargetMethod().dex_file) {
+ // Calls across dex files are more likely to exceed the available BL range,
+ // so use an absolute patch with fixup if available, and kCallArtMethod otherwise.
+ HInvokeStaticOrDirect::CodePtrLocation code_ptr_location =
+ (desired_dispatch_info.method_load_kind ==
+ HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup)
+ ? HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup
+ : HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+ return HInvokeStaticOrDirect::DispatchInfo {
+ dispatch_info.method_load_kind,
+ code_ptr_location,
+ dispatch_info.method_load_data,
+ 0u
+ };
+ }
+ }
+ return dispatch_info;
}
vixl32::Register CodeGeneratorARMVIXL::GetInvokeStaticOrDirectExtraParameter(
@@ -6101,59 +6514,119 @@
void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp) {
- Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
- vixl32::Register temp_reg = RegisterFrom(temp);
+ // For better instruction scheduling we load the direct code pointer before the method pointer.
+ switch (invoke->GetCodePtrLocation()) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ // LR = code address from literal pool with link-time patch.
+ TODO_VIXL32(FATAL);
+ break;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+ // LR = invoke->GetDirectCodePtr();
+ __ Mov(lr, Operand::From(invoke->GetDirectCodePtr()));
+ break;
+ default:
+ break;
+ }
+ Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
uint32_t offset =
GetThreadOffset<kArmPointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
// temp = thread->string_init_entrypoint
- GetAssembler()->LoadFromOffset(kLoadWord, temp_reg, tr, offset);
+ GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(temp), tr, offset);
+ break;
+ }
+ case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
+ callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
+ __ Mov(RegisterFrom(temp), Operand::From(invoke->GetMethodAddress()));
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
+ TODO_VIXL32(FATAL);
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
+ HArmDexCacheArraysBase* base =
+ invoke->InputAt(invoke->GetSpecialInputIndex())->AsArmDexCacheArraysBase();
+ vixl32::Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke, RegisterFrom(temp));
+ int32_t offset = invoke->GetDexCacheArrayOffset() - base->GetElementOffset();
+ GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(temp), base_reg, offset);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
vixl32::Register method_reg;
+ vixl32::Register reg = RegisterFrom(temp);
if (current_method.IsRegister()) {
method_reg = RegisterFrom(current_method);
} else {
DCHECK(invoke->GetLocations()->Intrinsified());
DCHECK(!current_method.IsValid());
- method_reg = temp_reg;
- GetAssembler()->LoadFromOffset(kLoadWord, temp_reg, sp, kCurrentMethodStackOffset);
+ method_reg = reg;
+ GetAssembler()->LoadFromOffset(kLoadWord, reg, sp, kCurrentMethodStackOffset);
}
// /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
GetAssembler()->LoadFromOffset(
kLoadWord,
- temp_reg,
+ reg,
method_reg,
ArtMethod::DexCacheResolvedMethodsOffset(kArmPointerSize).Int32Value());
// temp = temp[index_in_cache];
// Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
uint32_t index_in_cache = invoke->GetDexMethodIndex();
GetAssembler()->LoadFromOffset(
- kLoadWord, temp_reg, temp_reg, CodeGenerator::GetCachePointerOffset(index_in_cache));
+ kLoadWord, reg, reg, CodeGenerator::GetCachePointerOffset(index_in_cache));
break;
}
- default:
- TODO_VIXL32(FATAL);
}
- // TODO(VIXL): Support `CodePtrLocation` values other than `kCallArtMethod`.
- if (invoke->GetCodePtrLocation() != HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod) {
- TODO_VIXL32(FATAL);
+ switch (invoke->GetCodePtrLocation()) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
+ __ Bl(GetFrameEntryLabel());
+ break;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+ relative_call_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
+ invoke->GetTargetMethod().dex_method_index);
+ {
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ __ bind(&relative_call_patches_.back().label);
+ // Arbitrarily branch to the BL itself; the branch target is overridden at link time.
+ __ bl(&relative_call_patches_.back().label);
+ }
+ break;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+ // LR prepared above for better instruction scheduling.
+ // LR()
+ {
+ // blx in T32 has only a 16-bit encoding, which is why a stricter size check is used for the scope.
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+ __ blx(lr);
+ }
+ break;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
+ // LR = callee_method->entry_point_from_quick_compiled_code_
+ GetAssembler()->LoadFromOffset(
+ kLoadWord,
+ lr,
+ RegisterFrom(callee_method),
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
+ {
+ // blx in T32 has only a 16-bit encoding, which is why a stricter size check is used for the scope.
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+ // LR()
+ __ blx(lr);
+ }
+ break;
}
- // LR = callee_method->entry_point_from_quick_compiled_code_
- GetAssembler()->LoadFromOffset(
- kLoadWord,
- lr,
- RegisterFrom(callee_method),
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
- // LR()
- __ Blx(lr);
-
DCHECK(!IsLeafMethod());
}
@@ -6169,9 +6642,15 @@
InvokeDexCallingConventionARMVIXL calling_convention;
vixl32::Register receiver = calling_convention.GetRegisterAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- // /* HeapReference<Class> */ temp = receiver->klass_
- GetAssembler()->LoadFromOffset(kLoadWord, temp, receiver, class_offset);
- MaybeRecordImplicitNullCheck(invoke);
+ {
+ // Make sure the pc is recorded immediately after the `ldr` instruction.
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ // /* HeapReference<Class> */ temp = receiver->klass_
+ __ ldr(temp, MemOperand(receiver, class_offset));
+ MaybeRecordImplicitNullCheck(invoke);
+ }
// Instead of simply (possibly) unpoisoning `temp` here, we should
// emit a read barrier for the previous class reference load.
// However this is not required in practice, as this is an
@@ -6188,7 +6667,81 @@
// LR = temp->GetEntryPoint();
GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, entry_point);
// LR();
- __ Blx(lr);
+ // This `blx` *must* be the *last* instruction generated by this stub, so that calls to
+ // `RecordPcInfo()` that immediately follow it record the correct pc. Use a scope to help
+ // guarantee that.
+ // blx in T32 has only a 16-bit encoding, which is why a stricter size check is used for the scope.
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+ __ blx(lr);
+}
+
+CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeStringPatch(
+ const DexFile& dex_file, uint32_t string_index) {
+ return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+}
+
+CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeTypePatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
+}
+
+CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeDexCacheArrayPatch(
+ const DexFile& dex_file, uint32_t element_offset) {
+ return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
+}
+
+CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativePatch(
+ const DexFile& dex_file, uint32_t offset_or_index, ArenaDeque<PcRelativePatchInfo>* patches) {
+ patches->emplace_back(dex_file, offset_or_index);
+ return &patches->back();
+}
+
+template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+inline void CodeGeneratorARMVIXL::EmitPcRelativeLinkerPatches(
+ const ArenaDeque<PcRelativePatchInfo>& infos,
+ ArenaVector<LinkerPatch>* linker_patches) {
+ for (const PcRelativePatchInfo& info : infos) {
+ const DexFile& dex_file = info.target_dex_file;
+ size_t offset_or_index = info.offset_or_index;
+ DCHECK(info.add_pc_label.IsBound());
+ uint32_t add_pc_offset = dchecked_integral_cast<uint32_t>(info.add_pc_label.GetLocation());
+ // Add MOVW patch.
+ DCHECK(info.movw_label.IsBound());
+ uint32_t movw_offset = dchecked_integral_cast<uint32_t>(info.movw_label.GetLocation());
+ linker_patches->push_back(Factory(movw_offset, &dex_file, add_pc_offset, offset_or_index));
+ // Add MOVT patch.
+ DCHECK(info.movt_label.IsBound());
+ uint32_t movt_offset = dchecked_integral_cast<uint32_t>(info.movt_label.GetLocation());
+ linker_patches->push_back(Factory(movt_offset, &dex_file, add_pc_offset, offset_or_index));
+ }
+}
+
+void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
+ DCHECK(linker_patches->empty());
+ size_t size =
+ relative_call_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * pc_relative_dex_cache_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size();
+ linker_patches->reserve(size);
+ for (const PatchInfo<vixl32::Label>& info : relative_call_patches_) {
+ uint32_t literal_offset = info.label.GetLocation();
+ linker_patches->push_back(
+ LinkerPatch::RelativeCodePatch(literal_offset, &info.dex_file, info.index));
+ }
+ EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
+ linker_patches);
+ if (!GetCompilerOptions().IsBootImage()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
+ linker_patches);
+ } else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
+ linker_patches);
+ }
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
}
void LocationsBuilderARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
@@ -6315,6 +6868,17 @@
jump_table->EmitTable(codegen_);
}
}
+void LocationsBuilderARMVIXL::VisitArmDexCacheArraysBase(HArmDexCacheArraysBase* base) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(base);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitArmDexCacheArraysBase(HArmDexCacheArraysBase* base) {
+ vixl32::Register base_reg = OutputRegister(base);
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ codegen_->NewPcRelativeDexCacheArrayPatch(base->GetDexFile(), base->GetElementOffset());
+ codegen_->EmitMovwMovtPlaceholder(labels, base_reg);
+}
// Copy the result of a call into the given target.
void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, Primitive::Type type) {
@@ -6325,7 +6889,7 @@
DCHECK_NE(type, Primitive::kPrimVoid);
- Location return_loc = InvokeDexCallingConventionVisitorARM().GetReturnLocation(type);
+ Location return_loc = InvokeDexCallingConventionVisitorARMVIXL().GetReturnLocation(type);
if (return_loc.Equals(trg)) {
return;
}
@@ -6373,6 +6937,21 @@
}
}
+void CodeGeneratorARMVIXL::EmitMovwMovtPlaceholder(
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels,
+ vixl32::Register out) {
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ 3 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ // TODO(VIXL): Think about using mov instead of movw.
+ __ bind(&labels->movw_label);
+ __ movw(out, /* placeholder */ 0u);
+ __ bind(&labels->movt_label);
+ __ movt(out, /* placeholder */ 0u);
+ __ bind(&labels->add_pc_label);
+ __ add(out, out, pc);
+}
+
#undef __
#undef QUICK_ENTRY_POINT
#undef TODO_VIXL32
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index bd91127..b7ba8dd 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -17,9 +17,15 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
-#include "code_generator_arm.h"
+#include "base/enums.h"
+#include "code_generator.h"
#include "common_arm.h"
+#include "driver/compiler_options.h"
+#include "nodes.h"
+#include "string_reference.h"
+#include "parallel_move_resolver.h"
#include "utils/arm/assembler_arm_vixl.h"
+#include "utils/type_reference.h"
// TODO(VIXL): make vixl clean wrt -Wshadow.
#pragma GCC diagnostic push
@@ -44,7 +50,7 @@
vixl::aarch32::r2,
vixl::aarch32::r3
};
-static const size_t kParameterCoreRegistersLengthVIXL = arraysize(kParameterCoreRegisters);
+static const size_t kParameterCoreRegistersLengthVIXL = arraysize(kParameterCoreRegistersVIXL);
static const vixl::aarch32::SRegister kParameterFpuRegistersVIXL[] = {
vixl::aarch32::s0,
vixl::aarch32::s1,
@@ -63,7 +69,7 @@
vixl::aarch32::s14,
vixl::aarch32::s15
};
-static const size_t kParameterFpuRegistersLengthVIXL = arraysize(kParameterFpuRegisters);
+static const size_t kParameterFpuRegistersLengthVIXL = arraysize(kParameterFpuRegistersVIXL);
static const vixl::aarch32::Register kMethodRegister = vixl::aarch32::r0;
@@ -90,7 +96,7 @@
vixl::aarch32::r3
};
static const size_t kRuntimeParameterCoreRegistersLengthVIXL =
- arraysize(kRuntimeParameterCoreRegisters);
+ arraysize(kRuntimeParameterCoreRegistersVIXL);
static const vixl::aarch32::SRegister kRuntimeParameterFpuRegistersVIXL[] = {
vixl::aarch32::s0,
vixl::aarch32::s1,
@@ -98,98 +104,10 @@
vixl::aarch32::s3
};
static const size_t kRuntimeParameterFpuRegistersLengthVIXL =
- arraysize(kRuntimeParameterFpuRegisters);
+ arraysize(kRuntimeParameterFpuRegistersVIXL);
class LoadClassSlowPathARMVIXL;
-#define FOR_EACH_IMPLEMENTED_INSTRUCTION(M) \
- M(Above) \
- M(AboveOrEqual) \
- M(Add) \
- M(And) \
- M(ArrayGet) \
- M(ArrayLength) \
- M(ArraySet) \
- M(Below) \
- M(BelowOrEqual) \
- M(BitwiseNegatedRight) \
- M(BooleanNot) \
- M(BoundsCheck) \
- M(BoundType) \
- M(CheckCast) \
- M(ClassTableGet) \
- M(ClearException) \
- M(ClinitCheck) \
- M(Compare) \
- M(CurrentMethod) \
- M(Deoptimize) \
- M(Div) \
- M(DivZeroCheck) \
- M(DoubleConstant) \
- M(Equal) \
- M(Exit) \
- M(FloatConstant) \
- M(Goto) \
- M(GreaterThan) \
- M(GreaterThanOrEqual) \
- M(If) \
- M(InstanceFieldGet) \
- M(InstanceFieldSet) \
- M(InstanceOf) \
- M(IntConstant) \
- M(IntermediateAddress) \
- M(InvokeInterface) \
- M(InvokeStaticOrDirect) \
- M(InvokeUnresolved) \
- M(InvokeVirtual) \
- M(LessThan) \
- M(LessThanOrEqual) \
- M(LoadClass) \
- M(LoadException) \
- M(LoadString) \
- M(LongConstant) \
- M(MemoryBarrier) \
- M(MonitorOperation) \
- M(Mul) \
- M(MultiplyAccumulate) \
- M(NativeDebugInfo) \
- M(Neg) \
- M(NewArray) \
- M(NewInstance) \
- M(Not) \
- M(NotEqual) \
- M(NullCheck) \
- M(NullConstant) \
- M(Or) \
- M(PackedSwitch) \
- M(ParallelMove) \
- M(ParameterValue) \
- M(Phi) \
- M(Rem) \
- M(Return) \
- M(ReturnVoid) \
- M(Ror) \
- M(Select) \
- M(Shl) \
- M(Shr) \
- M(StaticFieldGet) \
- M(StaticFieldSet) \
- M(Sub) \
- M(SuspendCheck) \
- M(Throw) \
- M(TryBoundary) \
- M(TypeConversion) \
- M(UnresolvedInstanceFieldGet) \
- M(UnresolvedInstanceFieldSet) \
- M(UnresolvedStaticFieldGet) \
- M(UnresolvedStaticFieldSet) \
- M(UShr) \
- M(Xor) \
-
-// TODO: Remove once the VIXL32 backend is implemented completely.
-#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
- M(ArmDexCacheArraysBase) \
-
class CodeGeneratorARMVIXL;
class JumpTableARMVIXL : public DeletableArenaObject<kArenaAllocSwitchTable> {
@@ -248,6 +166,22 @@
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionARMVIXL);
};
+class InvokeDexCallingConventionVisitorARMVIXL : public InvokeDexCallingConventionVisitor {
+ public:
+ InvokeDexCallingConventionVisitorARMVIXL() {}
+ virtual ~InvokeDexCallingConventionVisitorARMVIXL() {}
+
+ Location GetNextLocation(Primitive::Type type) OVERRIDE;
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
+ Location GetMethodLocation() const OVERRIDE;
+
+ private:
+ InvokeDexCallingConventionARMVIXL calling_convention;
+ uint32_t double_index_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARMVIXL);
+};
+
class FieldAccessCallingConventionARMVIXL : public FieldAccessCallingConvention {
public:
FieldAccessCallingConventionARMVIXL() {}
@@ -319,27 +253,26 @@
DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARMVIXL);
};
-#define DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR(Name) \
- void Visit##Name(H##Name*) OVERRIDE;
-
-#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR(Name) \
- void Visit##Name(H##Name* instr) OVERRIDE { \
- VisitUnimplemementedInstruction(instr); }
-
class LocationsBuilderARMVIXL : public HGraphVisitor {
public:
LocationsBuilderARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen)
: HGraphVisitor(graph), codegen_(codegen) {}
- FOR_EACH_IMPLEMENTED_INSTRUCTION(DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR)
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ void Visit##name(H##name* instr) OVERRIDE;
- FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR)
+ FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)
- private:
- void VisitUnimplemementedInstruction(HInstruction* instruction) {
- LOG(FATAL) << "Unimplemented Instruction: " << instruction->DebugName();
+#undef DECLARE_VISIT_INSTRUCTION
+
+ void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
+ << " (id " << instruction->GetId() << ")";
}
+ private:
void HandleInvoke(HInvoke* invoke);
void HandleBitwiseOperation(HBinaryOperation* operation, Opcode opcode);
void HandleCondition(HCondition* condition);
@@ -355,7 +288,7 @@
bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode, SetCc set_cc = kCcDontCare);
CodeGeneratorARMVIXL* const codegen_;
- InvokeDexCallingConventionVisitorARM parameter_visitor_;
+ InvokeDexCallingConventionVisitorARMVIXL parameter_visitor_;
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARMVIXL);
};
@@ -364,25 +297,30 @@
public:
InstructionCodeGeneratorARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen);
- FOR_EACH_IMPLEMENTED_INSTRUCTION(DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR)
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ void Visit##name(H##name* instr) OVERRIDE;
- FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR)
+ FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
+ << " (id " << instruction->GetId() << ")";
+ }
ArmVIXLAssembler* GetAssembler() const { return assembler_; }
ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
private:
- void VisitUnimplemementedInstruction(HInstruction* instruction) {
- LOG(FATAL) << "Unimplemented Instruction: " << instruction->DebugName();
- }
-
// Generate code for the given suspend check. If not null, `successor`
// is the block to branch to if the suspend check is not needed, and after
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
void GenerateClassInitializationCheck(LoadClassSlowPathARMVIXL* slow_path,
vixl32::Register class_reg);
- void HandleGoto(HInstruction* got, HBasicBlock* successor);
void GenerateAndConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
void GenerateOrrConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
void GenerateEorConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
@@ -440,17 +378,16 @@
uint32_t offset,
Location maybe_temp,
ReadBarrierOption read_barrier_option);
-
// Generate a GC root reference load:
//
// root <- *(obj + offset)
//
- // while honoring read barriers if `requires_read_barrier` is true.
+ // while honoring read barriers based on `read_barrier_option`.
void GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
vixl::aarch32::Register obj,
uint32_t offset,
- bool requires_read_barrier);
+ ReadBarrierOption read_barrier_option);
void GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
vixl::aarch32::Label* true_target,
@@ -470,6 +407,7 @@
void DivRemByPowerOfTwo(HBinaryOperation* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateDivRemConstantIntegral(HBinaryOperation* instruction);
+ void HandleGoto(HInstruction* got, HBasicBlock* successor);
ArmVIXLAssembler* const assembler_;
CodeGeneratorARMVIXL* const codegen_;
@@ -483,62 +421,50 @@
const ArmInstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats = nullptr);
-
virtual ~CodeGeneratorARMVIXL() {}
- void Initialize() OVERRIDE {
- block_labels_.resize(GetGraph()->GetBlocks().size());
- }
-
void GenerateFrameEntry() OVERRIDE;
void GenerateFrameExit() OVERRIDE;
-
void Bind(HBasicBlock* block) OVERRIDE;
-
- vixl::aarch32::Label* GetLabelOf(HBasicBlock* block) {
- block = FirstNonEmptyBlock(block);
- return &(block_labels_[block->GetBlockId()]);
- }
-
void MoveConstant(Location destination, int32_t value) OVERRIDE;
void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+
+ size_t GetWordSize() const OVERRIDE {
+ return static_cast<size_t>(kArmPointerSize);
+ }
+
+ size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return vixl::aarch32::kRegSizeInBytes; }
+
+ HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+
ArmVIXLAssembler* GetAssembler() OVERRIDE { return &assembler_; }
const ArmVIXLAssembler& GetAssembler() const OVERRIDE { return assembler_; }
ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
- size_t GetWordSize() const OVERRIDE { return kArmWordSize; }
-
- size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return vixl::aarch32::kRegSizeInBytes; }
-
uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
vixl::aarch32::Label* block_entry_label = GetLabelOf(block);
DCHECK(block_entry_label->IsBound());
return block_entry_label->GetLocation();
}
- JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
- jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARMVIXL(switch_instr));
- return jump_tables_.back().get();
- }
-
- HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
-
- HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
-
void FixJumpTables();
- void GenerateMemoryBarrier(MemBarrierKind kind);
- void Finalize(CodeAllocator* allocator) OVERRIDE;
void SetupBlockedRegisters() const OVERRIDE;
void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kThumb2; }
-
// Helper method to move a 32-bit value between two locations.
void Move32(Location destination, Location source);
@@ -553,45 +479,6 @@
vixl::aarch32::Register reg_index,
vixl::aarch32::Condition cond = vixl::aarch32::al);
- const ArmInstructionSetFeatures& GetInstructionSetFeatures() const { return isa_features_; }
-
- vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }
-
- // Saves the register in the stack. Returns the size taken on stack.
- size_t SaveCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
- uint32_t reg_id ATTRIBUTE_UNUSED) OVERRIDE {
- UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
- return 0;
- }
-
- // Restores the register from the stack. Returns the size taken on stack.
- size_t RestoreCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
- uint32_t reg_id ATTRIBUTE_UNUSED) OVERRIDE {
- UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
- return 0;
- }
-
- size_t SaveFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
- uint32_t reg_id ATTRIBUTE_UNUSED) OVERRIDE {
- UNIMPLEMENTED(INFO) << "TODO: SaveFloatingPointRegister";
- return 0;
- }
-
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-
- bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE {
- return type == Primitive::kPrimDouble || type == Primitive::kPrimLong;
- }
-
- void ComputeSpillMask() OVERRIDE;
-
- void GenerateImplicitNullCheck(HNullCheck* null_check) OVERRIDE;
- void GenerateExplicitNullCheck(HNullCheck* null_check) OVERRIDE;
-
- ParallelMoveResolver* GetMoveResolver() OVERRIDE {
- return &move_resolver_;
- }
-
// Generate code to invoke a runtime entry point.
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
@@ -604,8 +491,6 @@
HInstruction* instruction,
SlowPathCode* slow_path);
- void GenerateInvokeRuntime(int32_t entry_point_offset);
-
// Emit a write barrier.
void MarkGCCard(vixl::aarch32::Register temp,
vixl::aarch32::Register card,
@@ -613,6 +498,76 @@
vixl::aarch32::Register value,
bool can_be_null);
+ void GenerateMemoryBarrier(MemBarrierKind kind);
+
+ vixl::aarch32::Label* GetLabelOf(HBasicBlock* block) {
+ block = FirstNonEmptyBlock(block);
+ return &(block_labels_[block->GetBlockId()]);
+ }
+
+ void Initialize() OVERRIDE {
+ block_labels_.resize(GetGraph()->GetBlocks().size());
+ }
+
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+
+ const ArmInstructionSetFeatures& GetInstructionSetFeatures() const { return isa_features_; }
+
+ bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE {
+ return type == Primitive::kPrimDouble || type == Primitive::kPrimLong;
+ }
+
+ void ComputeSpillMask() OVERRIDE;
+
+ vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }
+
+ // Check if the desired_string_load_kind is supported. If it is, return it,
+ // otherwise return a fall-back kind that should be used instead.
+ HLoadString::LoadKind GetSupportedLoadStringKind(
+ HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+
+ // Check if the desired_class_load_kind is supported. If it is, return it,
+ // otherwise return a fall-back kind that should be used instead.
+ HLoadClass::LoadKind GetSupportedLoadClassKind(
+ HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ HInvokeStaticOrDirect* invoke) OVERRIDE;
+
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
+ void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
+
+ void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
+
+ // The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays
+ // and boot image strings/types. The only difference between those uses is the
+ // interpretation of offset_or_index. The PC-relative address is loaded with three
+ // instructions, MOVW+MOVT to load the offset into base_reg and then ADD base_reg, PC.
+ // The offset is calculated from the ADD's effective PC, i.e. PC+4 on Thumb2. Though we
+ // currently emit these 3 instructions together, instruction scheduling could
+ // split this sequence apart, so we keep separate labels for each of them.
+ struct PcRelativePatchInfo {
+ PcRelativePatchInfo(const DexFile& dex_file, uint32_t off_or_idx)
+ : target_dex_file(dex_file), offset_or_index(off_or_idx) { }
+ PcRelativePatchInfo(PcRelativePatchInfo&& other) = default;
+
+ const DexFile& target_dex_file;
+ // Either the dex cache array element offset or the string/type index.
+ uint32_t offset_or_index;
+ vixl::aarch32::Label movw_label;
+ vixl::aarch32::Label movt_label;
+ vixl::aarch32::Label add_pc_label;
+ };
+
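
As the comment above notes, the offset is measured from the ADD's effective PC (the ADD's address plus 4 in Thumb state). A small self-contained sketch of the displacement the MOVW/MOVT pair would need to carry, under the assumption that the patch target is an absolute address in the output image (this is not the linker's code, only the arithmetic implied by the comment):

#include <cstdint>

// Sketch of the link-time fix-up implied by the comment above: MOVW/MOVT receive the
// low/high halves of `target - (add_pc_address + 4)`, so that `add out, out, pc`
// executed at add_pc_address yields `target`.
struct MovwMovtImmediates {
  uint16_t movw_imm;  // low 16 bits of the displacement
  uint16_t movt_imm;  // high 16 bits of the displacement
};

inline MovwMovtImmediates ComputePcRelativeImmediates(uint32_t target, uint32_t add_pc_address) {
  const uint32_t displacement = target - (add_pc_address + 4u);  // Thumb2: PC reads as address + 4.
  return { static_cast<uint16_t>(displacement & 0xffffu),
           static_cast<uint16_t>(displacement >> 16) };
}
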
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
+ uint32_t element_offset);
+ void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -679,33 +634,35 @@
uint32_t offset,
Location index = Location::NoLocation());
- // Check if the desired_string_load_kind is supported. If it is, return it,
- // otherwise return a fall-back kind that should be used instead.
- HLoadString::LoadKind GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
-
- // Check if the desired_class_load_kind is supported. If it is, return it,
- // otherwise return a fall-back kind that should be used instead.
- HLoadClass::LoadKind GetSupportedLoadClassKind(
- HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
-
- // Check if the desired_dispatch_info is supported. If it is, return it,
- // otherwise return a fall-back info that should be used instead.
- HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
- const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) OVERRIDE;
-
- void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
- void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
-
- void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
-
void GenerateNop() OVERRIDE;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+
+ JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
+ jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARMVIXL(switch_instr));
+ return jump_tables_.back().get();
+ }
+ void EmitJumpTables();
+
+ void EmitMovwMovtPlaceholder(CodeGeneratorARMVIXL::PcRelativePatchInfo* labels,
+ vixl::aarch32::Register out);
+
private:
vixl::aarch32::Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
vixl::aarch32::Register temp);
+ using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, vixl::aarch32::Literal<uint32_t>*>;
+ using MethodToLiteralMap =
+ ArenaSafeMap<MethodReference, vixl::aarch32::Literal<uint32_t>*, MethodReferenceComparator>;
+
+ PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
+ uint32_t offset_or_index,
+ ArenaDeque<PcRelativePatchInfo>* patches);
+ template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+ static void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
+ ArenaVector<LinkerPatch>* linker_patches);
+
// Labels for each block that will be compiled.
// We use a deque so that the `vixl::aarch32::Label` objects do not move in memory.
ArenaDeque<vixl::aarch32::Label> block_labels_; // Indexed by block id.
@@ -719,15 +676,19 @@
ArmVIXLAssembler assembler_;
const ArmInstructionSetFeatures& isa_features_;
+ // Relative call patch info.
+ // Using ArenaDeque<> which retains element addresses on push/emplace_back().
+ ArenaDeque<PatchInfo<vixl::aarch32::Label>> relative_call_patches_;
+ // PC-relative patch info for each HArmDexCacheArraysBase.
+ ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
+ // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
+ ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
+ // PC-relative type patch info.
+ ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARMVIXL);
};
-#undef FOR_EACH_IMPLEMENTED_INSTRUCTION
-#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
-#undef DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR
-#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR
-
-
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 572d900..61dabfa 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -4688,6 +4688,16 @@
}
}
+void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(
+ HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
+ // TODO: to be implemented.
+}
+
+void InstructionCodeGeneratorMIPS::VisitShouldDeoptimizeFlag(
+ HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
+ // TODO: to be implemented.
+}
+
void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
CanMoveConditionally(select, codegen_->GetInstructionSetFeatures().IsR6(), locations);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index b5e9871..b1f9b1d 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2636,6 +2636,16 @@
/* false_target */ nullptr);
}
+void LocationsBuilderMIPS64::VisitShouldDeoptimizeFlag(
+ HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
+ // TODO: to be implemented.
+}
+
+void InstructionCodeGeneratorMIPS64::VisitShouldDeoptimizeFlag(
+ HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
+ // TODO: to be implemented.
+}
+
void LocationsBuilderMIPS64::VisitSelect(HSelect* select) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
if (Primitive::IsFloatingPointType(select->GetType())) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 12aa03c..d6e92cc 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1059,6 +1059,11 @@
}
}
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ movl(Address(ESP, -kShouldDeoptimizeFlagSize), Immediate(0));
+ }
+
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ subl(ESP, Immediate(adjust));
__ cfi().AdjustCFAOffset(adjust);
@@ -1676,6 +1681,17 @@
/* false_target */ nullptr);
}
+void LocationsBuilderX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ __ movl(flag->GetLocations()->Out().AsRegister<Register>(),
+ Address(ESP, codegen_->GetStackOffsetOfShouldDeoptimizeFlag()));
+}
+
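
Taken together, the store in the frame entry above and this load imply that the flag slot sits directly below the method's entry spills. A minimal sketch of that offset arithmetic, assuming the 4-byte flag size described in the nodes.h comment (the free-standing helper and its constant are illustrative, not taken from the patch):

#include <cstdint>

// Sketch only, not the patch's implementation: the frame entry writes the flag at
// SP - kShouldDeoptimizeFlagSize just before lowering SP by
// (frame_size - frame_entry_spill_size), so after the adjustment the same slot is
// reachable at the offset computed here.
constexpr uint32_t kShouldDeoptimizeFlagSizeSketch = 4u;  // 4-byte slot, per the nodes.h comment

constexpr uint32_t ShouldDeoptimizeFlagOffset(uint32_t frame_size, uint32_t frame_entry_spill_size) {
  return frame_size - frame_entry_spill_size - kShouldDeoptimizeFlagSizeSketch;
}

// Example: a 96-byte frame with 16 bytes of entry spills places the flag at SP + 76.
static_assert(ShouldDeoptimizeFlagOffset(96u, 16u) == 76u, "flag sits just below the entry spills");
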
static bool SelectCanUseCMOV(HSelect* select) {
// There are no conditional move instructions for XMMs.
if (Primitive::IsFloatingPointType(select->GetType())) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 22f7f6b..4474dec 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1326,6 +1326,12 @@
}
}
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ movl(Address(CpuRegister(RSP), xmm_spill_location - kShouldDeoptimizeFlagSize),
+ Immediate(0));
+ }
+
// Save the current method if we need it. Note that we do not
// do this in HCurrentMethod, as the instruction might have been removed
// in the SSA graph.
@@ -1747,6 +1753,17 @@
/* false_target */ nullptr);
}
+void LocationsBuilderX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ __ movl(flag->GetLocations()->Out().AsRegister<CpuRegister>(),
+ Address(CpuRegister(RSP), codegen_->GetStackOffsetOfShouldDeoptimizeFlag()));
+}
+
static bool SelectCanUseCMOV(HSelect* select) {
// There are no conditional move instructions for XMMs.
if (Primitive::IsFloatingPointType(select->GetType())) {
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index d3623f1..eabdbad 100644
--- a/compiler/optimizing/common_arm.h
+++ b/compiler/optimizing/common_arm.h
@@ -17,6 +17,11 @@
#ifndef ART_COMPILER_OPTIMIZING_COMMON_ARM_H_
#define ART_COMPILER_OPTIMIZING_COMMON_ARM_H_
+#include "debug/dwarf/register.h"
+#include "locations.h"
+#include "nodes.h"
+#include "utils/arm/constants_arm.h"
+
// TODO(VIXL): Make VIXL compile with -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.cc b/compiler/optimizing/dex_cache_array_fixups_arm.cc
index 82b8123..10a36c6 100644
--- a/compiler/optimizing/dex_cache_array_fixups_arm.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.cc
@@ -17,12 +17,24 @@
#include "dex_cache_array_fixups_arm.h"
#include "base/arena_containers.h"
+#ifdef ART_USE_VIXL_ARM_BACKEND
+#include "code_generator_arm_vixl.h"
+#include "intrinsics_arm_vixl.h"
+#else
#include "code_generator_arm.h"
#include "intrinsics_arm.h"
+#endif
#include "utils/dex_cache_arrays_layout-inl.h"
namespace art {
namespace arm {
+#ifdef ART_USE_VIXL_ARM_BACKEND
+typedef CodeGeneratorARMVIXL CodeGeneratorARMType;
+typedef IntrinsicLocationsBuilderARMVIXL IntrinsicLocationsBuilderARMType;
+#else
+typedef CodeGeneratorARM CodeGeneratorARMType;
+typedef IntrinsicLocationsBuilderARM IntrinsicLocationsBuilderARMType;
+#endif
/**
* Finds instructions that need the dex cache arrays base as an input.
@@ -31,7 +43,7 @@
public:
DexCacheArrayFixupsVisitor(HGraph* graph, CodeGenerator* codegen)
: HGraphVisitor(graph),
- codegen_(down_cast<CodeGeneratorARM*>(codegen)),
+ codegen_(down_cast<CodeGeneratorARMType*>(codegen)),
dex_cache_array_bases_(std::less<const DexFile*>(),
// Attribute memory use to code generator.
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {}
@@ -66,7 +78,7 @@
// If this is an invoke with PC-relative access to the dex cache methods array,
// we need to add the dex cache arrays base as the special input.
if (invoke->HasPcRelativeDexCache() &&
- !IsCallFreeIntrinsic<IntrinsicLocationsBuilderARM>(invoke, codegen_)) {
+ !IsCallFreeIntrinsic<IntrinsicLocationsBuilderARMType>(invoke, codegen_)) {
HArmDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(invoke->GetDexFile());
// Update the element offset in base.
DexCacheArraysLayout layout(kArmPointerSize, &invoke->GetDexFile());
@@ -94,7 +106,7 @@
return base;
}
- CodeGeneratorARM* codegen_;
+ CodeGeneratorARMType* codegen_;
using DexCacheArraysBaseMap =
ArenaSafeMap<const DexFile*, HArmDexCacheArraysBase*, std::less<const DexFile*>>;
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index c8cba20..188ee3a 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -23,7 +23,6 @@
#include "base/arena_containers.h"
#include "base/bit_vector-inl.h"
#include "base/stringprintf.h"
-#include "handle_scope-inl.h"
namespace art {
@@ -448,7 +447,6 @@
// Ensure that reference type instructions have reference type info.
if (instruction->GetType() == Primitive::kPrimNot) {
- ScopedObjectAccess soa(Thread::Current());
if (!instruction->GetReferenceTypeInfo().IsValid()) {
AddError(StringPrintf("Reference type instruction %s:%d does not have "
"valid reference type information.",
@@ -1011,7 +1009,6 @@
void GraphChecker::VisitBoundType(HBoundType* instruction) {
VisitInstruction(instruction);
- ScopedObjectAccess soa(Thread::Current());
if (!instruction->GetUpperBound().IsValid()) {
AddError(StringPrintf(
"%s %d does not have a valid upper bound RTI.",
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 01e89bb..8d93867 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -292,6 +292,21 @@
classes->Get(InlineCache::kIndividualCacheSize - 1) == nullptr;
}
+ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
+ if (!resolved_method->HasSingleImplementation()) {
+ return nullptr;
+ }
+ if (Runtime::Current()->IsAotCompiler()) {
+ // No CHA-based devirtualization for the AOT compiler (yet).
+ return nullptr;
+ }
+ if (outermost_graph_->IsCompilingOsr()) {
+ // We do not support HDeoptimize in OSR methods.
+ return nullptr;
+ }
+ return resolved_method->GetSingleImplementation();
+}
+
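
For intuition about when this returns a non-null method: CHA records that a virtual method currently has exactly one loaded implementation. The C++ sketch below mirrors the Java-level situation (illustrative only; the analysis itself works on ArtMethod, not on source):

// Illustration of the "single implementation" case CHA exploits: while `Impl` is the
// only loaded override of `Base::Process`, a call through `Base*` has exactly one
// possible target and can be devirtualized, guarded by the should_deoptimize flag.
struct Base {
  virtual ~Base() = default;
  virtual int Process(int x) = 0;
};

struct Impl : Base {
  int Process(int x) override { return x + 1; }
};

int CallProcess(Base* b) {
  return b->Process(41);  // virtual call with a single possible target today
}
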
bool HInliner::TryInline(HInvoke* invoke_instruction) {
if (invoke_instruction->IsInvokeUnresolved()) {
return false; // Don't bother to move further if we know the method is unresolved.
@@ -317,10 +332,29 @@
actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method);
}
+ bool cha_devirtualize = false;
+ if (actual_method == nullptr) {
+ ArtMethod* method = TryCHADevirtualization(resolved_method);
+ if (method != nullptr) {
+ cha_devirtualize = true;
+ actual_method = method;
+ }
+ }
+
if (actual_method != nullptr) {
- bool result = TryInlineAndReplace(invoke_instruction, actual_method, /* do_rtp */ true);
+ bool result = TryInlineAndReplace(invoke_instruction,
+ actual_method,
+ /* do_rtp */ true,
+ cha_devirtualize);
if (result && !invoke_instruction->IsInvokeStaticOrDirect()) {
- MaybeRecordStat(kInlinedInvokeVirtualOrInterface);
+ if (cha_devirtualize) {
+ // Add a dependency due to devirtualization: we have assumed that resolved_method
+ // has a single implementation.
+ outermost_graph_->AddCHASingleImplementationDependency(resolved_method);
+ MaybeRecordStat(kCHAInline);
+ } else {
+ MaybeRecordStat(kInlinedInvokeVirtualOrInterface);
+ }
}
return result;
}
@@ -438,7 +472,10 @@
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- if (!TryInlineAndReplace(invoke_instruction, resolved_method, /* do_rtp */ false)) {
+ if (!TryInlineAndReplace(invoke_instruction,
+ resolved_method,
+ /* do_rtp */ false,
+ /* cha_devirtualize */ false)) {
return false;
}
@@ -465,6 +502,25 @@
return true;
}
+void HInliner::AddCHAGuard(HInstruction* invoke_instruction,
+ uint32_t dex_pc,
+ HInstruction* cursor,
+ HBasicBlock* bb_cursor) {
+ HInstruction* deopt_flag = new (graph_->GetArena()) HShouldDeoptimizeFlag(dex_pc);
+ HInstruction* should_deopt = new (graph_->GetArena()) HNotEqual(
+ deopt_flag, graph_->GetIntConstant(0, dex_pc));
+ HInstruction* deopt = new (graph_->GetArena()) HDeoptimize(should_deopt, dex_pc);
+
+ if (cursor != nullptr) {
+ bb_cursor->InsertInstructionAfter(deopt_flag, cursor);
+ } else {
+ bb_cursor->InsertInstructionBefore(deopt_flag, bb_cursor->GetFirstInstruction());
+ }
+ bb_cursor->InsertInstructionAfter(should_deopt, deopt_flag);
+ bb_cursor->InsertInstructionAfter(deopt, should_deopt);
+ deopt->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
+}
+
HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
HInstruction* cursor,
HBasicBlock* bb_cursor,
@@ -787,8 +843,14 @@
return true;
}
-bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction, ArtMethod* method, bool do_rtp) {
+bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
+ ArtMethod* method,
+ bool do_rtp,
+ bool cha_devirtualize) {
HInstruction* return_replacement = nullptr;
+ uint32_t dex_pc = invoke_instruction->GetDexPc();
+ HInstruction* cursor = invoke_instruction->GetPrevious();
+ HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
if (!TryBuildAndInline(invoke_instruction, method, &return_replacement)) {
if (invoke_instruction->IsInvokeInterface()) {
// Turn an invoke-interface into an invoke-virtual. An invoke-virtual is always
@@ -826,6 +888,9 @@
return false;
}
}
+ if (cha_devirtualize) {
+ AddCHAGuard(invoke_instruction, dex_pc, cursor, bb_cursor);
+ }
if (return_replacement != nullptr) {
invoke_instruction->ReplaceWith(return_replacement);
}
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index a2b4fc9..ffebd97 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -62,8 +62,12 @@
// Try to inline `resolved_method` in place of `invoke_instruction`. `do_rtp` is whether
// reference type propagation can run after the inlining. If the inlining is successful, this
- // method will replace and remove the `invoke_instruction`.
- bool TryInlineAndReplace(HInvoke* invoke_instruction, ArtMethod* resolved_method, bool do_rtp)
+ // method will replace and remove the `invoke_instruction`. If `cha_devirtualize` is true,
+ // a CHA guard needs to be added for the inlining.
+ bool TryInlineAndReplace(HInvoke* invoke_instruction,
+ ArtMethod* resolved_method,
+ bool do_rtp,
+ bool cha_devirtualize)
REQUIRES_SHARED(Locks::mutator_lock_);
bool TryBuildAndInline(HInvoke* invoke_instruction,
@@ -118,6 +122,18 @@
Handle<mirror::ObjectArray<mirror::Class>> classes)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Try CHA-based devirtualization to change virtual method calls into
+ // direct calls.
+ // Returns the method that `resolved_method` can be devirtualized to, or nullptr if
+ // devirtualization is not possible.
+ ArtMethod* TryCHADevirtualization(ArtMethod* resolved_method)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Add a CHA guard for a CHA-based devirtualized call. A CHA guard checks the
+ // should_deoptimize flag and, if the flag is set, triggers deoptimization.
+ void AddCHAGuard(HInstruction* invoke_instruction,
+ uint32_t dex_pc,
+ HInstruction* cursor,
+ HBasicBlock* bb_cursor);
HInstanceFieldGet* BuildGetReceiverClass(ClassLinker* class_linker,
HInstruction* receiver,
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 9e72447..433dced 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -71,7 +71,7 @@
: SlowPathCodeARMVIXL(invoke), invoke_(invoke) {}
Location MoveArguments(CodeGenerator* codegen) {
- InvokeDexCallingConventionVisitorARM calling_convention_visitor;
+ InvokeDexCallingConventionVisitorARMVIXL calling_convention_visitor;
IntrinsicVisitor::MoveArguments(invoke_, codegen, &calling_convention_visitor);
return calling_convention_visitor.GetMethodLocation();
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 680381a..594255c 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2543,6 +2543,8 @@
return os << "BssEntry";
case HLoadString::LoadKind::kDexCacheViaMethod:
return os << "DexCacheViaMethod";
+ case HLoadString::LoadKind::kJitTableAddress:
+ return os << "JitTableAddress";
default:
LOG(FATAL) << "Unknown HLoadString::LoadKind: " << static_cast<int>(rhs);
UNREACHABLE();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7ab04e1..e3f4d8f 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -333,7 +333,8 @@
cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
cached_current_method_(nullptr),
inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
- osr_(osr) {
+ osr_(osr),
+ cha_single_implementation_list_(arena->Adapter(kArenaAllocCHA)) {
blocks_.reserve(kDefaultNumberOfBlocks);
}
@@ -536,6 +537,20 @@
bool IsCompilingOsr() const { return osr_; }
+ ArenaSet<ArtMethod*>& GetCHASingleImplementationList() {
+ return cha_single_implementation_list_;
+ }
+
+ void AddCHASingleImplementationDependency(ArtMethod* method) {
+ cha_single_implementation_list_.insert(method);
+ }
+
+ bool HasShouldDeoptimizeFlag() const {
+ // TODO: if all CHA guards can be eliminated, there is no need for the flag
+ // even if cha_single_implementation_list_ is not empty.
+ return !cha_single_implementation_list_.empty();
+ }
+
bool HasTryCatch() const { return has_try_catch_; }
void SetHasTryCatch(bool value) { has_try_catch_ = value; }
@@ -672,6 +687,9 @@
// compiled code entries which the interpreter can directly jump to.
const bool osr_;
+ // List of methods that are assumed to have a single implementation.
+ ArenaSet<ArtMethod*> cha_single_implementation_list_;
+
friend class SsaBuilder; // For caching constants.
friend class SsaLivenessAnalysis; // For the linear order.
friend class HInliner; // For the reverse post order.
@@ -1240,6 +1258,7 @@
M(ClinitCheck, Instruction) \
M(Compare, BinaryOperation) \
M(CurrentMethod, Instruction) \
+ M(ShouldDeoptimizeFlag, Instruction) \
M(Deoptimize, Instruction) \
M(Div, BinaryOperation) \
M(DivZeroCheck, Instruction) \
@@ -2875,6 +2894,27 @@
DISALLOW_COPY_AND_ASSIGN(HDeoptimize);
};
+// Represents a should_deoptimize flag. Currently used for CHA-based devirtualization.
+// The compiled code checks this flag in a guard before each devirtualized call and,
+// if the flag is set, deoptimizes.
+// The flag occupies a 4-byte slot on the stack.
+// TODO: allocate a register for this flag.
+class HShouldDeoptimizeFlag FINAL : public HExpression<0> {
+ public:
+ // TODO: use SideEffects to aid eliminating some CHA guards.
+ explicit HShouldDeoptimizeFlag(uint32_t dex_pc)
+ : HExpression(Primitive::kPrimInt, SideEffects::None(), dex_pc) {
+ }
+
+ // We don't eliminate CHA guards yet.
+ bool CanBeMoved() const OVERRIDE { return false; }
+
+ DECLARE_INSTRUCTION(ShouldDeoptimizeFlag);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HShouldDeoptimizeFlag);
+};
+
// Represents the ArtMethod that was passed as a first argument to
// the method. It is used by instructions that depend on it, like
// instructions that work with the dex cache.
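To make the new HShouldDeoptimizeFlag node concrete: the generated code reads a 4-byte flag in the frame and branches to deoptimization when the runtime has set it, otherwise it takes the devirtualized fast path. Below is a minimal standalone C++ sketch of that guard shape, not ART code; the flag variable and the two helper functions are illustrative stand-ins.

#include <atomic>
#include <cstdint>
#include <cstdio>

// Illustrative stand-ins: in ART the flag is a 4-byte stack slot and
// deoptimization transfers control to the interpreter.
static std::atomic<uint32_t> should_deoptimize{0};

static void DirectCallToSingleImplementation() { std::puts("direct call"); }
static void Deoptimize() { std::puts("deoptimize"); }

// Shape of a CHA guard: test the flag, deoptimize if it was set,
// otherwise take the devirtualized (direct) call.
static void GuardedDevirtualizedCall() {
  if (should_deoptimize.load(std::memory_order_relaxed) != 0) {
    Deoptimize();
    return;
  }
  DirectCallToSingleImplementation();
}

int main() {
  GuardedDevirtualizedCall();      // fast path
  should_deoptimize.store(1);      // the runtime invalidates the assumption
  GuardedDevirtualizedCall();      // the guard now deoptimizes
  return 0;
}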
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 2382b72..8ea2b06 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -630,10 +630,8 @@
#if defined(ART_ENABLE_CODEGEN_arm)
case kThumb2:
case kArm: {
-#ifndef ART_USE_VIXL_ARM_BACKEND
arm::DexCacheArrayFixups* fixups =
new (arena) arm::DexCacheArrayFixups(graph, codegen, stats);
-#endif
arm::InstructionSimplifierArm* simplifier =
new (arena) arm::InstructionSimplifierArm(graph, stats);
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
@@ -642,9 +640,7 @@
simplifier,
side_effects,
gvn,
-#ifndef ART_USE_VIXL_ARM_BACKEND
fixups
-#endif
};
RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
break;
@@ -1208,7 +1204,9 @@
code_allocator.GetMemory().data(),
code_allocator.GetSize(),
osr,
- roots);
+ roots,
+ codegen->GetGraph()->HasShouldDeoptimizeFlag(),
+ codegen->GetGraph()->GetCHASingleImplementationList());
if (code == nullptr) {
code_cache->ClearData(self, stack_map_data, roots_data);
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index c8d1ce0..203b1ec 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -27,6 +27,7 @@
enum MethodCompilationStat {
kAttemptCompilation = 0,
+ kCHAInline,
kCompiled,
kInlinedInvoke,
kReplacedInvokeWithSimplePattern,
@@ -106,6 +107,7 @@
std::string name;
switch (stat) {
case kAttemptCompilation : name = "AttemptCompilation"; break;
+ case kCHAInline : name = "CHAInline"; break;
case kCompiled : name = "Compiled"; break;
case kInlinedInvoke : name = "InlinedInvoke"; break;
case kReplacedInvokeWithSimplePattern: name = "ReplacedInvokeWithSimplePattern"; break;
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 5991791..59523a9 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -87,6 +87,10 @@
// Adjust the stack slot, now that we know the number of them for each type.
// The way this implementation lays out the stack is the following:
// [parameter slots ]
+ // [art method (caller) ]
+ // [entry spill (core) ]
+ // [entry spill (float) ]
+ // [should_deoptimize flag] (this is optional)
// [catch phi spill slots ]
// [double spill slots ]
// [long spill slots ]
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index aa0d371..9064f86 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -1749,7 +1749,7 @@
bool RegisterAllocatorGraphColor::IsCallerSave(size_t reg, bool processing_core_regs) {
return processing_core_regs
? !codegen_->IsCoreCalleeSaveRegister(reg)
- : !codegen_->IsCoreCalleeSaveRegister(reg);
+ : !codegen_->IsFloatingPointCalleeSaveRegister(reg);
}
static bool RegisterIsAligned(size_t reg) {
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index fb6f172..2d026b8 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -428,8 +428,6 @@
UNIMPLEMENTED(FATAL);
}
-static constexpr uint32_t kArmInstrMaxSizeInBytes = 4;
-
void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
FrameOffset handle_scope_offset,
ManagedRegister min_reg,
@@ -458,14 +456,14 @@
if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
if (!out_reg.Equals(in_reg)) {
AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
- 3 * kArmInstrMaxSizeInBytes,
+ 3 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
___ it(eq, 0xc);
___ mov(eq, out_reg.AsVIXLRegister(), 0);
asm_.AddConstantInIt(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
} else {
AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
- 2 * kArmInstrMaxSizeInBytes,
+ 2 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
___ it(ne, 0x8);
asm_.AddConstantInIt(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
@@ -496,7 +494,7 @@
if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
- 2 * kArmInstrMaxSizeInBytes,
+ 2 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
___ it(ne, 0x8);
asm_.AddConstantInIt(scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
@@ -589,7 +587,7 @@
___ Cmp(scratch.AsVIXLRegister(), 0);
{
AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
- kArmInstrMaxSizeInBytes,
+ vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
___ b(ne, Narrow, exception_blocks_.back()->Entry());
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 91a32f9..264be99 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1855,8 +1855,8 @@
return false;
}
- // VDEX finalized, seek back to the beginning and write the header.
- if (!oat_writers_[i]->WriteVdexHeader(vdex_out.get())) {
+ // VDEX finalized, seek back to the beginning and write checksums and the header.
+ if (!oat_writers_[i]->WriteChecksumsAndVdexHeader(vdex_out.get())) {
LOG(ERROR) << "Failed to write vdex header into VDEX " << vdex_file->GetPath();
return false;
}
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 714a58c..b6b62a8 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -26,6 +26,7 @@
#include "base/stringprintf.h"
#include "dex_file-inl.h"
#include "dex2oat_environment_test.h"
+#include "jit/offline_profiling_info.h"
#include "oat.h"
#include "oat_file.h"
#include "utils.h"
@@ -552,26 +553,6 @@
RunTest(CompilerFilter::kSpeed, true, { "--very-large-app-threshold=100" });
}
-static const char kDexFileLayoutInputProfile[] = "cHJvADAwMgABAAwAAQABAOqMEeFEZXhOb09hdC5qYXIBAAEA";
-
-static void WriteFileBase64(const char* base64, const char* location) {
- // Decode base64.
- CHECK(base64 != nullptr);
- size_t length;
- std::unique_ptr<uint8_t[]> bytes(DecodeBase64(base64, &length));
- CHECK(bytes.get() != nullptr);
-
- // Write to provided file.
- std::unique_ptr<File> file(OS::CreateEmptyFile(location));
- CHECK(file.get() != nullptr);
- if (!file->WriteFully(bytes.get(), length)) {
- PLOG(FATAL) << "Failed to write base64 as file";
- }
- if (file->FlushCloseOrErase() != 0) {
- PLOG(FATAL) << "Could not flush and close test file.";
- }
-}
-
class Dex2oatLayoutTest : public Dex2oatTest {
protected:
void CheckFilter(CompilerFilter::Filter input ATTRIBUTE_UNUSED,
@@ -579,13 +560,34 @@
// Ignore, we'll do our own checks.
}
+ // Emits a profile with a single dex file with the given location and a single class index of 1.
+ void GenerateProfile(const std::string& test_profile,
+ const std::string& dex_location,
+ uint32_t checksum) {
+ int profile_test_fd = open(test_profile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
+ CHECK_GE(profile_test_fd, 0);
+
+ ProfileCompilationInfo info;
+ std::string profile_key = ProfileCompilationInfo::GetProfileDexFileKey(dex_location);
+ info.AddClassIndex(profile_key, checksum, dex::TypeIndex(1));
+ bool result = info.Save(profile_test_fd);
+ close(profile_test_fd);
+ ASSERT_TRUE(result);
+ }
+
void RunTest() {
std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
std::string profile_location = GetScratchDir() + "/primary.prof";
std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
Copy(GetDexSrc2(), dex_location);
- WriteFileBase64(kDexFileLayoutInputProfile, profile_location.c_str());
+ const char* location = dex_location.c_str();
+ std::string error_msg;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ ASSERT_TRUE(DexFile::Open(location, location, true, &error_msg, &dex_files));
+ EXPECT_EQ(dex_files.size(), 1U);
+ std::unique_ptr<const DexFile>& dex_file = dex_files[0];
+ GenerateProfile(profile_location, dex_location, dex_file->GetLocationChecksum());
const std::vector<std::string>& extra_args = { "--profile-file=" + profile_location };
GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kLayoutProfile, extra_args);
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index a1984a7..80c7113 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1053,7 +1053,8 @@
if (options_.absolute_addresses_) {
vios->Stream() << StringPrintf("%p ", oat_method.GetVmapTable());
}
- uint32_t vmap_table_offset = method_header == nullptr ? 0 : method_header->vmap_table_offset_;
+ uint32_t vmap_table_offset =
+ method_header == nullptr ? 0 : method_header->GetVmapTableOffset();
vios->Stream() << StringPrintf("(offset=0x%08x)\n", vmap_table_offset);
size_t vmap_table_offset_limit =
@@ -1603,7 +1604,7 @@
// Mark dex caches.
dex_caches_.clear();
{
- ReaderMutexLock mu(self, *class_linker->DexLock());
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
ObjPtr<mirror::DexCache> dex_cache =
ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
diff --git a/runtime/Android.bp b/runtime/Android.bp
index c6f479f..08be5b2 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -45,6 +45,7 @@
"base/timing_logger.cc",
"base/unix_file/fd_file.cc",
"base/unix_file/random_access_file_utils.cc",
+ "cha.cc",
"check_jni.cc",
"class_linker.cc",
"class_table.cc",
@@ -514,6 +515,7 @@
"base/transform_iterator_test.cc",
"base/variant_map_test.cc",
"base/unix_file/fd_file_test.cc",
+ "cha_test.cc",
"class_linker_test.cc",
"compiler_filter_test.cc",
"dex_file_test.cc",
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 730a9c3..ef03bb3 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -105,7 +105,7 @@
DoGetAccessFlagsHelper<kReadBarrierOption>(this);
}
}
- return access_flags_;
+ return access_flags_.load(std::memory_order_relaxed);
}
inline uint16_t ArtMethod::GetMethodIndex() {
@@ -452,6 +452,53 @@
return type;
}
+inline bool ArtMethod::HasSingleImplementation() {
+ if (IsFinal() || GetDeclaringClass()->IsFinal()) {
+ // We don't set kAccSingleImplementation for these cases since intrinsics
+ // can also use that bit.
+ return true;
+ }
+ return (GetAccessFlags() & kAccSingleImplementation) != 0;
+}
+
+inline void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
+ DCHECK(IsUint<8>(intrinsic));
+ // Currently we only do intrinsics for static/final methods or methods of final
+ // classes. We don't set kAccSingleImplementation for those methods.
+ DCHECK(IsStatic() || IsFinal() || GetDeclaringClass()->IsFinal()) <<
+ "Potential conflict with kAccSingleImplementation";
+ uint32_t new_value = (GetAccessFlags() & kAccFlagsNotUsedByIntrinsic) |
+ kAccIntrinsic |
+ (intrinsic << POPCOUNT(kAccFlagsNotUsedByIntrinsic));
+ if (kIsDebugBuild) {
+ uint32_t java_flags = (GetAccessFlags() & kAccJavaFlagsMask);
+ bool is_constructor = IsConstructor();
+ bool is_synchronized = IsSynchronized();
+ bool skip_access_checks = SkipAccessChecks();
+ bool is_fast_native = IsFastNative();
+ bool is_copied = IsCopied();
+ bool is_miranda = IsMiranda();
+ bool is_default = IsDefault();
+ bool is_default_conflict = IsDefaultConflicting();
+ bool is_compilable = IsCompilable();
+ bool must_count_locks = MustCountLocks();
+ SetAccessFlags(new_value);
+ DCHECK_EQ(java_flags, (GetAccessFlags() & kAccJavaFlagsMask));
+ DCHECK_EQ(is_constructor, IsConstructor());
+ DCHECK_EQ(is_synchronized, IsSynchronized());
+ DCHECK_EQ(skip_access_checks, SkipAccessChecks());
+ DCHECK_EQ(is_fast_native, IsFastNative());
+ DCHECK_EQ(is_copied, IsCopied());
+ DCHECK_EQ(is_miranda, IsMiranda());
+ DCHECK_EQ(is_default, IsDefault());
+ DCHECK_EQ(is_default_conflict, IsDefaultConflicting());
+ DCHECK_EQ(is_compilable, IsCompilable());
+ DCHECK_EQ(must_count_locks, MustCountLocks());
+ } else {
+ SetAccessFlags(new_value);
+ }
+}
+
template<ReadBarrierOption kReadBarrierOption, typename RootVisitorType>
void ArtMethod::VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) {
if (LIKELY(!declaring_class_.IsNull())) {
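The bit layout SetIntrinsic relies on is: keep the access-flag bits intrinsics do not reuse, set the intrinsic marker, and shift the 8-bit ordinal into the freed bits. A standalone sketch of that packing follows; the mask values are made up for illustration and are not ART's real kAcc* constants, and the unpack helper is part of the sketch only.

#include <cassert>
#include <cstdint>

// Made-up masks for illustration only; ART's real kAcc* constants differ.
constexpr uint32_t kFlagsNotUsedByIntrinsic = 0x007fffffu;  // bits 0-22 are preserved
constexpr uint32_t kIntrinsicMarker = 0x80000000u;          // bit 31 marks "is intrinsic"
constexpr uint32_t kMaxIntrinsic = 0xffu;                   // ordinal fits in 8 bits (23-30)

static int PopCount(uint32_t v) { return __builtin_popcount(v); }

// Pack: preserve the bits intrinsics don't reuse, set the marker, and store
// the ordinal in the bits that were freed up.
static uint32_t PackIntrinsic(uint32_t access_flags, uint32_t ordinal) {
  return (access_flags & kFlagsNotUsedByIntrinsic) |
         kIntrinsicMarker |
         (ordinal << PopCount(kFlagsNotUsedByIntrinsic));
}

static uint32_t UnpackIntrinsic(uint32_t access_flags) {
  return (access_flags >> PopCount(kFlagsNotUsedByIntrinsic)) & kMaxIntrinsic;
}

int main() {
  uint32_t java_flags = 0x00000011u;                 // some low, preserved bits
  uint32_t packed = PackIntrinsic(java_flags, 42u);  // store ordinal 42
  assert((packed & kFlagsNotUsedByIntrinsic) == java_flags);  // preserved bits survive
  assert(UnpackIntrinsic(packed) == 42u);
  return 0;
}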
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index eeece90..96b6f18 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -51,6 +51,17 @@
extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*,
const char*);
+ArtMethod* ArtMethod::GetSingleImplementation() {
+ DCHECK(!IsNative());
+ if (!IsAbstract()) {
+ // A non-abstract method's single implementation is itself.
+ return this;
+ }
+ // TODO: add single-implementation logic for abstract method by storing it
+ // in ptr_sized_fields_.
+ return nullptr;
+}
+
ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method) {
ObjPtr<mirror::Executable> executable = soa.Decode<mirror::Executable>(jlr_method);
@@ -68,7 +79,8 @@
DCHECK(ext->GetObsoleteDexCaches() != nullptr);
int32_t len = obsolete_methods->GetLength();
DCHECK_EQ(len, ext->GetObsoleteDexCaches()->GetLength());
- // TODO I think this is fine since images should never have obsolete methods in them.
+ // Using kRuntimePointerSize (instead of using the image's pointer size) is fine since images
+ // should never have obsolete methods in them so they should always be the same.
PointerSize pointer_size = kRuntimePointerSize;
DCHECK_EQ(kRuntimePointerSize, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
for (int32_t i = 0; i < len; i++) {
@@ -345,7 +357,7 @@
CHECK(!IsFastNative()) << PrettyMethod();
CHECK(native_method != nullptr) << PrettyMethod();
if (is_fast) {
- SetAccessFlags(GetAccessFlags() | kAccFastNative);
+ AddAccessFlags(kAccFastNative);
}
SetEntryPointFromJni(native_method);
}
@@ -503,7 +515,7 @@
if (oat_file == nullptr) {
return nullptr;
}
- return oat_file->DexBegin() + header->vmap_table_offset_;
+ return oat_file->DexBegin() + header->GetVmapTableOffset();
} else {
return oat_method.GetVmapTable();
}
@@ -601,7 +613,7 @@
DCHECK(method_header->Contains(pc))
<< PrettyMethod()
<< " " << std::hex << pc << " " << oat_entry_point
- << " " << (uintptr_t)(method_header->code_ + method_header->code_size_);
+ << " " << (uintptr_t)(method_header->GetCode() + method_header->GetCodeSize());
return method_header;
}
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 00fab65..3bc6f5d 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -85,9 +85,29 @@
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE uint32_t GetAccessFlags();
+ // This version should only be called when it's certain there is no
+ // concurrency so there is no need to guarantee atomicity. For example,
+ // before the method is linked.
void SetAccessFlags(uint32_t new_access_flags) {
- // Not called within a transaction.
- access_flags_ = new_access_flags;
+ access_flags_.store(new_access_flags, std::memory_order_relaxed);
+ }
+
+ // This setter guarantees atomicity.
+ void AddAccessFlags(uint32_t flag) {
+ uint32_t old_access_flags = access_flags_.load(std::memory_order_relaxed);
+ uint32_t new_access_flags;
+ do {
+ new_access_flags = old_access_flags | flag;
+ } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
+ }
+
+ // This setter guarantees atomicity.
+ void ClearAccessFlags(uint32_t flag) {
+ uint32_t old_access_flags = access_flags_.load(std::memory_order_relaxed);
+ uint32_t new_access_flags;
+ do {
+ new_access_flags = old_access_flags & ~flag;
+ } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
}
// Approximate what kind of method call would be used for this method.
@@ -142,39 +162,7 @@
return (GetAccessFlags() & kAccIntrinsic) != 0;
}
- void SetIntrinsic(uint32_t intrinsic) {
- DCHECK(IsUint<8>(intrinsic));
- uint32_t new_value = (GetAccessFlags() & kAccFlagsNotUsedByIntrinsic) |
- kAccIntrinsic |
- (intrinsic << POPCOUNT(kAccFlagsNotUsedByIntrinsic));
- if (kIsDebugBuild) {
- uint32_t java_flags = (GetAccessFlags() & kAccJavaFlagsMask);
- bool is_constructor = IsConstructor();
- bool is_synchronized = IsSynchronized();
- bool skip_access_checks = SkipAccessChecks();
- bool is_fast_native = IsFastNative();
- bool is_copied = IsCopied();
- bool is_miranda = IsMiranda();
- bool is_default = IsDefault();
- bool is_default_conflict = IsDefaultConflicting();
- bool is_compilable = IsCompilable();
- bool must_count_locks = MustCountLocks();
- SetAccessFlags(new_value);
- DCHECK_EQ(java_flags, (GetAccessFlags() & kAccJavaFlagsMask));
- DCHECK_EQ(is_constructor, IsConstructor());
- DCHECK_EQ(is_synchronized, IsSynchronized());
- DCHECK_EQ(skip_access_checks, SkipAccessChecks());
- DCHECK_EQ(is_fast_native, IsFastNative());
- DCHECK_EQ(is_copied, IsCopied());
- DCHECK_EQ(is_miranda, IsMiranda());
- DCHECK_EQ(is_default, IsDefault());
- DCHECK_EQ(is_default_conflict, IsDefaultConflicting());
- DCHECK_EQ(is_compilable, IsCompilable());
- DCHECK_EQ(must_count_locks, MustCountLocks());
- } else {
- SetAccessFlags(new_value);
- }
- }
+ ALWAYS_INLINE void SetIntrinsic(uint32_t intrinsic) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetIntrinsic() {
DCHECK(IsIntrinsic());
@@ -250,6 +238,10 @@
return (GetAccessFlags() & kAccSynthetic) != 0;
}
+ bool IsVarargs() {
+ return (GetAccessFlags() & kAccVarargs) != 0;
+ }
+
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsProxyMethod() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -259,7 +251,7 @@
void SetSkipAccessChecks() {
DCHECK(!SkipAccessChecks());
- SetAccessFlags(GetAccessFlags() | kAccSkipAccessChecks);
+ AddAccessFlags(kAccSkipAccessChecks);
}
// Should this method be run in the interpreter and count locks (e.g., failed structured-
@@ -461,6 +453,26 @@
return DataOffset(kRuntimePointerSize);
}
+ ALWAYS_INLINE bool HasSingleImplementation() REQUIRES_SHARED(Locks::mutator_lock_);
+
+ ALWAYS_INLINE void SetHasSingleImplementation(bool single_impl) {
+ DCHECK(!IsIntrinsic()) << "conflict with intrinsic bits";
+ if (single_impl) {
+ AddAccessFlags(kAccSingleImplementation);
+ } else {
+ ClearAccessFlags(kAccSingleImplementation);
+ }
+ }
+
+ ArtMethod* GetSingleImplementation()
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ ALWAYS_INLINE void SetSingleImplementation(ArtMethod* method, PointerSize pointer_size) {
+ DCHECK(!IsNative());
+ DCHECK(IsAbstract()); // Non-abstract method's single implementation is just itself.
+ SetDataPtrSize(method, pointer_size);
+ }
+
void* GetEntryPointFromJni() {
DCHECK(IsNative());
return GetEntryPointFromJniPtrSize(kRuntimePointerSize);
@@ -655,7 +667,10 @@
GcRoot<mirror::Class> declaring_class_;
// Access flags; low 16 bits are defined by spec.
- uint32_t access_flags_;
+ // Getting and setting this field needs to be atomic when concurrency is
+ // possible, e.g. after this method's class is linked, such as when setting
+ // verifier flags or the single-implementation flag.
+ std::atomic<std::uint32_t> access_flags_;
/* Dex file fields. The defining dex file is available via declaring_class_->dex_cache_ */
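AddAccessFlags and ClearAccessFlags above are the standard compare-exchange read-modify-write loop; compare_exchange_weak reloads the expected value on failure, so the loop retries against whatever another thread wrote. A self-contained sketch of the same pattern on a plain std::atomic<uint32_t> (a model, not the ArtMethod field) follows.

#include <atomic>
#include <cassert>
#include <cstdint>

// Standalone model of the flag word; in ART this is ArtMethod::access_flags_.
static std::atomic<uint32_t> flags{0};

// Atomically OR a bit in. On CAS failure, compare_exchange_weak updates
// old_value with the current value, so the next iteration recomputes new_value.
static void AddFlag(uint32_t flag) {
  uint32_t old_value = flags.load(std::memory_order_relaxed);
  uint32_t new_value;
  do {
    new_value = old_value | flag;
  } while (!flags.compare_exchange_weak(old_value, new_value));
}

// Atomically clear a bit with the same retry loop.
static void ClearFlag(uint32_t flag) {
  uint32_t old_value = flags.load(std::memory_order_relaxed);
  uint32_t new_value;
  do {
    new_value = old_value & ~flag;
  } while (!flags.compare_exchange_weak(old_value, new_value));
}

int main() {
  AddFlag(0x4u);
  AddFlag(0x1u);
  ClearFlag(0x4u);
  assert(flags.load() == 0x1u);
  return 0;
}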
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index 0d842cc..9bcda35 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -80,6 +80,11 @@
return timed_out;
}
+int Barrier::GetCount(Thread* self) {
+ MutexLock mu(self, lock_);
+ return count_;
+}
+
void Barrier::SetCountLocked(Thread* self, int count) {
count_ = count;
if (count == 0) {
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 94977fb..d7c4661 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -61,6 +61,8 @@
// another thread is still in Wait(). See above.
void Init(Thread* self, int count) REQUIRES(!lock_);
+ int GetCount(Thread* self) REQUIRES(!lock_);
+
private:
void SetCountLocked(Thread* self, int count) REQUIRES(lock_);
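The Barrier additions above are consumed later in this change by CHACheckpoint: the requester runs a checkpoint on every thread, then waits on the barrier until that many threads have passed it. A simplified standalone model of that rendezvous follows (counting up with a condition variable rather than ART's count-down Barrier); the class and names are illustrative.

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

// Simplified model: workers call Pass() after finishing their checkpoint work,
// and the requester calls Wait(n) to block until n workers have passed.
class MiniBarrier {
 public:
  void Pass() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++passed_;
    cv_.notify_all();
  }
  void Wait(int expected) {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [&] { return passed_ >= expected; });
  }
 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int passed_ = 0;
};

int main() {
  MiniBarrier barrier;
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    // Each worker stands in for a thread running the checkpoint closure.
    workers.emplace_back([&barrier] { barrier.Pass(); });
  }
  barrier.Wait(4);  // the requester blocks until every checkpoint has run
  for (std::thread& t : workers) {
    t.join();
  }
  return 0;
}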
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 62cd2a7..2feb28a 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -94,6 +94,7 @@
kArenaAllocGraphChecker,
kArenaAllocVerifier,
kArenaAllocCallingConvention,
+ kArenaAllocCHA,
kNumArenaAllocKinds
};
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index e8ef69f..ce452cb 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -58,6 +58,7 @@
Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
+Mutex* Locks::cha_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
ConditionVariable* Locks::thread_exit_cond_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
@@ -66,6 +67,7 @@
Uninterruptible Roles::uninterruptible_;
ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
+ReaderWriterMutex* Locks::dex_lock_ = nullptr;
struct AllMutexData {
// A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -955,10 +957,12 @@
DCHECK(logging_lock_ != nullptr);
DCHECK(mutator_lock_ != nullptr);
DCHECK(profiler_lock_ != nullptr);
+ DCHECK(cha_lock_ != nullptr);
DCHECK(thread_list_lock_ != nullptr);
DCHECK(thread_suspend_count_lock_ != nullptr);
DCHECK(trace_lock_ != nullptr);
DCHECK(unexpected_signal_lock_ != nullptr);
+ DCHECK(dex_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -1014,6 +1018,10 @@
DCHECK(breakpoint_lock_ == nullptr);
breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
+ DCHECK(cha_lock_ == nullptr);
+ cha_lock_ = new Mutex("CHA lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
DCHECK(classlinker_classes_lock_ == nullptr);
classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
@@ -1033,6 +1041,10 @@
modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
}
+ UPDATE_CURRENT_LOCK_LEVEL(kDexLock);
+ DCHECK(dex_lock_ == nullptr);
+ dex_lock_ = new ReaderWriterMutex("ClassLinker dex lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
DCHECK(oat_file_manager_lock_ == nullptr);
oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 7e73e0d..255ad71 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -97,6 +97,7 @@
kMonitorPoolLock,
kClassLinkerClassesLock, // TODO rename.
kJitCodeCacheLock,
+ kCHALock,
kBreakpointLock,
kMonitorLock,
kMonitorListLock,
@@ -627,9 +628,12 @@
// TODO: improve name, perhaps instrumentation_update_lock_.
static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
+ // Guards Class Hierarchy Analysis (CHA).
+ static Mutex* cha_lock_ ACQUIRED_AFTER(deoptimization_lock_);
+
// The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
// attaching and detaching.
- static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);
+ static Mutex* thread_list_lock_ ACQUIRED_AFTER(cha_lock_);
// Signaled when threads terminate. Used to determine when all non-daemons have terminated.
static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);
@@ -655,8 +659,10 @@
// Guards modification of the LDT on x86.
static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
+ static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
+
// Guards opened oat files in OatFileManager.
- static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
+ static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);
// Guards extra string entries for VerifierDeps.
static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);
diff --git a/runtime/cha.cc b/runtime/cha.cc
new file mode 100644
index 0000000..be675a8
--- /dev/null
+++ b/runtime/cha.cc
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "cha.h"
+
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "stack.h"
+#include "thread.h"
+#include "thread_list.h"
+#include "thread_pool.h"
+
+namespace art {
+
+void ClassHierarchyAnalysis::AddDependency(ArtMethod* method,
+ ArtMethod* dependent_method,
+ OatQuickMethodHeader* dependent_header) {
+ auto it = cha_dependency_map_.find(method);
+ if (it == cha_dependency_map_.end()) {
+ cha_dependency_map_[method] =
+ new std::vector<std::pair<art::ArtMethod*, art::OatQuickMethodHeader*>>();
+ it = cha_dependency_map_.find(method);
+ } else {
+ DCHECK(it->second != nullptr);
+ }
+ it->second->push_back(std::make_pair(dependent_method, dependent_header));
+}
+
+std::vector<std::pair<ArtMethod*, OatQuickMethodHeader*>>*
+ ClassHierarchyAnalysis::GetDependents(ArtMethod* method) {
+ auto it = cha_dependency_map_.find(method);
+ if (it != cha_dependency_map_.end()) {
+ DCHECK(it->second != nullptr);
+ return it->second;
+ }
+ return nullptr;
+}
+
+void ClassHierarchyAnalysis::RemoveDependencyFor(ArtMethod* method) {
+ auto it = cha_dependency_map_.find(method);
+ if (it != cha_dependency_map_.end()) {
+ auto dependents = it->second;
+ cha_dependency_map_.erase(it);
+ delete dependents;
+ }
+}
+
+void ClassHierarchyAnalysis::RemoveDependentsWithMethodHeaders(
+ const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
+ // Iterate through all entries in the dependency map and remove any entry that
+ // contains one of those in method_headers.
+ for (auto map_it = cha_dependency_map_.begin(); map_it != cha_dependency_map_.end(); ) {
+ auto dependents = map_it->second;
+ for (auto vec_it = dependents->begin(); vec_it != dependents->end(); ) {
+ OatQuickMethodHeader* method_header = vec_it->second;
+ auto it = std::find(method_headers.begin(), method_headers.end(), method_header);
+ if (it != method_headers.end()) {
+ vec_it = dependents->erase(vec_it);
+ } else {
+ vec_it++;
+ }
+ }
+ // Remove the map entry if there are no more dependents.
+ if (dependents->empty()) {
+ map_it = cha_dependency_map_.erase(map_it);
+ delete dependents;
+ } else {
+ map_it++;
+ }
+ }
+}
+
+// This stack visitor walks the stack and for compiled code with certain method
+// headers, sets the should_deoptimize flag on the stack to 1.
+// TODO: also set the register value to 1 when should_deoptimize is allocated in
+// a register.
+class CHAStackVisitor FINAL : public StackVisitor {
+ public:
+ CHAStackVisitor(Thread* thread_in,
+ Context* context,
+ const std::unordered_set<OatQuickMethodHeader*>& method_headers)
+ : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
+ method_headers_(method_headers) {
+ }
+
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* method = GetMethod();
+ if (method == nullptr || method->IsRuntimeMethod() || method->IsNative()) {
+ return true;
+ }
+ if (GetCurrentQuickFrame() == nullptr) {
+ // Not compiled code.
+ return true;
+ }
+ // The method may have multiple versions of compiled code. Check
+ // the method header to see if it has the should_deoptimize flag.
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ if (!method_header->HasShouldDeoptimizeFlag()) {
+ // This compiled version doesn't have the should_deoptimize flag. Skip.
+ return true;
+ }
+ auto it = std::find(method_headers_.begin(), method_headers_.end(), method_header);
+ if (it == method_headers_.end()) {
+ // Not in the list of method headers that should be deoptimized.
+ return true;
+ }
+
+ // The compiled code on stack is not valid anymore. Need to deoptimize.
+ SetShouldDeoptimizeFlag();
+
+ return true;
+ }
+
+ private:
+ void SetShouldDeoptimizeFlag() REQUIRES_SHARED(Locks::mutator_lock_) {
+ QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
+ size_t frame_size = frame_info.FrameSizeInBytes();
+ uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
+ size_t core_spill_size = POPCOUNT(frame_info.CoreSpillMask()) *
+ GetBytesPerGprSpillLocation(kRuntimeISA);
+ size_t fpu_spill_size = POPCOUNT(frame_info.FpSpillMask()) *
+ GetBytesPerFprSpillLocation(kRuntimeISA);
+ size_t offset = frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize;
+ uint8_t* should_deoptimize_addr = sp + offset;
+ // Set deoptimization flag to 1.
+ DCHECK(*should_deoptimize_addr == 0 || *should_deoptimize_addr == 1);
+ *should_deoptimize_addr = 1;
+ }
+
+ // Set of method headers for compiled code that should be deoptimized.
+ const std::unordered_set<OatQuickMethodHeader*>& method_headers_;
+
+ DISALLOW_COPY_AND_ASSIGN(CHAStackVisitor);
+};
+
+class CHACheckpoint FINAL : public Closure {
+ public:
+ explicit CHACheckpoint(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
+ : barrier_(0),
+ method_headers_(method_headers) {}
+
+ void Run(Thread* thread) OVERRIDE {
+ // Note thread and self may not be equal if thread was already suspended at
+ // the point of the request.
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ CHAStackVisitor visitor(thread, nullptr, method_headers_);
+ visitor.WalkStack();
+ barrier_.Pass(self);
+ }
+
+ void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
+ Thread* self = Thread::Current();
+ ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+ barrier_.Increment(self, threads_running_checkpoint);
+ }
+
+ private:
+ // The barrier to be passed through and for the requestor to wait upon.
+ Barrier barrier_;
+ // List of method headers for invalidated compiled code.
+ const std::unordered_set<OatQuickMethodHeader*>& method_headers_;
+
+ DISALLOW_COPY_AND_ASSIGN(CHACheckpoint);
+};
+
+void ClassHierarchyAnalysis::VerifyNonSingleImplementation(mirror::Class* verify_class,
+ uint16_t verify_index) {
+ // Grab cha_lock_ to make sure all single-implementation updates are seen.
+ PointerSize image_pointer_size =
+ Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ MutexLock cha_mu(Thread::Current(), *Locks::cha_lock_);
+ while (verify_class != nullptr) {
+ if (verify_index >= verify_class->GetVTableLength()) {
+ return;
+ }
+ ArtMethod* verify_method = verify_class->GetVTableEntry(verify_index, image_pointer_size);
+ DCHECK(!verify_method->HasSingleImplementation())
+ << "class: " << verify_class->PrettyClass()
+ << " verify_method: " << verify_method->PrettyMethod(true);
+ verify_class = verify_class->GetSuperClass();
+ }
+}
+
+void ClassHierarchyAnalysis::CheckSingleImplementationInfo(
+ Handle<mirror::Class> klass,
+ ArtMethod* virtual_method,
+ ArtMethod* method_in_super,
+ std::unordered_set<ArtMethod*>& invalidated_single_impl_methods) {
+ // TODO: if klass is not instantiable, virtual_method isn't invocable yet so
+ // even if it overrides, it doesn't invalidate the single-implementation
+ // assumption.
+
+ DCHECK_NE(virtual_method, method_in_super);
+ DCHECK(method_in_super->GetDeclaringClass()->IsResolved()) << "class isn't resolved";
+ // If virtual_method doesn't come from a default interface method, it should
+ // be supplied by klass.
+ DCHECK(virtual_method->IsCopied() ||
+ virtual_method->GetDeclaringClass() == klass.Get());
+
+ // A new virtual_method should set method_in_super to
+ // non-single-implementation (if not set already).
+ // We don't grab cha_lock_. Single-implementation flag won't be set to true
+ // again once it's set to false.
+ if (!method_in_super->HasSingleImplementation()) {
+ // method_in_super already has multiple implementations. All methods in the
+ // same vtable slot in its super classes should already be
+ // non-single-implementation.
+ if (kIsDebugBuild) {
+ VerifyNonSingleImplementation(klass->GetSuperClass()->GetSuperClass(),
+ method_in_super->GetMethodIndex());
+ }
+ return;
+ }
+
+ // Native methods don't have single-implementation flag set.
+ DCHECK(!method_in_super->IsNative());
+ // Invalidate method_in_super's single-implementation status.
+ invalidated_single_impl_methods.insert(method_in_super);
+}
+
+void ClassHierarchyAnalysis::InitSingleImplementationFlag(Handle<mirror::Class> klass,
+ ArtMethod* method) {
+ DCHECK(method->IsCopied() || method->GetDeclaringClass() == klass.Get());
+ if (klass->IsFinal() || method->IsFinal()) {
+ // Final classes or methods do not need CHA for devirtualization.
+ // This frees up modifier bits for intrinsics which currently are only
+ // used for static methods or methods of final classes.
+ return;
+ }
+ if (method->IsNative()) {
+ // Native method's invocation overhead is already high and it
+ // cannot be inlined. It's not worthwhile to devirtualize the
+ // call which can add a deoptimization point.
+ DCHECK(!method->HasSingleImplementation());
+ } else {
+ method->SetHasSingleImplementation(true);
+ if (method->IsAbstract()) {
+ // There is no real implementation yet.
+ // TODO: implement single-implementation logic for abstract methods.
+ DCHECK(method->GetSingleImplementation() == nullptr);
+ } else {
+ // Single implementation of non-abstract method is itself.
+ DCHECK_EQ(method->GetSingleImplementation(), method);
+ }
+ }
+}
+
+void ClassHierarchyAnalysis::UpdateAfterLoadingOf(Handle<mirror::Class> klass) {
+ if (klass->IsInterface()) {
+ return;
+ }
+ mirror::Class* super_class = klass->GetSuperClass();
+ if (super_class == nullptr) {
+ return;
+ }
+
+ // Keeps track of all methods whose single-implementation assumption
+ // is invalidated by linking `klass`.
+ std::unordered_set<ArtMethod*> invalidated_single_impl_methods;
+
+ PointerSize image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ // Do an entry-by-entry comparison of vtable contents with super's vtable.
+ for (int32_t i = 0; i < super_class->GetVTableLength(); ++i) {
+ ArtMethod* method = klass->GetVTableEntry(i, image_pointer_size);
+ ArtMethod* method_in_super = super_class->GetVTableEntry(i, image_pointer_size);
+ if (method == method_in_super) {
+ // vtable slot entry is inherited from super class.
+ continue;
+ }
+ InitSingleImplementationFlag(klass, method);
+ CheckSingleImplementationInfo(klass,
+ method,
+ method_in_super,
+ invalidated_single_impl_methods);
+ }
+
+ // For new virtual methods that don't override.
+ for (int32_t i = super_class->GetVTableLength(); i < klass->GetVTableLength(); ++i) {
+ ArtMethod* method = klass->GetVTableEntry(i, image_pointer_size);
+ InitSingleImplementationFlag(klass, method);
+ }
+
+ Runtime* const runtime = Runtime::Current();
+ if (!invalidated_single_impl_methods.empty()) {
+ Thread* self = Thread::Current();
+ // Method headers for compiled code to be invalidated.
+ std::unordered_set<OatQuickMethodHeader*> dependent_method_headers;
+
+ {
+ // We do this under cha_lock_. Committing code also grabs this lock to
+ // make sure the code is only committed when all single-implementation
+ // assumptions are still true.
+ MutexLock cha_mu(self, *Locks::cha_lock_);
+ // Invalidate compiled methods that assume some virtual calls have only
+ // single implementations.
+ for (ArtMethod* invalidated : invalidated_single_impl_methods) {
+ if (!invalidated->HasSingleImplementation()) {
+ // It might have been invalidated already when other class linking is
+ // going on.
+ continue;
+ }
+ invalidated->SetHasSingleImplementation(false);
+
+ if (runtime->IsAotCompiler()) {
+ // No need to invalidate any compiled code as the AotCompiler doesn't
+ // run any code.
+ continue;
+ }
+
+ // Invalidate all dependents.
+ auto dependents = GetDependents(invalidated);
+ if (dependents == nullptr) {
+ continue;
+ }
+ for (const auto& dependent : *dependents) {
+ ArtMethod* method = dependent.first;
+ OatQuickMethodHeader* method_header = dependent.second;
+ VLOG(class_linker) << "CHA invalidated compiled code for " << method->PrettyMethod();
+ DCHECK(runtime->UseJitCompilation());
+ runtime->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor(
+ method, method_header);
+ dependent_method_headers.insert(method_header);
+ }
+ RemoveDependencyFor(invalidated);
+ }
+ }
+
+ if (dependent_method_headers.empty()) {
+ return;
+ }
+ // Deoptimize compiled code on the stack that should have been invalidated.
+ CHACheckpoint checkpoint(dependent_method_headers);
+ size_t threads_running_checkpoint = runtime->GetThreadList()->RunCheckpoint(&checkpoint);
+ if (threads_running_checkpoint != 0) {
+ checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
+ }
+ }
+}
+
+} // namespace art
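SetShouldDeoptimizeFlag above locates the 4-byte slot by starting from the frame size and subtracting the core and floating-point spill areas, matching the stack layout noted in register_allocation_resolver.cc. A small sketch of that arithmetic with made-up frame numbers follows; the spill sizes and masks are illustrative, not values from a real ISA.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative constants; real values depend on the target ISA.
constexpr size_t kBytesPerCoreSpill = 8;
constexpr size_t kBytesPerFpSpill = 8;
constexpr size_t kShouldDeoptimizeFlagSize = 4;

static int PopCount(uint32_t mask) { return __builtin_popcount(mask); }

// Offset of the should_deoptimize slot from the stack pointer: the flag sits
// directly below the callee-save spill areas at the top of the frame.
static size_t ShouldDeoptimizeFlagOffset(size_t frame_size,
                                         uint32_t core_spill_mask,
                                         uint32_t fp_spill_mask) {
  size_t core_spill_size = PopCount(core_spill_mask) * kBytesPerCoreSpill;
  size_t fp_spill_size = PopCount(fp_spill_mask) * kBytesPerFpSpill;
  return frame_size - core_spill_size - fp_spill_size - kShouldDeoptimizeFlagSize;
}

int main() {
  // Hypothetical frame: 96 bytes, three core registers and one FP register spilled.
  size_t offset = ShouldDeoptimizeFlagOffset(96, 0x000000b0u, 0x00000001u);
  assert(offset == 96 - 3 * 8 - 1 * 8 - 4);  // == 60
  return 0;
}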
diff --git a/runtime/cha.h b/runtime/cha.h
new file mode 100644
index 0000000..ada5c89
--- /dev/null
+++ b/runtime/cha.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CHA_H_
+#define ART_RUNTIME_CHA_H_
+
+#include "art_method.h"
+#include "base/enums.h"
+#include "base/mutex.h"
+#include "handle.h"
+#include "mirror/class.h"
+#include "oat_quick_method_header.h"
+#include <unordered_map>
+#include <unordered_set>
+
+namespace art {
+
+/**
+ * Class Hierarchy Analysis (CHA) tries to devirtualize virtual calls into
+ * direct calls based on the info generated by analyzing class hierarchies.
+ * If a class is not subclassed, or even if it's subclassed but one of its
+ * virtual methods isn't overridden, a virtual call for that method can be
+ * changed into a direct call.
+ *
+ * Each virtual method carries a single-implementation status. The status is
+ * incrementally maintained at the end of class linking time when method
+ * overriding takes effect.
+ *
+ * The compiler takes advantage of the single-implementation info of a
+ * method. If a method A has the single-implementation flag set, the compiler
+ * devirtualizes the virtual call for method A into a direct call, and
+ * further tries to inline the direct call as a result. The compiler will
+ * also register a dependency that the compiled code depends on the
+ * assumption that method A has single-implementation status.
+ *
+ * When single-implementation info is updated at the end of class linking,
+ * and if method A's single-implementation status is invalidated, all compiled
+ * code that depends on the assumption that method A has single-implementation
+ * status needs to be invalidated. Method entrypoints that have this dependency
+ * will be updated as a result. Method A can later be recompiled with less
+ * aggressive assumptions.
+ *
+ * For live compiled code that's on the stack, deoptimization will be initiated
+ * to force the invalidated compiled code into interpreter mode to guarantee
+ * correctness. The deoptimization mechanism used is a hybrid of
+ * synchronous and asynchronous deoptimization. The synchronous deoptimization
+ * part checks a hidden local variable flag for the method, and if true,
+ * initiates deoptimization. The asynchronous deoptimization part issues a
+ * checkpoint that walks the stack and for any compiled code on the stack
+ * that should be deoptimized, sets the hidden local variable value to true.
+ *
+ * A cha_lock_ needs to be held for updating single-implementation status,
+ * and registering/unregistering CHA dependencies. Registering CHA dependency
+ * and making compiled code visible also need to be atomic. Otherwise, we
+ * may miss invalidating CHA dependents or making compiled code visible even
+ * after it is invalidated. Care needs to be taken between cha_lock_ and
+ * JitCodeCache::lock_ to guarantee the atomicity.
+ *
+ * We base our CHA on dynamically linked class profiles instead of doing static
+ * analysis. Static analysis can be too aggressive due to dynamic class loading
+ * at runtime, and too conservative since some classes may not be really loaded
+ * at runtime.
+ */
+class ClassHierarchyAnalysis {
+ public:
+ // Types for recording CHA dependencies.
+ // For invalidating CHA dependency, we need to know both the ArtMethod and
+ // the method header. If the ArtMethod has compiled code with the method header
+ // as the entrypoint, we update the entrypoint to the interpreter bridge.
+ // We will also deoptimize frames that are currently executing the code of
+ // the method header.
+ typedef std::pair<ArtMethod*, OatQuickMethodHeader*> MethodAndMethodHeaderPair;
+ typedef std::vector<MethodAndMethodHeaderPair> ListOfDependentPairs;
+
+ ClassHierarchyAnalysis() {}
+
+ // Add a dependency that compiled code with `dependent_header` for `dependent_method`
+ // assumes that virtual `method` has single-implementation.
+ void AddDependency(ArtMethod* method,
+ ArtMethod* dependent_method,
+ OatQuickMethodHeader* dependent_header) REQUIRES(Locks::cha_lock_);
+
+ // Return compiled code that assumes that `method` has single-implementation.
+ std::vector<MethodAndMethodHeaderPair>* GetDependents(ArtMethod* method)
+ REQUIRES(Locks::cha_lock_);
+
+ // Remove dependency tracking for compiled code that assumes that
+ // `method` has single-implementation.
+ void RemoveDependencyFor(ArtMethod* method) REQUIRES(Locks::cha_lock_);
+
+ // Remove from cha_dependency_map_ all entries that contain OatQuickMethodHeader from
+ // the given `method_headers` set.
+ // This is used when some compiled code is freed.
+ void RemoveDependentsWithMethodHeaders(
+ const std::unordered_set<OatQuickMethodHeader*>& method_headers)
+ REQUIRES(Locks::cha_lock_);
+
+ // Update CHA info for methods that `klass` overrides, after loading `klass`.
+ void UpdateAfterLoadingOf(Handle<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ void InitSingleImplementationFlag(Handle<mirror::Class> klass, ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // `virtual_method` in `klass` overrides `method_in_super`.
+ // This will invalidate some assumptions on single-implementation.
+ // Append methods that should have their single-implementation flag invalidated
+ // to `invalidated_single_impl_methods`.
+ void CheckSingleImplementationInfo(
+ Handle<mirror::Class> klass,
+ ArtMethod* virtual_method,
+ ArtMethod* method_in_super,
+ std::unordered_set<ArtMethod*>& invalidated_single_impl_methods)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Verify all methods in the same vtable slot from verify_class and its supers
+ // don't have single-implementation.
+ void VerifyNonSingleImplementation(mirror::Class* verify_class, uint16_t verify_index)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Maps a method to the list of compiled code that assumes the method has a
+ // single implementation, which is used for CHA-based devirtualization.
+ std::unordered_map<ArtMethod*, ListOfDependentPairs*> cha_dependency_map_
+ GUARDED_BY(Locks::cha_lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(ClassHierarchyAnalysis);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_CHA_H_
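The dependency bookkeeping the header describes boils down to a map from an assumed-single-implementation method to the list of (dependent method, compiled-code header) pairs; cha_test.cc below exercises the real API. A simplified standalone model with plain STL containers and opaque integer handles (instead of ArtMethod* and OatQuickMethodHeader*) shows the add/invalidate lifecycle.

#include <cassert>
#include <unordered_map>
#include <utility>
#include <vector>

// Opaque stand-ins for ArtMethod* and OatQuickMethodHeader* in this model.
using Method = int;
using CodeHeader = int;
using Dependents = std::vector<std::pair<Method, CodeHeader>>;

// Maps an assumed-single-implementation method to the compiled code that
// depends on that assumption.
static std::unordered_map<Method, Dependents> dependency_map;

static void AddDependency(Method assumed, Method dependent, CodeHeader header) {
  dependency_map[assumed].emplace_back(dependent, header);
}

// When `assumed` gains another implementation, every dependent must be
// invalidated (entrypoint reset, on-stack frames flagged for deoptimization).
static Dependents InvalidateAssumption(Method assumed) {
  auto it = dependency_map.find(assumed);
  if (it == dependency_map.end()) {
    return Dependents();
  }
  Dependents victims = std::move(it->second);
  dependency_map.erase(it);
  return victims;
}

int main() {
  AddDependency(/*assumed=*/1, /*dependent=*/2, /*header=*/200);
  AddDependency(/*assumed=*/1, /*dependent=*/3, /*header=*/300);
  Dependents victims = InvalidateAssumption(1);
  assert(victims.size() == 2u);
  assert(InvalidateAssumption(1).empty());  // nothing left after removal
  return 0;
}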
diff --git a/runtime/cha_test.cc b/runtime/cha_test.cc
new file mode 100644
index 0000000..d2f335e
--- /dev/null
+++ b/runtime/cha_test.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "cha.h"
+
+#include "common_runtime_test.h"
+
+namespace art {
+
+class CHATest : public CommonRuntimeTest {};
+
+// Mocks some methods.
+#define METHOD1 (reinterpret_cast<ArtMethod*>(8u))
+#define METHOD2 (reinterpret_cast<ArtMethod*>(16u))
+#define METHOD3 (reinterpret_cast<ArtMethod*>(24u))
+
+// Mocks some method headers.
+#define METHOD_HEADER1 (reinterpret_cast<OatQuickMethodHeader*>(128u))
+#define METHOD_HEADER2 (reinterpret_cast<OatQuickMethodHeader*>(136u))
+#define METHOD_HEADER3 (reinterpret_cast<OatQuickMethodHeader*>(144u))
+
+TEST_F(CHATest, CHACheckDependency) {
+ ClassHierarchyAnalysis cha;
+ MutexLock cha_mu(Thread::Current(), *Locks::cha_lock_);
+
+ ASSERT_EQ(cha.GetDependents(METHOD1), nullptr);
+ ASSERT_EQ(cha.GetDependents(METHOD2), nullptr);
+ ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+
+ cha.AddDependency(METHOD1, METHOD2, METHOD_HEADER2);
+ ASSERT_EQ(cha.GetDependents(METHOD2), nullptr);
+ ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+ auto dependents = cha.GetDependents(METHOD1);
+ ASSERT_EQ(dependents->size(), 1u);
+ ASSERT_EQ(dependents->at(0).first, METHOD2);
+ ASSERT_EQ(dependents->at(0).second, METHOD_HEADER2);
+
+ cha.AddDependency(METHOD1, METHOD3, METHOD_HEADER3);
+ ASSERT_EQ(cha.GetDependents(METHOD2), nullptr);
+ ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+ dependents = cha.GetDependents(METHOD1);
+ ASSERT_EQ(dependents->size(), 2u);
+ ASSERT_EQ(dependents->at(0).first, METHOD2);
+ ASSERT_EQ(dependents->at(0).second, METHOD_HEADER2);
+ ASSERT_EQ(dependents->at(1).first, METHOD3);
+ ASSERT_EQ(dependents->at(1).second, METHOD_HEADER3);
+
+ std::unordered_set<OatQuickMethodHeader*> headers;
+ headers.insert(METHOD_HEADER2);
+ cha.RemoveDependentsWithMethodHeaders(headers);
+ ASSERT_EQ(cha.GetDependents(METHOD2), nullptr);
+ ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+ dependents = cha.GetDependents(METHOD1);
+ ASSERT_EQ(dependents->size(), 1u);
+ ASSERT_EQ(dependents->at(0).first, METHOD3);
+ ASSERT_EQ(dependents->at(0).second, METHOD_HEADER3);
+
+ cha.AddDependency(METHOD2, METHOD1, METHOD_HEADER1);
+ ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+ dependents = cha.GetDependents(METHOD1);
+ ASSERT_EQ(dependents->size(), 1u);
+ dependents = cha.GetDependents(METHOD2);
+ ASSERT_EQ(dependents->size(), 1u);
+
+ headers.insert(METHOD_HEADER3);
+ cha.RemoveDependentsWithMethodHeaders(headers);
+ ASSERT_EQ(cha.GetDependents(METHOD1), nullptr);
+ ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+ dependents = cha.GetDependents(METHOD2);
+ ASSERT_EQ(dependents->size(), 1u);
+ ASSERT_EQ(dependents->at(0).first, METHOD1);
+ ASSERT_EQ(dependents->at(0).second, METHOD_HEADER1);
+
+ cha.RemoveDependencyFor(METHOD2);
+ ASSERT_EQ(cha.GetDependents(METHOD1), nullptr);
+ ASSERT_EQ(cha.GetDependents(METHOD2), nullptr);
+ ASSERT_EQ(cha.GetDependents(METHOD3), nullptr);
+}
+
+} // namespace art
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 7005c29..0a65cd1 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -248,7 +248,7 @@
DCHECK(proxy_method->IsProxyMethod<kReadBarrierOption>());
{
Thread* const self = Thread::Current();
- ReaderMutexLock mu(self, dex_lock_);
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
// Locate the dex cache of the original interface/Object
for (const DexCacheData& data : dex_caches_) {
if (!self->IsJWeakCleared(data.weak_root) &&
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f98f364..674bad7 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -40,6 +40,7 @@
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
#include "base/value_object.h"
+#include "cha.h"
#include "class_linker-inl.h"
#include "class_table-inl.h"
#include "compiler_callbacks.h"
@@ -96,6 +97,7 @@
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
+#include "thread_list.h"
#include "trace.h"
#include "utils.h"
#include "utils/dex_cache_arrays_layout-inl.h"
@@ -240,10 +242,11 @@
ScopedLocalRef<jthrowable> cause(env, env->ExceptionOccurred());
CHECK(cause.get() != nullptr);
- // Boot classpath classes should not fail initialization.
- if (!Runtime::Current()->IsAotCompiler()) {
+ // Boot classpath classes should not fail initialization. This is a sanity debug check. This
+ // cannot in general be guaranteed, but a failure here likely leads to breakage down the line.
+ if (klass->GetClassLoader() == nullptr && !Runtime::Current()->IsAotCompiler()) {
std::string tmp;
- CHECK(klass->GetClassLoader() != nullptr) << klass->GetDescriptor(&tmp);
+ LOG(kIsDebugBuild ? FATAL : WARNING) << klass->GetDescriptor(&tmp) << " failed initialization";
}
env->ExceptionClear();
@@ -339,9 +342,7 @@
}
ClassLinker::ClassLinker(InternTable* intern_table)
- // dex_lock_ is recursive as it may be used in stack dumping.
- : dex_lock_("ClassLinker dex lock", kDexLock),
- failed_dex_cache_class_lookups_(0),
+ : failed_dex_cache_class_lookups_(0),
class_roots_(nullptr),
array_iftable_(nullptr),
find_array_class_cache_next_victim_(0),
@@ -820,123 +821,6 @@
}
}
-static void SanityCheckArtMethod(ArtMethod* m,
- ObjPtr<mirror::Class> expected_class,
- const std::vector<gc::space::ImageSpace*>& spaces)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (m->IsRuntimeMethod()) {
- ObjPtr<mirror::Class> declaring_class = m->GetDeclaringClassUnchecked();
- CHECK(declaring_class == nullptr) << declaring_class << " " << m->PrettyMethod();
- } else if (m->IsCopied()) {
- CHECK(m->GetDeclaringClass() != nullptr) << m->PrettyMethod();
- } else if (expected_class != nullptr) {
- CHECK_EQ(m->GetDeclaringClassUnchecked(), expected_class) << m->PrettyMethod();
- }
- if (!spaces.empty()) {
- bool contains = false;
- for (gc::space::ImageSpace* space : spaces) {
- auto& header = space->GetImageHeader();
- size_t offset = reinterpret_cast<uint8_t*>(m) - space->Begin();
-
- const ImageSection& methods = header.GetMethodsSection();
- contains = contains || methods.Contains(offset);
-
- const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
- contains = contains || runtime_methods.Contains(offset);
- }
- CHECK(contains) << m << " not found";
- }
-}
-
-static void SanityCheckArtMethodPointerArray(ObjPtr<mirror::PointerArray> arr,
- ObjPtr<mirror::Class> expected_class,
- PointerSize pointer_size,
- const std::vector<gc::space::ImageSpace*>& spaces)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- CHECK(arr != nullptr);
- for (int32_t j = 0; j < arr->GetLength(); ++j) {
- auto* method = arr->GetElementPtrSize<ArtMethod*>(j, pointer_size);
- // expected_class == null means we are a dex cache.
- if (expected_class != nullptr) {
- CHECK(method != nullptr);
- }
- if (method != nullptr) {
- SanityCheckArtMethod(method, expected_class, spaces);
- }
- }
-}
-
-static void SanityCheckArtMethodPointerArray(ArtMethod** arr,
- size_t size,
- PointerSize pointer_size,
- const std::vector<gc::space::ImageSpace*>& spaces)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- CHECK_EQ(arr != nullptr, size != 0u);
- if (arr != nullptr) {
- bool contains = false;
- for (auto space : spaces) {
- auto offset = reinterpret_cast<uint8_t*>(arr) - space->Begin();
- if (space->GetImageHeader().GetImageSection(
- ImageHeader::kSectionDexCacheArrays).Contains(offset)) {
- contains = true;
- break;
- }
- }
- CHECK(contains);
- }
- for (size_t j = 0; j < size; ++j) {
- ArtMethod* method = mirror::DexCache::GetElementPtrSize(arr, j, pointer_size);
- // expected_class == null means we are a dex cache.
- if (method != nullptr) {
- SanityCheckArtMethod(method, nullptr, spaces);
- }
- }
-}
-
-static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(obj != nullptr);
- CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj;
- CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
- if (obj->IsClass()) {
- auto klass = obj->AsClass();
- for (ArtField& field : klass->GetIFields()) {
- CHECK_EQ(field.GetDeclaringClass(), klass);
- }
- for (ArtField& field : klass->GetSFields()) {
- CHECK_EQ(field.GetDeclaringClass(), klass);
- }
- auto* runtime = Runtime::Current();
- auto image_spaces = runtime->GetHeap()->GetBootImageSpaces();
- auto pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
- for (auto& m : klass->GetMethods(pointer_size)) {
- SanityCheckArtMethod(&m, klass, image_spaces);
- }
- auto* vtable = klass->GetVTable();
- if (vtable != nullptr) {
- SanityCheckArtMethodPointerArray(vtable, nullptr, pointer_size, image_spaces);
- }
- if (klass->ShouldHaveImt()) {
- ImTable* imt = klass->GetImt(pointer_size);
- for (size_t i = 0; i < ImTable::kSize; ++i) {
- SanityCheckArtMethod(imt->Get(i, pointer_size), nullptr, image_spaces);
- }
- }
- if (klass->ShouldHaveEmbeddedVTable()) {
- for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) {
- SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr, image_spaces);
- }
- }
- mirror::IfTable* iftable = klass->GetIfTable();
- for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
- if (iftable->GetMethodArrayCount(i) > 0) {
- SanityCheckArtMethodPointerArray(
- iftable->GetMethodArray(i), nullptr, pointer_size, image_spaces);
- }
- }
- }
-}
-
// Set image methods' entry point to interpreter.
class SetInterpreterEntrypointArtMethodVisitor : public ArtMethodVisitor {
public:
@@ -1444,7 +1328,7 @@
}
}
{
- WriterMutexLock mu2(self, dex_lock_);
+ WriterMutexLock mu2(self, *Locks::dex_lock_);
// Make sure to do this after we update the arrays since we store the resolved types array
// in DexCacheData in RegisterDexFileLocked. We need the array pointer to be the one in the
// BSS.
@@ -1607,6 +1491,153 @@
return true;
}
+// Helper class for ArtMethod checks when adding an image. Keeps all required functionality
+// together and caches some intermediate results.
+class ImageSanityChecks FINAL {
+ public:
+ static void CheckObjects(gc::Heap* heap, ClassLinker* class_linker)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ImageSanityChecks isc(heap, class_linker);
+ heap->VisitObjects(ImageSanityChecks::SanityCheckObjectsCallback, &isc);
+ }
+
+ static void CheckPointerArray(gc::Heap* heap,
+ ClassLinker* class_linker,
+ ArtMethod** arr,
+ size_t size)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ImageSanityChecks isc(heap, class_linker);
+ isc.SanityCheckArtMethodPointerArray(arr, size);
+ }
+
+ static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj;
+ CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
+ if (obj->IsClass()) {
+ ImageSanityChecks* isc = reinterpret_cast<ImageSanityChecks*>(arg);
+
+ auto klass = obj->AsClass();
+ for (ArtField& field : klass->GetIFields()) {
+ CHECK_EQ(field.GetDeclaringClass(), klass);
+ }
+ for (ArtField& field : klass->GetSFields()) {
+ CHECK_EQ(field.GetDeclaringClass(), klass);
+ }
+ const auto pointer_size = isc->pointer_size_;
+ for (auto& m : klass->GetMethods(pointer_size)) {
+ isc->SanityCheckArtMethod(&m, klass);
+ }
+ auto* vtable = klass->GetVTable();
+ if (vtable != nullptr) {
+ isc->SanityCheckArtMethodPointerArray(vtable, nullptr);
+ }
+ if (klass->ShouldHaveImt()) {
+ ImTable* imt = klass->GetImt(pointer_size);
+ for (size_t i = 0; i < ImTable::kSize; ++i) {
+ isc->SanityCheckArtMethod(imt->Get(i, pointer_size), nullptr);
+ }
+ }
+ if (klass->ShouldHaveEmbeddedVTable()) {
+ for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) {
+ isc->SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr);
+ }
+ }
+ mirror::IfTable* iftable = klass->GetIfTable();
+ for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+ if (iftable->GetMethodArrayCount(i) > 0) {
+ isc->SanityCheckArtMethodPointerArray(iftable->GetMethodArray(i), nullptr);
+ }
+ }
+ }
+ }
+
+ private:
+ ImageSanityChecks(gc::Heap* heap, ClassLinker* class_linker)
+ : spaces_(heap->GetBootImageSpaces()),
+ pointer_size_(class_linker->GetImagePointerSize()) {
+ space_begin_.reserve(spaces_.size());
+ method_sections_.reserve(spaces_.size());
+ runtime_method_sections_.reserve(spaces_.size());
+ for (gc::space::ImageSpace* space : spaces_) {
+ space_begin_.push_back(space->Begin());
+ auto& header = space->GetImageHeader();
+ method_sections_.push_back(&header.GetMethodsSection());
+ runtime_method_sections_.push_back(&header.GetRuntimeMethodsSection());
+ }
+ }
+
+ void SanityCheckArtMethod(ArtMethod* m, ObjPtr<mirror::Class> expected_class)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (m->IsRuntimeMethod()) {
+ ObjPtr<mirror::Class> declaring_class = m->GetDeclaringClassUnchecked();
+ CHECK(declaring_class == nullptr) << declaring_class << " " << m->PrettyMethod();
+ } else if (m->IsCopied()) {
+ CHECK(m->GetDeclaringClass() != nullptr) << m->PrettyMethod();
+ } else if (expected_class != nullptr) {
+ CHECK_EQ(m->GetDeclaringClassUnchecked(), expected_class) << m->PrettyMethod();
+ }
+ if (!spaces_.empty()) {
+ bool contains = false;
+ for (size_t i = 0; !contains && i != space_begin_.size(); ++i) {
+ const size_t offset = reinterpret_cast<uint8_t*>(m) - space_begin_[i];
+ contains = method_sections_[i]->Contains(offset) ||
+ runtime_method_sections_[i]->Contains(offset);
+ }
+ CHECK(contains) << m << " not found";
+ }
+ }
+
+ void SanityCheckArtMethodPointerArray(ObjPtr<mirror::PointerArray> arr,
+ ObjPtr<mirror::Class> expected_class)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CHECK(arr != nullptr);
+ for (int32_t j = 0; j < arr->GetLength(); ++j) {
+ auto* method = arr->GetElementPtrSize<ArtMethod*>(j, pointer_size_);
+ // expected_class == null means we are a dex cache.
+ if (expected_class != nullptr) {
+ CHECK(method != nullptr);
+ }
+ if (method != nullptr) {
+ SanityCheckArtMethod(method, expected_class);
+ }
+ }
+ }
+
+ void SanityCheckArtMethodPointerArray(ArtMethod** arr, size_t size)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CHECK_EQ(arr != nullptr, size != 0u);
+ if (arr != nullptr) {
+ bool contains = false;
+ for (auto space : spaces_) {
+ auto offset = reinterpret_cast<uint8_t*>(arr) - space->Begin();
+ if (space->GetImageHeader().GetImageSection(
+ ImageHeader::kSectionDexCacheArrays).Contains(offset)) {
+ contains = true;
+ break;
+ }
+ }
+ CHECK(contains);
+ }
+ for (size_t j = 0; j < size; ++j) {
+ ArtMethod* method = mirror::DexCache::GetElementPtrSize(arr, j, pointer_size_);
+ // expected_class == null means we are a dex cache.
+ if (method != nullptr) {
+ SanityCheckArtMethod(method, nullptr);
+ }
+ }
+ }
+
+ const std::vector<gc::space::ImageSpace*>& spaces_;
+ const PointerSize pointer_size_;
+
+ // Cached sections from the spaces.
+ std::vector<const uint8_t*> space_begin_;
+ std::vector<const ImageSection*> method_sections_;
+ std::vector<const ImageSection*> runtime_method_sections_;
+};
+
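As a usage sketch (mirroring the call sites updated below in this change; the wrapper function name and its placement are illustrative, not part of the patch), the two public entry points are intended to be driven like this:

// Minimal sketch, assuming the mutator lock is held and a boot image is loaded.
static void SanityCheckLoadedImage(gc::Heap* heap,
                                   ClassLinker* class_linker,
                                   ObjPtr<mirror::DexCache> dex_cache)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Check one dex cache's resolved-methods array against the boot image method sections.
  ImageSanityChecks::CheckPointerArray(heap,
                                       class_linker,
                                       dex_cache->GetResolvedMethods(),
                                       dex_cache->NumResolvedMethods());
  // Walk the heap and check every class' fields, methods, vtable, IMT and iftable.
  ImageSanityChecks::CheckObjects(heap, class_linker);
}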
bool ClassLinker::AddImageSpace(
gc::space::ImageSpace* space,
Handle<mirror::ClassLoader> class_loader,
@@ -1697,10 +1728,10 @@
}
} else {
if (kSanityCheckObjects) {
- SanityCheckArtMethodPointerArray(h_dex_cache->GetResolvedMethods(),
- h_dex_cache->NumResolvedMethods(),
- image_pointer_size_,
- heap->GetBootImageSpaces());
+ ImageSanityChecks::CheckPointerArray(heap,
+ this,
+ h_dex_cache->GetResolvedMethods(),
+ h_dex_cache->NumResolvedMethods());
}
// Register dex files, keep track of existing ones that are conflicts.
AppendToBootClassPath(*dex_file.get(), h_dex_cache);
@@ -1785,7 +1816,7 @@
}
}
if (!app_image) {
- heap->VisitObjects(SanityCheckObjectsCallback, nullptr);
+ ImageSanityChecks::CheckObjects(heap, this);
}
}
@@ -1949,36 +1980,13 @@
void Visit(ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
ClassTable* const class_table = class_loader->GetClassTable();
- if (!done_ && class_table != nullptr) {
- DefiningClassLoaderFilterVisitor visitor(class_loader, visitor_);
- if (!class_table->Visit(visitor)) {
- // If the visitor ClassTable returns false it means that we don't need to continue.
- done_ = true;
- }
+ if (!done_ && class_table != nullptr && !class_table->Visit(*visitor_)) {
+ // If visiting the ClassTable returns false, it means that we don't need to continue.
+ done_ = true;
}
}
private:
- // Class visitor that limits the class visits from a ClassTable to the classes with
- // the provided defining class loader. This filter is used to avoid multiple visits
- // of the same class which can be recorded for multiple initiating class loaders.
- class DefiningClassLoaderFilterVisitor : public ClassVisitor {
- public:
- DefiningClassLoaderFilterVisitor(ObjPtr<mirror::ClassLoader> defining_class_loader,
- ClassVisitor* visitor)
- : defining_class_loader_(defining_class_loader), visitor_(visitor) { }
-
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
- if (klass->GetClassLoader() != defining_class_loader_) {
- return true;
- }
- return (*visitor_)(klass);
- }
-
- ObjPtr<mirror::ClassLoader> const defining_class_loader_;
- ClassVisitor* const visitor_;
- };
-
ClassVisitor* const visitor_;
// If done is true then we don't need to do any more visiting.
bool done_;
@@ -2135,109 +2143,6 @@
: static_cast<mirror::Array*>(mirror::IntArray::Alloc(self, length)));
}
-void ClassLinker::InitializeDexCache(Thread* self,
- ObjPtr<mirror::DexCache> dex_cache,
- ObjPtr<mirror::String> location,
- const DexFile& dex_file,
- LinearAlloc* linear_alloc) {
- ScopedAssertNoThreadSuspension sants(__FUNCTION__);
- DexCacheArraysLayout layout(image_pointer_size_, &dex_file);
- uint8_t* raw_arrays = nullptr;
-
- const OatDexFile* const oat_dex = dex_file.GetOatDexFile();
- if (oat_dex != nullptr && oat_dex->GetDexCacheArrays() != nullptr) {
- raw_arrays = oat_dex->GetDexCacheArrays();
- } else if (dex_file.NumStringIds() != 0u ||
- dex_file.NumTypeIds() != 0u ||
- dex_file.NumMethodIds() != 0u ||
- dex_file.NumFieldIds() != 0u) {
- // Zero-initialized.
- raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
- }
-
- mirror::StringDexCacheType* strings = (dex_file.NumStringIds() == 0u) ? nullptr :
- reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset());
- GcRoot<mirror::Class>* types = (dex_file.NumTypeIds() == 0u) ? nullptr :
- reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset());
- ArtMethod** methods = (dex_file.NumMethodIds() == 0u) ? nullptr :
- reinterpret_cast<ArtMethod**>(raw_arrays + layout.MethodsOffset());
- ArtField** fields = (dex_file.NumFieldIds() == 0u) ? nullptr :
- reinterpret_cast<ArtField**>(raw_arrays + layout.FieldsOffset());
-
- size_t num_strings = mirror::DexCache::kDexCacheStringCacheSize;
- if (dex_file.NumStringIds() < num_strings) {
- num_strings = dex_file.NumStringIds();
- }
-
- // Note that we allocate the method type dex caches regardless of this flag,
- // and we make sure here that they're not used by the runtime. This is in the
- // interest of simplicity and to avoid extensive compiler and layout class changes.
- //
- // If this needs to be mitigated in a production system running this code,
- // DexCache::kDexCacheMethodTypeCacheSize can be set to zero.
- mirror::MethodTypeDexCacheType* method_types = nullptr;
- size_t num_method_types = 0;
-
- if (dex_file.NumProtoIds() < mirror::DexCache::kDexCacheMethodTypeCacheSize) {
- num_method_types = dex_file.NumProtoIds();
- } else {
- num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
- }
-
- if (num_method_types > 0) {
- method_types = reinterpret_cast<mirror::MethodTypeDexCacheType*>(
- raw_arrays + layout.MethodTypesOffset());
- }
-
- DCHECK_ALIGNED(raw_arrays, alignof(mirror::StringDexCacheType)) <<
- "Expected raw_arrays to align to StringDexCacheType.";
- DCHECK_ALIGNED(layout.StringsOffset(), alignof(mirror::StringDexCacheType)) <<
- "Expected StringsOffset() to align to StringDexCacheType.";
- DCHECK_ALIGNED(strings, alignof(mirror::StringDexCacheType)) <<
- "Expected strings to align to StringDexCacheType.";
- static_assert(alignof(mirror::StringDexCacheType) == 8u,
- "Expected StringDexCacheType to have align of 8.");
- if (kIsDebugBuild) {
- // Sanity check to make sure all the dex cache arrays are empty. b/28992179
- for (size_t i = 0; i < num_strings; ++i) {
- CHECK_EQ(strings[i].load(std::memory_order_relaxed).index, 0u);
- CHECK(strings[i].load(std::memory_order_relaxed).object.IsNull());
- }
- for (size_t i = 0; i < dex_file.NumTypeIds(); ++i) {
- CHECK(types[i].IsNull());
- }
- for (size_t i = 0; i < dex_file.NumMethodIds(); ++i) {
- CHECK(mirror::DexCache::GetElementPtrSize(methods, i, image_pointer_size_) == nullptr);
- }
- for (size_t i = 0; i < dex_file.NumFieldIds(); ++i) {
- CHECK(mirror::DexCache::GetElementPtrSize(fields, i, image_pointer_size_) == nullptr);
- }
- for (size_t i = 0; i < num_method_types; ++i) {
- CHECK_EQ(method_types[i].load(std::memory_order_relaxed).index, 0u);
- CHECK(method_types[i].load(std::memory_order_relaxed).object.IsNull());
- }
- }
- if (strings != nullptr) {
- mirror::StringDexCachePair::Initialize(strings);
- }
- if (method_types != nullptr) {
- mirror::MethodTypeDexCachePair::Initialize(method_types);
- }
- dex_cache->Init(&dex_file,
- location,
- strings,
- num_strings,
- types,
- dex_file.NumTypeIds(),
- methods,
- dex_file.NumMethodIds(),
- fields,
- dex_file.NumFieldIds(),
- method_types,
- num_method_types,
- image_pointer_size_);
-}
-
mirror::DexCache* ClassLinker::AllocDexCache(ObjPtr<mirror::String>* out_location,
Thread* self,
const DexFile& dex_file) {
@@ -2264,9 +2169,14 @@
ObjPtr<mirror::String> location = nullptr;
ObjPtr<mirror::DexCache> dex_cache = AllocDexCache(&location, self, dex_file);
if (dex_cache != nullptr) {
- WriterMutexLock mu(self, dex_lock_);
+ WriterMutexLock mu(self, *Locks::dex_lock_);
DCHECK(location != nullptr);
- InitializeDexCache(self, dex_cache, location, dex_file, linear_alloc);
+ mirror::DexCache::InitializeDexCache(self,
+ dex_cache,
+ location,
+ &dex_file,
+ linear_alloc,
+ image_pointer_size_);
}
return dex_cache.Ptr();
}
@@ -2563,109 +2473,56 @@
}
} else {
ScopedObjectAccessUnchecked soa(self);
- ObjPtr<mirror::Class> result_ptr;
- bool descriptor_equals;
- bool known_hierarchy =
- FindClassInBaseDexClassLoader(soa, self, descriptor, hash, class_loader, &result_ptr);
- if (result_ptr != nullptr) {
- // The chain was understood and we found the class. We still need to add the class to
- // the class table to protect from racy programs that can try and redefine the path list
- // which would change the Class<?> returned for subsequent evaluation of const-class.
- DCHECK(known_hierarchy);
- DCHECK(result_ptr->DescriptorEquals(descriptor));
- descriptor_equals = true;
- } else {
- // Either the chain wasn't understood or the class wasn't found.
- //
- // If the chain was understood but we did not find the class, let the Java-side
- // rediscover all this and throw the exception with the right stack trace. Note that
- // the Java-side could still succeed for racy programs if another thread is actively
- // modifying the class loader's path list.
+ ObjPtr<mirror::Class> cp_klass;
+ if (FindClassInBaseDexClassLoader(soa, self, descriptor, hash, class_loader, &cp_klass)) {
+ // The chain was understood, so cp_klass is either the class we were looking for, or null if
+ // it was not found.
+ if (cp_klass != nullptr) {
+ return cp_klass.Ptr();
+ }
+ // TODO: We handle the boot classpath loader in FindClassInBaseDexClassLoader. Try to unify
+ // this and the branch above. TODO: throw the right exception here.
- if (Runtime::Current()->IsAotCompiler()) {
- // Oops, compile-time, can't run actual class-loader code.
- ObjPtr<mirror::Throwable> pre_allocated =
- Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
- self->SetException(pre_allocated);
- return nullptr;
- }
-
- ScopedLocalRef<jobject> class_loader_object(
- soa.Env(), soa.AddLocalReference<jobject>(class_loader.Get()));
- std::string class_name_string(DescriptorToDot(descriptor));
- ScopedLocalRef<jobject> result(soa.Env(), nullptr);
- {
- ScopedThreadStateChange tsc(self, kNative);
- ScopedLocalRef<jobject> class_name_object(
- soa.Env(), soa.Env()->NewStringUTF(class_name_string.c_str()));
- if (class_name_object.get() == nullptr) {
- DCHECK(self->IsExceptionPending()); // OOME.
- return nullptr;
- }
- CHECK(class_loader_object.get() != nullptr);
- result.reset(soa.Env()->CallObjectMethod(class_loader_object.get(),
- WellKnownClasses::java_lang_ClassLoader_loadClass,
- class_name_object.get()));
- }
- if (self->IsExceptionPending()) {
- // If the ClassLoader threw, pass that exception up.
- // However, to comply with the RI behavior, first check if another thread succeeded.
- result_ptr = LookupClass(self, descriptor, hash, class_loader.Get());
- if (result_ptr != nullptr && !result_ptr->IsErroneous()) {
- self->ClearException();
- return EnsureResolved(self, descriptor, result_ptr);
- }
- return nullptr;
- } else if (result.get() == nullptr) {
- // broken loader - throw NPE to be compatible with Dalvik
- ThrowNullPointerException(StringPrintf("ClassLoader.loadClass returned null for %s",
- class_name_string.c_str()).c_str());
- return nullptr;
- }
- result_ptr = soa.Decode<mirror::Class>(result.get());
- // Check the name of the returned class.
- descriptor_equals = result_ptr->DescriptorEquals(descriptor);
+ // We'll let the Java-side rediscover all this and throw the exception with the right stack
+ // trace.
}
- // Try to insert the class to the class table, checking for mismatch.
- ObjPtr<mirror::Class> old;
- {
- ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ClassTable* const class_table = InsertClassTableForClassLoader(class_loader.Get());
- old = class_table->Lookup(descriptor, hash);
- if (old == nullptr) {
- old = result_ptr; // For the comparison below, after releasing the lock.
- if (descriptor_equals) {
- class_table->InsertWithHash(result_ptr.Ptr(), hash);
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
- } // else throw below, after releasing the lock.
- }
- }
- if (UNLIKELY(old != result_ptr)) {
- // Return `old` (even if `!descriptor_equals`) to mimic the RI behavior for parallel
- // capable class loaders. (All class loaders are considered parallel capable on Android.)
- mirror::Class* loader_class = class_loader->GetClass();
- const char* loader_class_name =
- loader_class->GetDexFile().StringByTypeIdx(loader_class->GetDexTypeIndex());
- LOG(WARNING) << "Initiating class loader of type " << DescriptorToDot(loader_class_name)
- << " is not well-behaved; it returned a different Class for racing loadClass(\""
- << DescriptorToDot(descriptor) << "\").";
- return EnsureResolved(self, descriptor, old);
- }
- if (UNLIKELY(!descriptor_equals)) {
- std::string result_storage;
- const char* result_name = result_ptr->GetDescriptor(&result_storage);
- std::string loader_storage;
- const char* loader_class_name = class_loader->GetClass()->GetDescriptor(&loader_storage);
- ThrowNoClassDefFoundError(
- "Initiating class loader of type %s returned class %s instead of %s.",
- DescriptorToDot(loader_class_name).c_str(),
- DescriptorToDot(result_name).c_str(),
- DescriptorToDot(descriptor).c_str());
+ if (Runtime::Current()->IsAotCompiler()) {
+ // Oops, compile-time, can't run actual class-loader code.
+ ObjPtr<mirror::Throwable> pre_allocated =
+ Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+ self->SetException(pre_allocated);
return nullptr;
}
- // success, return mirror::Class*
- return result_ptr.Ptr();
+
+ ScopedLocalRef<jobject> class_loader_object(soa.Env(),
+ soa.AddLocalReference<jobject>(class_loader.Get()));
+ std::string class_name_string(DescriptorToDot(descriptor));
+ ScopedLocalRef<jobject> result(soa.Env(), nullptr);
+ {
+ ScopedThreadStateChange tsc(self, kNative);
+ ScopedLocalRef<jobject> class_name_object(soa.Env(),
+ soa.Env()->NewStringUTF(class_name_string.c_str()));
+ if (class_name_object.get() == nullptr) {
+ DCHECK(self->IsExceptionPending()); // OOME.
+ return nullptr;
+ }
+ CHECK(class_loader_object.get() != nullptr);
+ result.reset(soa.Env()->CallObjectMethod(class_loader_object.get(),
+ WellKnownClasses::java_lang_ClassLoader_loadClass,
+ class_name_object.get()));
+ }
+ if (self->IsExceptionPending()) {
+ // If the ClassLoader threw, pass that exception up.
+ return nullptr;
+ } else if (result.get() == nullptr) {
+ // broken loader - throw NPE to be compatible with Dalvik
+ ThrowNullPointerException(StringPrintf("ClassLoader.loadClass returned null for %s",
+ class_name_string.c_str()).c_str());
+ return nullptr;
+ } else {
+ // success, return mirror::Class*
+ return soa.Decode<mirror::Class>(result.get()).Ptr();
+ }
}
UNREACHABLE();
}
@@ -3339,7 +3196,7 @@
void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) {
Thread* const self = Thread::Current();
- dex_lock_.AssertExclusiveHeld(self);
+ Locks::dex_lock_->AssertExclusiveHeld(self);
CHECK(dex_cache.Get() != nullptr) << dex_file.GetLocation();
// For app images, the dex cache location may be a suffix of the dex file location since the
// dex file location is an absolute path.
@@ -3381,7 +3238,7 @@
ObjPtr<mirror::ClassLoader> class_loader) {
Thread* self = Thread::Current();
{
- ReaderMutexLock mu(self, dex_lock_);
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
ObjPtr<mirror::DexCache> dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
return dex_cache.Ptr();
@@ -3404,7 +3261,7 @@
dex_file)));
Handle<mirror::String> h_location(hs.NewHandle(location));
{
- WriterMutexLock mu(self, dex_lock_);
+ WriterMutexLock mu(self, *Locks::dex_lock_);
ObjPtr<mirror::DexCache> dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
// Another thread managed to initialize the dex cache faster, so use that DexCache.
@@ -3420,7 +3277,12 @@
// Do InitializeDexCache while holding the dex lock to make sure two threads don't call it at the
// same time with the same dex cache. Since the .bss is shared, such a race could make the DCHECK
// that the arrays are null fail.
- InitializeDexCache(self, h_dex_cache.Get(), h_location.Get(), dex_file, linear_alloc);
+ mirror::DexCache::InitializeDexCache(self,
+ h_dex_cache.Get(),
+ h_location.Get(),
+ &dex_file,
+ linear_alloc,
+ image_pointer_size_);
RegisterDexFileLocked(dex_file, h_dex_cache);
}
table->InsertStrongRoot(h_dex_cache.Get());
@@ -3429,14 +3291,14 @@
void ClassLinker::RegisterDexFile(const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) {
- WriterMutexLock mu(Thread::Current(), dex_lock_);
+ WriterMutexLock mu(Thread::Current(), *Locks::dex_lock_);
RegisterDexFileLocked(dex_file, dex_cache);
}
mirror::DexCache* ClassLinker::FindDexCache(Thread* self,
const DexFile& dex_file,
bool allow_failure) {
- ReaderMutexLock mu(self, dex_lock_);
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
return FindDexCacheLocked(self, dex_file, allow_failure);
}
@@ -3473,7 +3335,7 @@
void ClassLinker::FixupDexCaches(ArtMethod* resolution_method) {
Thread* const self = Thread::Current();
- ReaderMutexLock mu(self, dex_lock_);
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
for (const DexCacheData& data : dex_caches_) {
if (!self->IsJWeakCleared(data.weak_root)) {
ObjPtr<mirror::DexCache> dex_cache = ObjPtr<mirror::DexCache>::DownCast(
@@ -3746,6 +3608,12 @@
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass);
}
+bool ClassLinker::RemoveClass(const char* descriptor, ObjPtr<mirror::ClassLoader> class_loader) {
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ ClassTable* const class_table = ClassTableForClassLoader(class_loader);
+ return class_table != nullptr && class_table->Remove(descriptor);
+}
+
mirror::Class* ClassLinker::LookupClass(Thread* self,
const char* descriptor,
size_t hash,
@@ -3796,8 +3664,7 @@
REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
ClassTable* const class_table = class_loader->GetClassTable();
ObjPtr<mirror::Class> klass = class_table->Lookup(descriptor_, hash_);
- // Add `klass` only if `class_loader` is its defining (not just initiating) class loader.
- if (klass != nullptr && klass->GetClassLoader() == class_loader) {
+ if (klass != nullptr) {
result_->push_back(klass);
}
}
@@ -3816,7 +3683,6 @@
const size_t hash = ComputeModifiedUtf8Hash(descriptor);
ObjPtr<mirror::Class> klass = boot_class_table_.Lookup(descriptor, hash);
if (klass != nullptr) {
- DCHECK(klass->GetClassLoader() == nullptr);
result.push_back(klass);
}
LookupClassesVisitor visitor(descriptor, hash, &result);
@@ -3864,6 +3730,17 @@
return false;
}
+// Ensures that methods have the kAccSkipAccessChecks bit set. We use the
+// kAccVerificationAttempted bit on the class access flags to determine whether this has been done
+// before.
+static void EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass, PointerSize pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!klass->WasVerificationAttempted()) {
+ klass->SetSkipAccessChecksFlagOnAllMethods(pointer_size);
+ klass->SetVerificationAttempted();
+ }
+}
+
verifier::MethodVerifier::FailureKind ClassLinker::VerifyClass(
Thread* self, Handle<mirror::Class> klass, verifier::HardFailLogMode log_level) {
{
@@ -3891,7 +3768,7 @@
// Don't attempt to re-verify if already sufficiently verified.
if (klass->IsVerified()) {
- EnsureSkipAccessChecksMethods(klass);
+ EnsureSkipAccessChecksMethods(klass, image_pointer_size_);
return verifier::MethodVerifier::kNoFailure;
}
if (klass->IsCompileTimeVerified() && Runtime::Current()->IsAotCompiler()) {
@@ -3910,7 +3787,7 @@
// Skip verification if disabled.
if (!Runtime::Current()->IsVerificationEnabled()) {
mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
- EnsureSkipAccessChecksMethods(klass);
+ EnsureSkipAccessChecksMethods(klass, image_pointer_size_);
return verifier::MethodVerifier::kNoFailure;
}
}
@@ -4045,19 +3922,12 @@
// Mark the class as having a verification attempt to avoid re-running the verifier.
klass->SetVerificationAttempted();
} else {
- EnsureSkipAccessChecksMethods(klass);
+ EnsureSkipAccessChecksMethods(klass, image_pointer_size_);
}
}
return verifier_failure;
}
-void ClassLinker::EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass) {
- if (!klass->WasVerificationAttempted()) {
- klass->SetSkipAccessChecksFlagOnAllMethods(image_pointer_size_);
- klass->SetVerificationAttempted();
- }
-}
-
bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file,
ObjPtr<mirror::Class> klass,
mirror::Class::Status& oat_file_class_status) {
@@ -5027,7 +4897,7 @@
bool can_init_parents) {
DCHECK(c.Get() != nullptr);
if (c->IsInitialized()) {
- EnsureSkipAccessChecksMethods(c);
+ EnsureSkipAccessChecksMethods(c, image_pointer_size_);
self->AssertNoPendingException();
return true;
}
@@ -5183,6 +5053,12 @@
if (klass->ShouldHaveImt()) {
klass->SetImt(imt, image_pointer_size_);
}
+
+ // Update CHA info based on whether we override methods.
+ // Have to do this before setting the class as resolved which allows
+ // instantiation of klass.
+ Runtime::Current()->GetClassHierarchyAnalysis()->UpdateAfterLoadingOf(klass);
+
// This will notify waiters on klass that saw the not yet resolved
// class in the class_table_ during EnsureResolved.
mirror::Class::SetStatus(klass, mirror::Class::kStatusResolved, self);
@@ -5226,6 +5102,11 @@
}
}
+ // Update CHA info based on whether we override methods.
+ // Have to do this before setting the class as resolved which allows
+ // instantiation of klass.
+ Runtime::Current()->GetClassHierarchyAnalysis()->UpdateAfterLoadingOf(h_new_class);
+
// This will notify waiters on temp class that saw the not yet resolved class in the
// class_table_ during EnsureResolved.
mirror::Class::SetStatus(klass, mirror::Class::kStatusRetired, self);
@@ -8039,42 +7920,6 @@
return type.Get();
}
-const char* ClassLinker::MethodShorty(uint32_t method_idx,
- ArtMethod* referrer,
- uint32_t* length) {
- ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- ObjPtr<mirror::DexCache> dex_cache = declaring_class->GetDexCache();
- const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
- return dex_file.GetMethodShorty(method_id, length);
-}
-
-class DumpClassVisitor : public ClassVisitor {
- public:
- explicit DumpClassVisitor(int flags) : flags_(flags) {}
-
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
- klass->DumpClass(LOG_STREAM(ERROR), flags_);
- return true;
- }
-
- private:
- const int flags_;
-};
-
-void ClassLinker::DumpAllClasses(int flags) {
- DumpClassVisitor visitor(flags);
- VisitClasses(&visitor);
-}
-
-static OatFile::OatMethod CreateOatMethod(const void* code) {
- CHECK(code != nullptr);
- const uint8_t* base = reinterpret_cast<const uint8_t*>(code); // Base of data points at code.
- base -= sizeof(void*); // Move backward so that code_offset != 0.
- const uint32_t code_offset = sizeof(void*);
- return OatFile::OatMethod(base, code_offset);
-}
-
bool ClassLinker::IsQuickResolutionStub(const void* entry_point) const {
return (entry_point == GetQuickResolutionStub()) ||
(quick_resolution_trampoline_ == entry_point);
@@ -8094,9 +7939,12 @@
return GetQuickGenericJniStub();
}
-void ClassLinker::SetEntryPointsToCompiledCode(ArtMethod* method,
- const void* method_code) const {
- OatFile::OatMethod oat_method = CreateOatMethod(method_code);
+void ClassLinker::SetEntryPointsToCompiledCode(ArtMethod* method, const void* code) const {
+ CHECK(code != nullptr);
+ const uint8_t* base = reinterpret_cast<const uint8_t*>(code); // Base of data points at code.
+ base -= sizeof(void*); // Move backward so that code_offset != 0.
+ const uint32_t code_offset = sizeof(void*);
+ OatFile::OatMethod oat_method(base, code_offset);
oat_method.LinkMethod(method);
}
@@ -8104,9 +7952,7 @@
if (!method->IsNative()) {
method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
} else {
- const void* quick_method_code = GetQuickGenericJniStub();
- OatFile::OatMethod oat_method = CreateOatMethod(quick_method_code);
- oat_method.LinkMethod(method);
+ SetEntryPointsToCompiledCode(method, GetQuickGenericJniStub());
}
}
@@ -8125,8 +7971,8 @@
REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
- num_zygote_classes += class_table->NumZygoteClasses(class_loader);
- num_non_zygote_classes += class_table->NumNonZygoteClasses(class_loader);
+ num_zygote_classes += class_table->NumZygoteClasses();
+ num_non_zygote_classes += class_table->NumNonZygoteClasses();
}
}
@@ -8137,13 +7983,13 @@
size_t ClassLinker::NumZygoteClasses() const {
CountClassesVisitor visitor;
VisitClassLoaders(&visitor);
- return visitor.num_zygote_classes + boot_class_table_.NumZygoteClasses(nullptr);
+ return visitor.num_zygote_classes + boot_class_table_.NumZygoteClasses();
}
size_t ClassLinker::NumNonZygoteClasses() const {
CountClassesVisitor visitor;
VisitClassLoaders(&visitor);
- return visitor.num_non_zygote_classes + boot_class_table_.NumNonZygoteClasses(nullptr);
+ return visitor.num_non_zygote_classes + boot_class_table_.NumNonZygoteClasses();
}
size_t ClassLinker::NumLoadedClasses() {
@@ -8157,7 +8003,7 @@
}
pid_t ClassLinker::GetDexLockOwner() {
- return dex_lock_.GetExclusiveOwnerTid();
+ return Locks::dex_lock_->GetExclusiveOwnerTid();
}
void ClassLinker::SetClassRoot(ClassRoot class_root, ObjPtr<mirror::Class> klass) {
@@ -8323,20 +8169,6 @@
return soa.Env()->NewGlobalRef(local_ref.get());
}
-ArtMethod* ClassLinker::CreateRuntimeMethod(LinearAlloc* linear_alloc) {
- const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
- const size_t method_size = ArtMethod::Size(image_pointer_size_);
- LengthPrefixedArray<ArtMethod>* method_array = AllocArtMethodArray(
- Thread::Current(),
- linear_alloc,
- 1);
- ArtMethod* method = &method_array->At(0, method_size, method_alignment);
- CHECK(method != nullptr);
- method->SetDexMethodIndex(DexFile::kDexNoIndex);
- CHECK(method->IsRuntimeMethod());
- return method;
-}
-
void ClassLinker::DropFindArrayClassCache() {
std::fill_n(find_array_class_cache_, kFindArrayCacheSize, GcRoot<mirror::Class>(nullptr));
find_array_class_cache_next_victim_ = 0;
@@ -8410,7 +8242,7 @@
std::set<DexCacheResolvedClasses> ret;
VLOG(class_linker) << "Collecting resolved classes";
const uint64_t start_time = NanoTime();
- ReaderMutexLock mu(soa.Self(), *DexLock());
+ ReaderMutexLock mu(soa.Self(), *Locks::dex_lock_);
// Loop through all the dex caches and inspect resolved classes.
for (const ClassLinker::DexCacheData& data : GetDexCachesData()) {
if (soa.Self()->IsJWeakCleared(data.weak_root)) {
@@ -8479,7 +8311,7 @@
std::unordered_map<std::string, const DexFile*> location_to_dex_file;
ScopedObjectAccess soa(self);
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
- ReaderMutexLock mu(self, *DexLock());
+ ReaderMutexLock mu(self, *Locks::dex_lock_);
for (const ClassLinker::DexCacheData& data : GetDexCachesData()) {
if (!self->IsJWeakCleared(data.weak_root)) {
ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(data.weak_root);
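The remaining class_linker.cc changes above follow one mechanical pattern: the ClassLinker-private dex_lock_ is replaced by the process-wide Locks::dex_lock_. A minimal before/after sketch of the idiom (a summary only; the actual call sites are in the hunks above):

// Before: mutex member owned by ClassLinker.
ReaderMutexLock mu(self, dex_lock_);          // fields annotated GUARDED_BY(dex_lock_)
// After: globally registered lock, so components outside ClassLinker can state the same requirement.
ReaderMutexLock mu(self, *Locks::dex_lock_);  // fields annotated GUARDED_BY(Locks::dex_lock_)
// Thread-safety annotations change accordingly, e.g. REQUIRES(!dex_lock_) becomes REQUIRES(!Locks::dex_lock_).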
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 1205074..de1f0f0 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -140,12 +140,12 @@
bool InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> boot_class_path,
std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
// Initialize class linker from one or more boot images.
bool InitFromBootImage(std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
// Add an image space to the class linker, may fix up classloader fields and dex cache fields.
// The dex files that were newly opened for the space are placed in the out argument
@@ -158,13 +158,13 @@
const char* dex_location,
std::vector<std::unique_ptr<const DexFile>>* out_dex_files,
std::string* error_msg)
- REQUIRES(!dex_lock_)
+ REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
bool OpenImageDexFiles(gc::space::ImageSpace* space,
std::vector<std::unique_ptr<const DexFile>>* out_dex_files,
std::string* error_msg)
- REQUIRES(!dex_lock_)
+ REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Finds a class by its descriptor, loading it if necessary.
@@ -173,18 +173,18 @@
const char* descriptor,
Handle<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
// Finds a class by its descriptor using the "system" class loader, ie by searching the
// boot_class_path_.
mirror::Class* FindSystemClass(Thread* self, const char* descriptor)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
// Finds the array class given for the element class.
mirror::Class* FindArrayClass(Thread* self, ObjPtr<mirror::Class>* element_class)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
// Returns true if the class linker is initialized.
bool IsInitialized() const {
@@ -199,7 +199,7 @@
const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
// Finds a class by its descriptor, returning null if it wasn't loaded
// by the given 'class_loader'.
@@ -218,7 +218,9 @@
mirror::Class* FindPrimitiveClass(char type) REQUIRES_SHARED(Locks::mutator_lock_);
- void DumpAllClasses(int flags)
+ // General class unloading is not supported; this is used to prune
+ // unwanted classes during image writing.
+ bool RemoveClass(const char* descriptor, ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -255,18 +257,18 @@
dex::TypeIndex type_idx,
ObjPtr<mirror::Class> referrer)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
// Resolve a Type with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
mirror::Class* ResolveType(dex::TypeIndex type_idx, ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
mirror::Class* ResolveType(dex::TypeIndex type_idx, ArtField* referrer)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
// Look up a resolved type with the given ID from the DexFile. The ClassLoader is used to search
// for the type, since it may be referenced from but not contained within the given DexFile.
@@ -285,7 +287,7 @@
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
// Determine whether a dex cache result should be trusted, or an IncompatibleClassChangeError
// check should be performed even after a hit.
@@ -307,7 +309,7 @@
ArtMethod* referrer,
InvokeType type)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
ArtMethod* GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -319,17 +321,17 @@
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
template <ResolveMode kResolveMode>
ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
ArtMethod* ResolveMethodWithoutInvokeType(const DexFile& dex_file,
uint32_t method_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
ArtField* GetResolvedField(uint32_t field_idx, ObjPtr<mirror::Class> field_declaring_class)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -337,7 +339,7 @@
REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* ResolveField(uint32_t field_idx, ArtMethod* referrer, bool is_static)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
// Resolve a field with a given ID from the DexFile, storing the
// result in DexCache. The ClassLinker and ClassLoader are used as
@@ -348,7 +350,7 @@
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, bool is_static)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
// Resolve a field with a given ID from the DexFile, storing the
// result in DexCache. The ClassLinker and ClassLoader are used as
@@ -359,7 +361,7 @@
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
// Resolve a method type with a given ID from the DexFile, storing
// the result in the DexCache.
@@ -368,11 +370,7 @@
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
-
- // Get shorty from method index without resolution. Used to do handlerization.
- const char* MethodShorty(uint32_t method_idx, ArtMethod* referrer, uint32_t* length)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
// Returns true on success, false if there's an exception pending.
// can_run_clinit=false allows the compiler to attempt to init a class,
@@ -382,20 +380,20 @@
bool can_init_fields,
bool can_init_parents)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
// Initializes classes that have instances in the image but that have
// <clinit> methods so they could not be initialized by the compiler.
void RunRootClinits()
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
mirror::DexCache* RegisterDexFile(const DexFile& dex_file,
ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES(!dex_lock_)
+ REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
- REQUIRES(!dex_lock_)
+ REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
const std::vector<const DexFile*>& GetBootClassPath() {
@@ -412,22 +410,22 @@
// can race with insertion and deletion of classes while the visitor is being called.
void VisitClassesWithoutClassesLock(ClassVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
void VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
- REQUIRES(!dex_lock_, !Locks::classlinker_classes_lock_, !Locks::trace_lock_)
+ REQUIRES(!Locks::dex_lock_, !Locks::classlinker_classes_lock_, !Locks::trace_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::DexCache* FindDexCache(Thread* self,
const DexFile& dex_file,
bool allow_failure = false)
- REQUIRES(!dex_lock_)
+ REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
void FixupDexCaches(ArtMethod* resolution_method)
- REQUIRES(!dex_lock_)
+ REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Allocate an instance of a java.lang.Object.
@@ -475,18 +473,18 @@
Handle<mirror::Class> klass,
verifier::HardFailLogMode log_level = verifier::HardFailLogMode::kLogNone)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
bool VerifyClassUsingOatFile(const DexFile& dex_file,
ObjPtr<mirror::Class> klass,
mirror::Class::Status& oat_file_class_status)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
void ResolveClassExceptionHandlerTypes(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
void ResolveMethodExceptionHandlerTypes(ArtMethod* klass)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
mirror::Class* CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa,
jstring name,
@@ -499,7 +497,7 @@
REQUIRES_SHARED(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ArtMethod* FindMethodForProxy(ObjPtr<mirror::Class> proxy_class, ArtMethod* proxy_method)
- REQUIRES(!dex_lock_)
+ REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Get the oat code for a method when its class isn't yet initialized.
@@ -562,7 +560,7 @@
// Note: the objects are not completely set up. Do not use this outside of tests and the compiler.
jobject CreatePathClassLoader(Thread* self, const std::vector<const DexFile*>& dex_files)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
PointerSize GetImagePointerSize() const {
return image_pointer_size_;
@@ -573,8 +571,6 @@
REQUIRES(Locks::classlinker_classes_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* CreateRuntimeMethod(LinearAlloc* linear_alloc);
-
// Clear the ArrayClass cache. This is necessary when cleaning up for the image, as the cache
// entries are roots, but potentially not image classes.
void DropFindArrayClassCache() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -605,11 +601,11 @@
REQUIRES_SHARED(Locks::mutator_lock_);
std::set<DexCacheResolvedClasses> GetResolvedClasses(bool ignore_boot_classes)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
std::unordered_set<std::string> GetClassDescriptorsForProfileKeys(
const std::set<DexCacheResolvedClasses>& classes)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
ObjPtr<mirror::ClassLoader> class_loader)
@@ -644,7 +640,7 @@
// class.
void ThrowEarlierClassFailure(ObjPtr<mirror::Class> c, bool wrap_in_no_class_def = false)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
// Get the actual holding class for a copied method. Pretty slow, don't call often.
mirror::Class* GetHoldingClassOfCopiedMethod(ArtMethod* method)
@@ -674,7 +670,7 @@
bool AttemptSupertypeVerification(Thread* self,
Handle<mirror::Class> klass,
Handle<mirror::Class> supertype)
- REQUIRES(!dex_lock_)
+ REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
static void DeleteClassLoader(Thread* self, const ClassLoaderData& data)
@@ -698,7 +694,7 @@
void FinishInit(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
// For early bootstrapping by Init
mirror::Class* AllocClass(Thread* self,
@@ -725,17 +721,9 @@
const DexFile& dex_file,
LinearAlloc* linear_alloc)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_)
+ REQUIRES(!Locks::dex_lock_)
REQUIRES(!Roles::uninterruptible_);
- void InitializeDexCache(Thread* self,
- ObjPtr<mirror::DexCache> dex_cache,
- ObjPtr<mirror::String> location,
- const DexFile& dex_file,
- LinearAlloc* linear_alloc)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(dex_lock_);
-
mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
@@ -749,14 +737,14 @@
size_t hash,
Handle<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
void AppendToBootClassPath(Thread* self, const DexFile& dex_file)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
void AppendToBootClassPath(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
// Precomputes size needed for Class, in the case of a non-temporary class this size must be
// sufficient to hold all static fields.
@@ -804,7 +792,7 @@
Handle<mirror::ClassLoader> class_loader,
ObjPtr<mirror::Class>* result)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
// Finds a class by its descriptor, returning NULL if it wasn't loaded
// by the given 'class_loader'. Uses the provided hash for the descriptor.
@@ -816,10 +804,10 @@
REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
- REQUIRES(dex_lock_)
+ REQUIRES(Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::DexCache* FindDexCacheLocked(Thread* self, const DexFile& dex_file, bool allow_failure)
- REQUIRES(dex_lock_)
+ REQUIRES(Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
bool InitializeClass(Thread* self,
@@ -827,12 +815,12 @@
bool can_run_clinit,
bool can_init_parents)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
bool InitializeDefaultInterfaceRecursive(Thread* self,
Handle<mirror::Class> klass,
bool can_run_clinit,
bool can_init_parents)
- REQUIRES(!dex_lock_)
+ REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
bool WaitForInitializeClass(Handle<mirror::Class> klass,
Thread* self,
@@ -865,7 +853,7 @@
bool LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
bool LinkMethods(Thread* self,
Handle<mirror::Class> klass,
@@ -1031,17 +1019,11 @@
void CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const
REQUIRES_SHARED(Locks::mutator_lock_);
- // For use by ImageWriter to find DexCaches for its roots
- ReaderWriterMutex* DexLock()
- REQUIRES_SHARED(Locks::mutator_lock_)
- LOCK_RETURNED(dex_lock_) {
- return &dex_lock_;
- }
- size_t GetDexCacheCount() REQUIRES_SHARED(Locks::mutator_lock_, dex_lock_) {
+ size_t GetDexCacheCount() REQUIRES_SHARED(Locks::mutator_lock_, Locks::dex_lock_) {
return dex_caches_.size();
}
const std::list<DexCacheData>& GetDexCachesData()
- REQUIRES_SHARED(Locks::mutator_lock_, dex_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::dex_lock_) {
return dex_caches_;
}
@@ -1050,12 +1032,6 @@
void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Ensures that methods have the kAccSkipAccessChecks bit set. We use the
- // kAccVerificationAttempted bit on the class access flags to determine whether this has been done
- // before.
- void EnsureSkipAccessChecksMethods(Handle<mirror::Class> c)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Register a class loader and create its class table and allocator. Should not be called if
// these are already created.
void RegisterClassLoader(ObjPtr<mirror::ClassLoader> class_loader)
@@ -1080,7 +1056,7 @@
mirror::Class* EnsureResolved(Thread* self, const char* descriptor, ObjPtr<mirror::Class> klass)
WARN_UNUSED
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_lock_);
+ REQUIRES(!Locks::dex_lock_);
void FixupTemporaryDeclaringClass(ObjPtr<mirror::Class> temp_class,
ObjPtr<mirror::Class> new_class)
@@ -1111,12 +1087,12 @@
ClassTable::ClassSet* new_class_set,
bool* out_forward_dex_cache_array,
std::string* out_error_msg)
- REQUIRES(!dex_lock_)
+ REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Check that c1 == FindSystemClass(self, descriptor). Abort with class dumps otherwise.
void CheckSystemClass(Thread* self, Handle<mirror::Class> c1, const char* descriptor)
- REQUIRES(!dex_lock_)
+ REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Sets imt_ref appropriately for LinkInterfaceMethods.
@@ -1147,10 +1123,9 @@
std::vector<const DexFile*> boot_class_path_;
std::vector<std::unique_ptr<const DexFile>> boot_dex_files_;
- mutable ReaderWriterMutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// JNI weak globals and side data to allow dex caches to get unloaded. We lazily delete weak
// globals when we register new dex files.
- std::list<DexCacheData> dex_caches_ GUARDED_BY(dex_lock_);
+ std::list<DexCacheData> dex_caches_ GUARDED_BY(Locks::dex_lock_);
// This contains the class loaders which have class tables. It is populated by
// InsertClassTableForClassLoader.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 7c06ffe..ddb9e59 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -1319,7 +1319,7 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
MutableHandle<mirror::DexCache> dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
{
- ReaderMutexLock mu(soa.Self(), *class_linker->DexLock());
+ ReaderMutexLock mu(soa.Self(), *Locks::dex_lock_);
for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
dex_cache.Assign(soa.Self()->DecodeJObject(data.weak_root)->AsDexCache());
if (dex_cache.Get() != nullptr) {
@@ -1343,7 +1343,7 @@
0u,
nullptr));
{
- WriterMutexLock mu(soa.Self(), *class_linker->DexLock());
+ WriterMutexLock mu(soa.Self(), *Locks::dex_lock_);
// Check that inserting with a UTF16 name works.
class_linker->RegisterDexFileLocked(*dex_file, dex_cache);
}
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index 81cdce5..0fcce6b 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -83,29 +83,18 @@
#pragma clang diagnostic pop // http://b/31104323
-size_t ClassTable::CountDefiningLoaderClasses(ObjPtr<mirror::ClassLoader> defining_loader,
- const ClassSet& set) const {
- size_t count = 0;
- for (const GcRoot<mirror::Class>& klass : set) {
- if (klass.Read()->GetClassLoader() == defining_loader) {
- ++count;
- }
- }
- return count;
-}
-
-size_t ClassTable::NumZygoteClasses(ObjPtr<mirror::ClassLoader> defining_loader) const {
+size_t ClassTable::NumZygoteClasses() const {
ReaderMutexLock mu(Thread::Current(), lock_);
size_t sum = 0;
for (size_t i = 0; i < classes_.size() - 1; ++i) {
- sum += CountDefiningLoaderClasses(defining_loader, classes_[i]);
+ sum += classes_[i].Size();
}
return sum;
}
-size_t ClassTable::NumNonZygoteClasses(ObjPtr<mirror::ClassLoader> defining_loader) const {
+size_t ClassTable::NumNonZygoteClasses() const {
ReaderMutexLock mu(Thread::Current(), lock_);
- return CountDefiningLoaderClasses(defining_loader, classes_.back());
+ return classes_.back().Size();
}
mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) {
@@ -113,7 +102,7 @@
for (ClassSet& class_set : classes_) {
auto it = class_set.FindWithHash(descriptor, hash);
if (it != class_set.end()) {
- return it->Read();
+ return it->Read();
}
}
return nullptr;
@@ -153,6 +142,7 @@
bool ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
const GcRoot<mirror::Class>& b) const {
+ DCHECK_EQ(a.Read()->GetClassLoader(), b.Read()->GetClassLoader());
std::string temp;
return a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp));
}
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 92634a4..558c144 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -84,14 +84,10 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of classes in previous snapshots.
- size_t NumZygoteClasses(ObjPtr<mirror::ClassLoader> defining_loader) const
- REQUIRES(!lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ size_t NumZygoteClasses() const REQUIRES(!lock_);
// Returns the number of classes in the latest snapshot.
- size_t NumNonZygoteClasses(ObjPtr<mirror::ClassLoader> defining_loader) const
- REQUIRES(!lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ size_t NumNonZygoteClasses() const REQUIRES(!lock_);
// Update a class in the table with the new class. Returns the existing class which was replaced.
mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash)
@@ -177,11 +173,6 @@
private:
void InsertWithoutLocks(ObjPtr<mirror::Class> klass) NO_THREAD_SAFETY_ANALYSIS;
- size_t CountDefiningLoaderClasses(ObjPtr<mirror::ClassLoader> defining_loader,
- const ClassSet& set) const
- REQUIRES(lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Return true if we inserted the oat file, false if it already exists.
bool InsertOatFileLocked(const OatFile* oat_file)
REQUIRES(lock_)
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 99b9f9d..578550c 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -189,6 +189,7 @@
kVerifyVarArgRangeNonZero = 0x100000,
kVerifyRuntimeOnly = 0x200000,
kVerifyError = 0x400000,
+ kVerifyRegHPrototype = 0x800000
};
static constexpr uint32_t kMaxVarArgRegs = 5;
@@ -579,6 +580,10 @@
kVerifyRegCNewArray | kVerifyRegCType | kVerifyRegCWide));
}
+ int GetVerifyTypeArgumentH() const {
+ return (kInstructionVerifyFlags[Opcode()] & kVerifyRegHPrototype);
+ }
+
int GetVerifyExtraFlags() const {
return (kInstructionVerifyFlags[Opcode()] & (kVerifyArrayData | kVerifyBranchTarget |
kVerifySwitchTargets | kVerifyVarArg | kVerifyVarArgNonZero | kVerifyVarArgRange |
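A hedged sketch of how the new flag could be consumed; the verifier side is not part of this hunk, so everything except GetVerifyTypeArgumentH() and kVerifyRegHPrototype is an assumption:

// Hypothetical caller (illustrative only).
void MaybeVerifyPrototypeArg(const Instruction* inst) {
  if (inst->GetVerifyTypeArgumentH() != 0) {
    // The instruction (invoke-polymorphic or invoke-polymorphic/range) carries a prototype
    // reference in vRegH; fetch that index and validate it against the dex file's proto_ids.
  }
}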
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index e537afe..ca2ce1d 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -269,8 +269,8 @@
V(0xF7, UNUSED_F7, "unused-f7", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xF8, UNUSED_F8, "unused-f8", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xF9, UNUSED_F9, "unused-f9", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xFA, INVOKE_POLYMORPHIC, "invoke-polymorphic", k45cc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero | kExperimental) \
- V(0xFB, INVOKE_POLYMORPHIC_RANGE, "invoke-polymorphic/range", k4rcc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero | kExperimental) \
+ V(0xFA, INVOKE_POLYMORPHIC, "invoke-polymorphic", k45cc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero | kVerifyRegHPrototype) \
+ V(0xFB, INVOKE_POLYMORPHIC_RANGE, "invoke-polymorphic/range", k4rcc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero | kVerifyRegHPrototype) \
V(0xFC, UNUSED_FC, "unused-fc", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xFD, UNUSED_FD, "unused-fd", k10x, kIndexUnknown, 0, kVerifyError) \
V(0xFE, UNUSED_FE, "unused-fe", k10x, kIndexUnknown, 0, kVerifyError) \
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 40186f8..2e4475f 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -2068,26 +2068,30 @@
size_t largest_continuous_free_pages = 0;
WriterMutexLock wmu(self, bulk_free_lock_);
MutexLock mu(self, lock_);
+ uint64_t total_free = 0;
for (FreePageRun* fpr : free_page_runs_) {
largest_continuous_free_pages = std::max(largest_continuous_free_pages,
fpr->ByteSize(this));
+ total_free += fpr->ByteSize(this);
}
+ size_t required_bytes = 0;
+ const char* new_buffer_msg = "";
if (failed_alloc_bytes > kLargeSizeThreshold) {
// Large allocation.
- size_t required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
- if (required_bytes > largest_continuous_free_pages) {
- os << "; failed due to fragmentation (required continguous free "
- << required_bytes << " bytes where largest contiguous free "
- << largest_continuous_free_pages << " bytes)";
- }
+ required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
} else {
// Non-large allocation.
- size_t required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
- if (required_bytes > largest_continuous_free_pages) {
- os << "; failed due to fragmentation (required continguous free "
- << required_bytes << " bytes for a new buffer where largest contiguous free "
- << largest_continuous_free_pages << " bytes)";
- }
+ required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
+ new_buffer_msg = " for a new buffer";
+ }
+ if (required_bytes > largest_continuous_free_pages) {
+ os << "; failed due to fragmentation ("
+ << "required contiguous free " << required_bytes << " bytes" << new_buffer_msg
+ << ", largest contiguous free " << largest_continuous_free_pages << " bytes"
+ << ", total free pages " << total_free << " bytes"
+ << ", space footprint " << footprint_ << " bytes"
+ << ", space max capacity " << max_capacity_ << " bytes"
+ << ")" << std::endl;
}
}
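For a non-large allocation that fails, the reworked diagnostic above would now read roughly as follows (all numbers illustrative):

; failed due to fragmentation (required contiguous free 40960 bytes for a new buffer, largest contiguous free 16384 bytes, total free pages 524288 bytes, space footprint 8388608 bytes, space max capacity 268435456 bytes)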
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 19ee0fb..fbab73f 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -812,13 +812,13 @@
false_gray_stack_.clear();
}
-
void ConcurrentCopying::IssueEmptyCheckpoint() {
Thread* self = Thread::Current();
ThreadList* thread_list = Runtime::Current()->GetThreadList();
Barrier* barrier = thread_list->EmptyCheckpointBarrier();
barrier->Init(self, 0);
- size_t barrier_count = thread_list->RunEmptyCheckpoint();
+ std::vector<uint32_t> runnable_thread_ids; // Used in debug build only
+ size_t barrier_count = thread_list->RunEmptyCheckpoint(runnable_thread_ids);
// If there are no threads to wait for, which implies that all the checkpoint
// functions have finished, then there is no need to release the mutator lock.
if (barrier_count == 0) {
@@ -828,7 +828,27 @@
Locks::mutator_lock_->SharedUnlock(self);
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
- barrier->Increment(self, barrier_count);
+ if (kIsDebugBuild) {
+ static constexpr uint64_t kEmptyCheckpointTimeoutMs = 600 * 1000; // 10 minutes.
+ bool timed_out = barrier->Increment(self, barrier_count, kEmptyCheckpointTimeoutMs);
+ if (timed_out) {
+ Runtime* runtime = Runtime::Current();
+ std::ostringstream ss;
+ ss << "Empty checkpoint timeout\n";
+ ss << "Barrier count " << barrier->GetCount(self) << "\n";
+ ss << "Runnable thread IDs";
+ for (uint32_t tid : runnable_thread_ids) {
+ ss << " " << tid;
+ }
+ ss << "\n";
+ Locks::mutator_lock_->Dump(ss);
+ ss << "\n";
+ runtime->GetThreadList()->Dump(ss);
+ LOG(FATAL) << ss.str();
+ }
+ } else {
+ barrier->Increment(self, barrier_count);
+ }
}
Locks::mutator_lock_->SharedLock(self);
}
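The debug-build branch above waits on the checkpoint barrier with a timeout and dumps the runnable thread IDs on expiry. Below is a minimal standalone analogue of that pattern, assuming a simple condition-variable barrier rather than ART's Barrier class; all names are hypothetical.

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <sstream>
#include <vector>

class CountingBarrier {
 public:
  explicit CountingBarrier(int count) : count_(count) {}

  void Pass() {
    std::lock_guard<std::mutex> lock(mu_);
    if (--count_ == 0) {
      cv_.notify_all();
    }
  }

  // Returns true on timeout, false if the count reached zero in time.
  bool WaitWithTimeout(std::chrono::milliseconds timeout) {
    std::unique_lock<std::mutex> lock(mu_);
    return !cv_.wait_for(lock, timeout, [this] { return count_ == 0; });
  }

  int Count() {
    std::lock_guard<std::mutex> lock(mu_);
    return count_;
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int count_;
};

int main() {
  std::vector<uint32_t> pending_thread_ids = {7, 12};  // collected when issuing the checkpoint
  CountingBarrier barrier(static_cast<int>(pending_thread_ids.size()));
  // No worker ever calls Pass() in this toy example, so the wait times out.
  if (barrier.WaitWithTimeout(std::chrono::milliseconds(100))) {
    std::ostringstream ss;
    ss << "Empty checkpoint timeout\n";
    ss << "Barrier count " << barrier.Count() << "\n";
    ss << "Runnable thread IDs";
    for (uint32_t tid : pending_thread_ids) {
      ss << " " << tid;
    }
    std::cerr << ss.str() << std::endl;
  }
}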
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5c219cc..8ff5e5a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1326,7 +1326,9 @@
std::ostringstream oss;
size_t total_bytes_free = GetFreeMemory();
oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
- << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM";
+ << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
+ << " max allowed footprint " << max_allowed_footprint_ << ", growth limit "
+ << growth_limit_;
// If the allocation failed due to fragmentation, print out the largest continuous allocation.
if (total_bytes_free >= byte_count) {
space::AllocSpace* space = nullptr;
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 72dbe6a..22da07d 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -865,11 +865,6 @@
// The invoke_method_idx here is the name of the signature polymorphic method that
// was symbolically invoked in bytecode (say MethodHandle.invoke or MethodHandle.invokeExact)
// and not the method that we'll dispatch to in the end.
- //
- // TODO(narayan) We'll have to check in the verifier that this is in fact a
- // signature polymorphic method so that we disallow calls via invoke-polymorphic
- // to non sig-poly methods. This would also have the side effect of verifying
- // that vRegC really is a reference type.
StackHandleScope<6> hs(self);
Handle<mirror::MethodHandleImpl> method_handle(hs.NewHandle(
ObjPtr<mirror::MethodHandleImpl>::DownCast(
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2ae989a..93f50ad 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -23,6 +23,7 @@
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
+#include "cha.h"
#include "debugger_interface.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
@@ -217,7 +218,9 @@
const uint8_t* code,
size_t code_size,
bool osr,
- Handle<mirror::ObjectArray<mirror::Object>> roots) {
+ Handle<mirror::ObjectArray<mirror::Object>> roots,
+ bool has_should_deoptimize_flag,
+ const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
uint8_t* result = CommitCodeInternal(self,
method,
stack_map,
@@ -228,7 +231,9 @@
code,
code_size,
osr,
- roots);
+ roots,
+ has_should_deoptimize_flag,
+ cha_single_implementation_list);
if (result == nullptr) {
// Retry.
GarbageCollectCache(self);
@@ -242,7 +247,9 @@
code,
code_size,
osr,
- roots);
+ roots,
+ has_should_deoptimize_flag,
+ cha_single_implementation_list);
}
return result;
}
@@ -277,6 +284,10 @@
reinterpret_cast<uint32_t*>(roots_data)[length] = length;
}
+static const uint8_t* FromStackMapToRoots(const uint8_t* stack_map_data) {
+ return stack_map_data - ComputeRootTableSize(GetNumberOfRoots(stack_map_data));
+}
+
static void FillRootTable(uint8_t* roots_data, Handle<mirror::ObjectArray<mirror::Object>> roots)
REQUIRES_SHARED(Locks::mutator_lock_) {
GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
@@ -293,7 +304,6 @@
}
gc_roots[i] = GcRoot<mirror::Object>(object);
}
- FillRootTableLength(roots_data, length);
}
static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
@@ -359,7 +369,7 @@
}
}
-void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
+void JitCodeCache::FreeCode(const void* code_ptr) {
uintptr_t allocation = FromCodeToAllocation(code_ptr);
// Notify native debugger that we are about to remove the code.
// It does nothing if we are not using native debugger.
@@ -368,41 +378,69 @@
FreeCode(reinterpret_cast<uint8_t*>(allocation));
}
+void JitCodeCache::FreeAllMethodHeaders(
+ const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
+ {
+ MutexLock mu(Thread::Current(), *Locks::cha_lock_);
+ Runtime::Current()->GetClassHierarchyAnalysis()
+ ->RemoveDependentsWithMethodHeaders(method_headers);
+ }
+
+ // We need to remove entries in method_headers from CHA dependencies
+ // first since once we do FreeCode() below, the memory can be reused
+ // so it's possible for the same method_header to start representing
+ // different compiled code.
+ MutexLock mu(Thread::Current(), lock_);
+ ScopedCodeCacheWrite scc(code_map_.get());
+ for (const OatQuickMethodHeader* method_header : method_headers) {
+ FreeCode(method_header->GetCode());
+ }
+}
+
void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
ScopedTrace trace(__PRETTY_FUNCTION__);
- MutexLock mu(self, lock_);
- // We do not check if a code cache GC is in progress, as this method comes
- // with the classlinker_classes_lock_ held, and suspending ourselves could
- // lead to a deadlock.
+ // We use a set to first collect all method_headers whose code needs to be
+ // removed. We need to free the underlying code after we remove CHA dependencies
+ // for entries in this set. And it's more efficient to iterate through
+ // the CHA dependency map just once with an unordered_set.
+ std::unordered_set<OatQuickMethodHeader*> method_headers;
{
- ScopedCodeCacheWrite scc(code_map_.get());
- for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
- if (alloc.ContainsUnsafe(it->second)) {
- FreeCode(it->first, it->second);
- it = method_code_map_.erase(it);
+ MutexLock mu(self, lock_);
+ // We do not check if a code cache GC is in progress, as this method comes
+ // with the classlinker_classes_lock_ held, and suspending ourselves could
+ // lead to a deadlock.
+ {
+ ScopedCodeCacheWrite scc(code_map_.get());
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ if (alloc.ContainsUnsafe(it->second)) {
+ method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
+ it = method_code_map_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+ }
+ for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
+ if (alloc.ContainsUnsafe(it->first)) {
+ // Note that the code has already been pushed to method_headers in the loop
+ // above and is going to be removed in FreeCode() below.
+ it = osr_code_map_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+ for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
+ ProfilingInfo* info = *it;
+ if (alloc.ContainsUnsafe(info->GetMethod())) {
+ info->GetMethod()->SetProfilingInfo(nullptr);
+ FreeData(reinterpret_cast<uint8_t*>(info));
+ it = profiling_infos_.erase(it);
} else {
++it;
}
}
}
- for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
- if (alloc.ContainsUnsafe(it->first)) {
- // Note that the code has already been removed in the loop above.
- it = osr_code_map_.erase(it);
- } else {
- ++it;
- }
- }
- for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
- ProfilingInfo* info = *it;
- if (alloc.ContainsUnsafe(info->GetMethod())) {
- info->GetMethod()->SetProfilingInfo(nullptr);
- FreeData(reinterpret_cast<uint8_t*>(info));
- it = profiling_infos_.erase(it);
- } else {
- ++it;
- }
- }
+ FreeAllMethodHeaders(method_headers);
}
bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
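The restructuring above switches RemoveMethodsIn to a two-phase scheme: collect the affected method headers under the cache lock, then drop their CHA dependencies before freeing the code, so a recycled header address cannot alias an old entry. A rough standalone sketch of that shape follows; CodeMap, DependencyTracker, and MethodHeader are stand-ins, not ART classes.

#include <cstdint>
#include <iostream>
#include <map>
#include <unordered_set>

struct MethodHeader { uint64_t code_begin; };

struct DependencyTracker {
  void RemoveDependentsWithHeaders(const std::unordered_set<MethodHeader*>& headers) {
    std::cout << "dropping " << headers.size() << " dependency entries\n";
  }
};

using CodeMap = std::map<MethodHeader*, int /*owner id*/>;

void RemoveOwnedCode(CodeMap& code_map, int owner, DependencyTracker& deps) {
  std::unordered_set<MethodHeader*> headers;
  // Phase 1: unlink entries from the map and remember their headers.
  for (auto it = code_map.begin(); it != code_map.end();) {
    if (it->second == owner) {
      headers.insert(it->first);
      it = code_map.erase(it);
    } else {
      ++it;
    }
  }
  // Phase 2: remove dependency records before freeing, so a recycled header
  // address cannot be confused with the old entry.
  deps.RemoveDependentsWithHeaders(headers);
  for (MethodHeader* header : headers) {
    delete header;
  }
}

int main() {
  DependencyTracker deps;
  CodeMap code_map;
  code_map.emplace(new MethodHeader{0x1000}, 1);
  code_map.emplace(new MethodHeader{0x2000}, 2);
  RemoveOwnedCode(code_map, /*owner=*/1, deps);
  std::cout << "remaining entries: " << code_map.size() << "\n";
  // Cleanup for the toy example.
  for (auto& entry : code_map) delete entry.first;
}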
@@ -464,13 +502,15 @@
const uint8_t* code,
size_t code_size,
bool osr,
- Handle<mirror::ObjectArray<mirror::Object>> roots) {
+ Handle<mirror::ObjectArray<mirror::Object>> roots,
+ bool has_should_deoptimize_flag,
+ const ArenaSet<ArtMethod*>&
+ cha_single_implementation_list) {
DCHECK(stack_map != nullptr);
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
// Ensure the header ends up at expected instruction alignment.
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
size_t total_size = header_size + code_size;
- const uint32_t num_roots = roots->GetLength();
OatQuickMethodHeader* method_header = nullptr;
uint8_t* code_ptr = nullptr;
@@ -483,9 +523,6 @@
ScopedCodeCacheWrite scc(code_map_.get());
memory = AllocateCode(total_size);
if (memory == nullptr) {
- // Fill root table length so that ClearData works correctly in case of failure. Otherwise
- // the length will be 0 and cause incorrect DCHECK failure.
- FillRootTableLength(roots_data, num_roots);
return nullptr;
}
code_ptr = memory + header_size;
@@ -506,15 +543,48 @@
// https://patchwork.kernel.org/patch/9047921/
FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
reinterpret_cast<char*>(code_ptr + code_size));
+ DCHECK(!Runtime::Current()->IsAotCompiler());
+ if (has_should_deoptimize_flag) {
+ method_header->SetHasShouldDeoptimizeFlag();
+ }
}
number_of_compilations_++;
}
// We need to update the entry point in the runnable state for the instrumentation.
{
+ // Need cha_lock_ for checking all single-implementation flags and register
+ // dependencies.
+ MutexLock cha_mu(self, *Locks::cha_lock_);
+ bool single_impl_still_valid = true;
+ for (ArtMethod* single_impl : cha_single_implementation_list) {
+ if (!single_impl->HasSingleImplementation()) {
+ // We simply discard the compiled code. Clear the
+ // counter so that it may be recompiled later. Hopefully the
+ // class hierarchy will be more stable when compilation is retried.
+ single_impl_still_valid = false;
+ method->ClearCounter();
+ break;
+ }
+ }
+
+ // Discard the code if any single-implementation assumptions are now invalid.
+ if (!single_impl_still_valid) {
+ VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
+ return nullptr;
+ }
+ for (ArtMethod* single_impl : cha_single_implementation_list) {
+ Runtime::Current()->GetClassHierarchyAnalysis()->AddDependency(
+ single_impl, method, method_header);
+ }
+
+ // The following needs to be guarded by cha_lock_ also. Otherwise it's
+ // possible that the compiled code is considered invalidated by some class linking,
+ // but below we still make the compiled code valid for the method.
MutexLock mu(self, lock_);
method_code_map_.Put(code_ptr, method);
// Fill the root table before updating the entry point.
+ DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
FillRootTable(roots_data, roots);
if (osr) {
number_of_osr_compilations_++;
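The block above re-validates every single-implementation assumption under cha_lock_ before publishing the compiled code, and discards it (clearing the hotness counter) if any assumption broke in the meantime. A hedged sketch of that commit-time pattern, using invented stand-in types rather than ART's ClassHierarchyAnalysis:

#include <iostream>
#include <mutex>
#include <string>
#include <utility>
#include <vector>

struct Method {
  std::string name;
  bool has_single_implementation;
};

struct Registry {
  std::mutex cha_lock;
  std::vector<std::pair<Method*, std::string>> dependencies;  // (assumed method, code id)
};

// Returns true if the code was published, false if it was discarded.
bool CommitWithAssumptions(Registry& registry,
                           const std::vector<Method*>& assumed_single_impls,
                           const std::string& code_id) {
  std::lock_guard<std::mutex> lock(registry.cha_lock);
  for (Method* m : assumed_single_impls) {
    if (!m->has_single_implementation) {
      // An assumption was invalidated between compilation and commit.
      std::cout << "discarding " << code_id << ": " << m->name
                << " is no longer single-implementation\n";
      return false;
    }
  }
  for (Method* m : assumed_single_impls) {
    registry.dependencies.emplace_back(m, code_id);
  }
  std::cout << "published " << code_id << " with "
            << assumed_single_impls.size() << " CHA dependencies\n";
  return true;
}

int main() {
  Registry registry;
  Method foo{"Foo.bar", true};
  Method baz{"Baz.qux", false};
  CommitWithAssumptions(registry, {&foo}, "code#1");        // published
  CommitWithAssumptions(registry, {&foo, &baz}, "code#2");  // discarded
}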
@@ -535,7 +605,8 @@
<< " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
<< " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
<< reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
- << reinterpret_cast<const void*>(method_header->GetEntryPoint() + method_header->code_size_);
+ << reinterpret_cast<const void*>(method_header->GetEntryPoint() +
+ method_header->GetCodeSize());
histogram_code_memory_use_.AddValue(code_size);
if (code_size > kCodeSizeLogThreshold) {
LOG(INFO) << "JIT allocated "
@@ -566,10 +637,6 @@
return used_memory_for_data_;
}
-static const uint8_t* FromStackMapToRoots(const uint8_t* stack_map_data) {
- return stack_map_data - ComputeRootTableSize(GetNumberOfRoots(stack_map_data));
-}
-
void JitCodeCache::ClearData(Thread* self,
uint8_t* stack_map_data,
uint8_t* roots_data) {
@@ -612,8 +679,14 @@
<< " for stack maps of "
<< ArtMethod::PrettyMethod(method);
}
- *roots_data = result;
- *stack_map_data = result + table_size;
+ if (result != nullptr) {
+ *roots_data = result;
+ *stack_map_data = result + table_size;
+ FillRootTableLength(*roots_data, number_of_roots);
+ } else {
+ *roots_data = nullptr;
+ *stack_map_data = nullptr;
+ }
}
class MarkCodeVisitor FINAL : public StackVisitor {
@@ -836,20 +909,23 @@
void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
ScopedTrace trace(__FUNCTION__);
- MutexLock mu(self, lock_);
- ScopedCodeCacheWrite scc(code_map_.get());
- // Iterate over all compiled code and remove entries that are not marked.
- for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
- const void* code_ptr = it->first;
- ArtMethod* method = it->second;
- uintptr_t allocation = FromCodeToAllocation(code_ptr);
- if (GetLiveBitmap()->Test(allocation)) {
- ++it;
- } else {
- FreeCode(code_ptr, method);
- it = method_code_map_.erase(it);
+ std::unordered_set<OatQuickMethodHeader*> method_headers;
+ {
+ MutexLock mu(self, lock_);
+ ScopedCodeCacheWrite scc(code_map_.get());
+ // Iterate over all compiled code and remove entries that are not marked.
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ const void* code_ptr = it->first;
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ if (GetLiveBitmap()->Test(allocation)) {
+ ++it;
+ } else {
+ method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
+ it = method_code_map_.erase(it);
+ }
}
}
+ FreeAllMethodHeaders(method_headers);
}
void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index be2cec5..30e2efb 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -20,6 +20,7 @@
#include "instrumentation.h"
#include "atomic.h"
+#include "base/arena_containers.h"
#include "base/histogram-inl.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -91,6 +92,11 @@
REQUIRES(!lock_);
// Allocate and write code and its metadata to the code cache.
+ // `cha_single_implementation_list` needs to be registered via CHA (if it's
+ // still valid), since the compiled code still needs to be invalidated if the
+ // single-implementation assumptions are violated later. This needs to be done
+ // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
+ // guard elimination.
uint8_t* CommitCode(Thread* self,
ArtMethod* method,
uint8_t* stack_map,
@@ -101,7 +107,9 @@
const uint8_t* code,
size_t code_size,
bool osr,
- Handle<mirror::ObjectArray<mirror::Object>> roots)
+ Handle<mirror::ObjectArray<mirror::Object>> roots,
+ bool has_should_deoptimize_flag,
+ const ArenaSet<ArtMethod*>& cha_single_implementation_list)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
@@ -230,7 +238,9 @@
const uint8_t* code,
size_t code_size,
bool osr,
- Handle<mirror::ObjectArray<mirror::Object>> roots)
+ Handle<mirror::ObjectArray<mirror::Object>> roots,
+ bool has_should_deoptimize_flag,
+ const ArenaSet<ArtMethod*>& cha_single_implementation_list)
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -245,8 +255,13 @@
bool WaitForPotentialCollectionToComplete(Thread* self)
REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);
- // Free in the mspace allocations taken by 'method'.
- void FreeCode(const void* code_ptr, ArtMethod* method) REQUIRES(lock_);
+ // Remove CHA dependents and underlying allocations for entries in `method_headers`.
+ void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
+ REQUIRES(!lock_)
+ REQUIRES(!Locks::cha_lock_);
+
+ // Free in the mspace allocations for `code_ptr`.
+ void FreeCode(const void* code_ptr) REQUIRES(lock_);
// Number of bytes allocated in the code cache.
size_t CodeCacheSizeLocked() REQUIRES(lock_);
diff --git a/runtime/jit/offline_profiling_info.h b/runtime/jit/offline_profiling_info.h
index 4136488..53d0eea 100644
--- a/runtime/jit/offline_profiling_info.h
+++ b/runtime/jit/offline_profiling_info.h
@@ -180,6 +180,7 @@
friend class ProfileCompilationInfoTest;
friend class CompilerDriverProfileTest;
friend class ProfileAssistantTest;
+ friend class Dex2oatLayoutTest;
DexFileToProfileInfoMap info_;
};
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index 52a0f23..398bfbc 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -29,7 +29,7 @@
class Object;
} // namespace mirror
-union PACKED(4) JValue {
+union PACKED(alignof(mirror::Object*)) JValue {
// We default initialize JValue instances to all-zeros.
JValue() : j(0) {}
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 5def65e..5fdf8f3 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -238,7 +238,7 @@
template<VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption>
inline PointerArray* Class::GetVTable() {
- DCHECK(IsResolved<kVerifyFlags>() || IsErroneous<kVerifyFlags>());
+ DCHECK(IsLoaded<kVerifyFlags>() || IsErroneous<kVerifyFlags>());
return GetFieldObject<PointerArray, kVerifyFlags, kReadBarrierOption>(
OFFSET_OF_OBJECT_MEMBER(Class, vtable_));
}
diff --git a/runtime/mirror/class_ext.cc b/runtime/mirror/class_ext.cc
index 259bbbe..7c6a710 100644
--- a/runtime/mirror/class_ext.cc
+++ b/runtime/mirror/class_ext.cc
@@ -46,8 +46,9 @@
SetFieldObject<false>(obsolete_methods_off, methods.Ptr());
}
-// TODO We really need to be careful how we update this. If we ever in the future make it so that
-// these arrays are written into without all threads being suspended we have a race condition!
+// We really need to be careful how we update this. If we ever in the future make it so that
+// these arrays are written to without all threads being suspended, we have a race condition! This
+// race could cause obsolete methods to be missed.
bool ClassExt::ExtendObsoleteArrays(Thread* self, uint32_t increase) {
DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId())
<< "Obsolete arrays are set without synchronization!";
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index a32d51f..741cf3b 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -22,15 +22,123 @@
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "globals.h"
+#include "linear_alloc.h"
#include "object.h"
#include "object-inl.h"
#include "object_array-inl.h"
#include "runtime.h"
#include "string.h"
+#include "thread.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
namespace art {
namespace mirror {
+void DexCache::InitializeDexCache(Thread* self,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::String> location,
+ const DexFile* dex_file,
+ LinearAlloc* linear_alloc,
+ PointerSize image_pointer_size) {
+ DCHECK(dex_file != nullptr);
+ ScopedAssertNoThreadSuspension sants(__FUNCTION__);
+ DexCacheArraysLayout layout(image_pointer_size, dex_file);
+ uint8_t* raw_arrays = nullptr;
+
+ const OatDexFile* const oat_dex = dex_file->GetOatDexFile();
+ if (oat_dex != nullptr && oat_dex->GetDexCacheArrays() != nullptr) {
+ raw_arrays = oat_dex->GetDexCacheArrays();
+ } else if (dex_file->NumStringIds() != 0u ||
+ dex_file->NumTypeIds() != 0u ||
+ dex_file->NumMethodIds() != 0u ||
+ dex_file->NumFieldIds() != 0u) {
+ // Zero-initialized.
+ raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
+ }
+
+ mirror::StringDexCacheType* strings = (dex_file->NumStringIds() == 0u) ? nullptr :
+ reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset());
+ GcRoot<mirror::Class>* types = (dex_file->NumTypeIds() == 0u) ? nullptr :
+ reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset());
+ ArtMethod** methods = (dex_file->NumMethodIds() == 0u) ? nullptr :
+ reinterpret_cast<ArtMethod**>(raw_arrays + layout.MethodsOffset());
+ ArtField** fields = (dex_file->NumFieldIds() == 0u) ? nullptr :
+ reinterpret_cast<ArtField**>(raw_arrays + layout.FieldsOffset());
+
+ size_t num_strings = mirror::DexCache::kDexCacheStringCacheSize;
+ if (dex_file->NumStringIds() < num_strings) {
+ num_strings = dex_file->NumStringIds();
+ }
+
+ // Note that we allocate the method type dex caches regardless of this flag,
+ // and we make sure here that they're not used by the runtime. This is in the
+ // interest of simplicity and to avoid extensive compiler and layout class changes.
+ //
+ // If this needs to be mitigated in a production system running this code,
+ // DexCache::kDexCacheMethodTypeCacheSize can be set to zero.
+ mirror::MethodTypeDexCacheType* method_types = nullptr;
+ size_t num_method_types = 0;
+
+ if (dex_file->NumProtoIds() < mirror::DexCache::kDexCacheMethodTypeCacheSize) {
+ num_method_types = dex_file->NumProtoIds();
+ } else {
+ num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
+ }
+
+ if (num_method_types > 0) {
+ method_types = reinterpret_cast<mirror::MethodTypeDexCacheType*>(
+ raw_arrays + layout.MethodTypesOffset());
+ }
+
+ DCHECK_ALIGNED(raw_arrays, alignof(mirror::StringDexCacheType)) <<
+ "Expected raw_arrays to align to StringDexCacheType.";
+ DCHECK_ALIGNED(layout.StringsOffset(), alignof(mirror::StringDexCacheType)) <<
+ "Expected StringsOffset() to align to StringDexCacheType.";
+ DCHECK_ALIGNED(strings, alignof(mirror::StringDexCacheType)) <<
+ "Expected strings to align to StringDexCacheType.";
+ static_assert(alignof(mirror::StringDexCacheType) == 8u,
+ "Expected StringDexCacheType to have align of 8.");
+ if (kIsDebugBuild) {
+ // Sanity check to make sure all the dex cache arrays are empty. b/28992179
+ for (size_t i = 0; i < num_strings; ++i) {
+ CHECK_EQ(strings[i].load(std::memory_order_relaxed).index, 0u);
+ CHECK(strings[i].load(std::memory_order_relaxed).object.IsNull());
+ }
+ for (size_t i = 0; i < dex_file->NumTypeIds(); ++i) {
+ CHECK(types[i].IsNull());
+ }
+ for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
+ CHECK(mirror::DexCache::GetElementPtrSize(methods, i, image_pointer_size) == nullptr);
+ }
+ for (size_t i = 0; i < dex_file->NumFieldIds(); ++i) {
+ CHECK(mirror::DexCache::GetElementPtrSize(fields, i, image_pointer_size) == nullptr);
+ }
+ for (size_t i = 0; i < num_method_types; ++i) {
+ CHECK_EQ(method_types[i].load(std::memory_order_relaxed).index, 0u);
+ CHECK(method_types[i].load(std::memory_order_relaxed).object.IsNull());
+ }
+ }
+ if (strings != nullptr) {
+ mirror::StringDexCachePair::Initialize(strings);
+ }
+ if (method_types != nullptr) {
+ mirror::MethodTypeDexCachePair::Initialize(method_types);
+ }
+ dex_cache->Init(dex_file,
+ location,
+ strings,
+ num_strings,
+ types,
+ dex_file->NumTypeIds(),
+ methods,
+ dex_file->NumMethodIds(),
+ fields,
+ dex_file->NumFieldIds(),
+ method_types,
+ num_method_types,
+ image_pointer_size);
+}
+
void DexCache::Init(const DexFile* dex_file,
ObjPtr<String> location,
StringDexCacheType* strings,
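InitializeDexCache above carves the string, type, method, field, and method-type arrays out of one zero-initialized linear-alloc block using precomputed offsets, capping cache sizes against the dex file's counts. The following is only an illustrative sketch of that carving, with a made-up Layout and caps rather than ART's DexCacheArraysLayout.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>

struct Layout {
  size_t strings_offset;
  size_t types_offset;
  size_t methods_offset;
  size_t total_size;
};

Layout ComputeLayout(size_t num_strings, size_t num_types, size_t num_methods) {
  Layout layout{};
  layout.strings_offset = 0;
  layout.types_offset = layout.strings_offset + num_strings * sizeof(uint64_t);
  layout.methods_offset = layout.types_offset + num_types * sizeof(void*);
  layout.total_size = layout.methods_offset + num_methods * sizeof(void*);
  return layout;
}

int main() {
  constexpr size_t kStringCacheCap = 1024;  // hypothetical cap, like the dex cache caps
  const size_t num_string_ids = 37;         // pretend this came from a dex file
  const size_t num_strings = std::min(num_string_ids, kStringCacheCap);

  Layout layout = ComputeLayout(num_strings, /*num_types=*/8, /*num_methods=*/16);
  std::unique_ptr<uint8_t[]> raw(new uint8_t[layout.total_size]());  // zero-initialized

  auto* strings = reinterpret_cast<uint64_t*>(raw.get() + layout.strings_offset);
  auto** types = reinterpret_cast<void**>(raw.get() + layout.types_offset);
  auto** methods = reinterpret_cast<void**>(raw.get() + layout.methods_offset);

  // Debug-style sanity check: everything starts out empty.
  for (size_t i = 0; i < num_strings; ++i) {
    if (strings[i] != 0) std::cerr << "string slot not empty\n";
  }
  std::cout << "total " << layout.total_size << " bytes, strings at +"
            << layout.strings_offset << ", types at +" << layout.types_offset
            << ", methods at +" << layout.methods_offset << "\n";
  (void)types;
  (void)methods;
}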
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index cc4d01a..ec265e5 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -31,6 +31,8 @@
class DexFile;
class ImageWriter;
union JValue;
+class LinearAlloc;
+class Thread;
namespace mirror {
@@ -137,19 +139,14 @@
return sizeof(DexCache);
}
- void Init(const DexFile* dex_file,
- ObjPtr<String> location,
- StringDexCacheType* strings,
- uint32_t num_strings,
- GcRoot<Class>* resolved_types,
- uint32_t num_resolved_types,
- ArtMethod** resolved_methods,
- uint32_t num_resolved_methods,
- ArtField** resolved_fields,
- uint32_t num_resolved_fields,
- MethodTypeDexCacheType* resolved_methodtypes,
- uint32_t num_resolved_methodtypes,
- PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void InitializeDexCache(Thread* self,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::String> location,
+ const DexFile* dex_file,
+ LinearAlloc* linear_alloc,
+ PointerSize image_pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::dex_lock_);
void Fixup(ArtMethod* trampoline, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -339,6 +336,21 @@
static void SetElementPtrSize(PtrType* ptr_array, size_t idx, PtrType ptr, PointerSize ptr_size);
private:
+ void Init(const DexFile* dex_file,
+ ObjPtr<String> location,
+ StringDexCacheType* strings,
+ uint32_t num_strings,
+ GcRoot<Class>* resolved_types,
+ uint32_t num_resolved_types,
+ ArtMethod** resolved_methods,
+ uint32_t num_resolved_methods,
+ ArtField** resolved_fields,
+ uint32_t num_resolved_fields,
+ MethodTypeDexCacheType* resolved_methodtypes,
+ uint32_t num_resolved_methodtypes,
+ PointerSize pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Visit instance fields of the dex cache as well as its associated arrays.
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
diff --git a/runtime/mirror/method_handle_impl.cc b/runtime/mirror/method_handle_impl.cc
index fdfaaa8..4f1c448 100644
--- a/runtime/mirror/method_handle_impl.cc
+++ b/runtime/mirror/method_handle_impl.cc
@@ -22,6 +22,12 @@
namespace art {
namespace mirror {
+mirror::Class* MethodHandle::StaticClass() {
+ mirror::Class* klass = MethodHandleImpl::StaticClass()->GetSuperClass();
+ DCHECK(klass->DescriptorEquals("Ljava/lang/invoke/MethodHandle;"));
+ return klass;
+}
+
GcRoot<mirror::Class> MethodHandleImpl::static_class_;
void MethodHandleImpl::SetClass(Class* klass) {
diff --git a/runtime/mirror/method_handle_impl.h b/runtime/mirror/method_handle_impl.h
index 9054216..5ea82b5 100644
--- a/runtime/mirror/method_handle_impl.h
+++ b/runtime/mirror/method_handle_impl.h
@@ -57,6 +57,8 @@
return static_cast<MethodHandleKind>(handle_kind);
}
+ static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
HeapReference<mirror::MethodType> nominal_type_;
HeapReference<mirror::MethodType> method_type_;
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index a1110d9..ae6b31d 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -67,10 +67,15 @@
// Set by the verifier for a method that could not be verified to follow structured locking.
static constexpr uint32_t kAccMustCountLocks = 0x02000000; // method (runtime)
-// Set to indicate that the ArtMethod is obsolete and has a different DexCache from it's declaring
+// Set to indicate that the ArtMethod is obsolete and has a different DexCache from its declaring
// class.
// TODO Might want to re-arrange some of these so that we can have obsolete + intrinsic methods.
static constexpr uint32_t kAccObsoleteMethod = 0x04000000; // method (runtime)
+
+// Set by the class linker for a method that has only one implementation for a
+// virtual call.
+static constexpr uint32_t kAccSingleImplementation = 0x08000000; // method (runtime)
+
static constexpr uint32_t kAccIntrinsic = 0x80000000; // method (runtime)
// Special runtime-only flags.
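Like the other runtime-only modifiers, kAccSingleImplementation is just one bit in the method's 32-bit access-flags word. A toy example of the test/set arithmetic; the Method struct below is invented, and only the two flag values mirror the hunk above.

#include <cstdint>
#include <iostream>

constexpr uint32_t kAccObsoleteMethod       = 0x04000000;
constexpr uint32_t kAccSingleImplementation = 0x08000000;

struct Method {
  uint32_t access_flags = 0;
  void AddAccessFlags(uint32_t flags) { access_flags |= flags; }
  bool HasSingleImplementation() const {
    return (access_flags & kAccSingleImplementation) != 0;
  }
};

int main() {
  Method m;
  m.AddAccessFlags(kAccSingleImplementation);
  std::cout << std::boolalpha << m.HasSingleImplementation() << "\n";  // true
  std::cout << ((m.access_flags & kAccObsoleteMethod) != 0) << "\n";   // false
}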
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index adf35b6..67b2e1c 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -177,8 +177,22 @@
}
static void VMDebug_printLoadedClasses(JNIEnv* env, jclass, jint flags) {
+ class DumpClassVisitor : public ClassVisitor {
+ public:
+ explicit DumpClassVisitor(int dump_flags) : flags_(dump_flags) {}
+
+ bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ klass->DumpClass(LOG_STREAM(ERROR), flags_);
+ return true;
+ }
+
+ private:
+ const int flags_;
+ };
+ DumpClassVisitor visitor(flags);
+
ScopedFastNativeObjectAccess soa(env);
- return Runtime::Current()->GetClassLinker()->DumpAllClasses(flags);
+ return Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
}
static jint VMDebug_getLoadedClassCount(JNIEnv* env, jclass) {
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 2376889..5565565 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -272,7 +272,7 @@
if (code == 0) {
return pc == 0;
}
- uintptr_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
+ uintptr_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].GetCodeSize();
return code <= pc && pc <= (code + code_size);
}
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index d7d0c4f..721fab9 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -44,7 +44,7 @@
if (method_header == nullptr) {
return 0u;
}
- return reinterpret_cast<const uint8_t*>(&method_header->code_size_) - begin_;
+ return reinterpret_cast<const uint8_t*>(method_header->GetCodeSizeAddr()) - begin_;
}
inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
@@ -52,7 +52,7 @@
if (code == nullptr) {
return 0u;
}
- return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].frame_info_.FrameSizeInBytes();
+ return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].GetFrameInfo().FrameSizeInBytes();
}
inline uint32_t OatFile::OatMethod::GetCoreSpillMask() const {
@@ -60,7 +60,7 @@
if (code == nullptr) {
return 0u;
}
- return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].frame_info_.CoreSpillMask();
+ return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].GetFrameInfo().CoreSpillMask();
}
inline uint32_t OatFile::OatMethod::GetFpSpillMask() const {
@@ -68,7 +68,7 @@
if (code == nullptr) {
return 0u;
}
- return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].frame_info_.FpSpillMask();
+ return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].GetFrameInfo().FpSpillMask();
}
inline uint32_t OatFile::OatMethod::GetVmapTableOffset() const {
@@ -81,7 +81,7 @@
if (method_header == nullptr) {
return 0u;
}
- return reinterpret_cast<const uint8_t*>(&method_header->vmap_table_offset_) - begin_;
+ return reinterpret_cast<const uint8_t*>(method_header->GetVmapTableOffsetAddr()) - begin_;
}
inline const uint8_t* OatFile::OatMethod::GetVmapTable() const {
@@ -89,7 +89,7 @@
if (code == nullptr) {
return nullptr;
}
- uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].vmap_table_offset_;
+ uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].GetVmapTableOffset();
if (UNLIKELY(offset == 0u)) {
return nullptr;
}
@@ -101,7 +101,7 @@
if (code == nullptr) {
return 0u;
}
- return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
+ return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].GetCodeSize();
}
inline uint32_t OatFile::OatMethod::GetCodeOffset() const {
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 4d1e1ea..6a62a16 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -314,7 +314,7 @@
const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(
dex_location_.c_str(), dex_checksum_pointer, &error_msg);
if (oat_dex_file == nullptr) {
- VLOG(oat) << error_msg;
+ LOG(ERROR) << error_msg;
return kOatDexOutOfDate;
}
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 4afca7d..3cdde5a 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -58,7 +58,7 @@
}
bool IsOptimized() const {
- return code_size_ != 0 && vmap_table_offset_ != 0;
+ return GetCodeSize() != 0 && vmap_table_offset_ != 0;
}
const void* GetOptimizedCodeInfoPtr() const {
@@ -81,7 +81,23 @@
}
uint32_t GetCodeSize() const {
- return code_size_;
+ return code_size_ & kCodeSizeMask;
+ }
+
+ const uint32_t* GetCodeSizeAddr() const {
+ return &code_size_;
+ }
+
+ uint32_t GetVmapTableOffset() const {
+ return vmap_table_offset_;
+ }
+
+ void SetVmapTableOffset(uint32_t offset) {
+ vmap_table_offset_ = offset;
+ }
+
+ const uint32_t* GetVmapTableOffsetAddr() const {
+ return &vmap_table_offset_;
}
const uint8_t* GetVmapTable() const {
@@ -96,7 +112,7 @@
// On Thumb-2, the pc is offset by one.
code_start++;
}
- return code_start <= pc && pc <= (code_start + code_size_);
+ return code_start <= pc && pc <= (code_start + GetCodeSize());
}
const uint8_t* GetEntryPoint() const {
@@ -130,11 +146,25 @@
uint32_t ToDexPc(ArtMethod* method, const uintptr_t pc, bool abort_on_failure = true) const;
+ void SetHasShouldDeoptimizeFlag() {
+ DCHECK_EQ(code_size_ & kShouldDeoptimizeMask, 0u);
+ code_size_ |= kShouldDeoptimizeMask;
+ }
+
+ bool HasShouldDeoptimizeFlag() const {
+ return (code_size_ & kShouldDeoptimizeMask) != 0;
+ }
+
+ private:
+ static constexpr uint32_t kShouldDeoptimizeMask = 0x80000000;
+ static constexpr uint32_t kCodeSizeMask = ~kShouldDeoptimizeMask;
+
// The offset in bytes from the start of the vmap table to the end of the header.
uint32_t vmap_table_offset_;
// The stack frame information.
QuickMethodFrameInfo frame_info_;
- // The code size in bytes.
+ // The code size in bytes. The highest bit is used to signify if the compiled
+ // code with the method header has the should_deoptimize flag set.
uint32_t code_size_;
// The actual code.
uint8_t code_[0];
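The comment above describes packing a boolean into the top bit of code_size_, which is why readers now go through GetCodeSize() and its mask. A small self-contained sketch of that packing, not the real OatQuickMethodHeader:

#include <cassert>
#include <cstdint>
#include <iostream>

class PackedCodeSize {
 public:
  explicit PackedCodeSize(uint32_t code_size) : code_size_(code_size) {
    assert((code_size & kShouldDeoptimizeMask) == 0);  // size must fit in 31 bits
  }

  uint32_t GetCodeSize() const { return code_size_ & kCodeSizeMask; }

  void SetHasShouldDeoptimizeFlag() { code_size_ |= kShouldDeoptimizeMask; }

  bool HasShouldDeoptimizeFlag() const {
    return (code_size_ & kShouldDeoptimizeMask) != 0;
  }

 private:
  static constexpr uint32_t kShouldDeoptimizeMask = 0x80000000;
  static constexpr uint32_t kCodeSizeMask = ~kShouldDeoptimizeMask;

  uint32_t code_size_;
};

int main() {
  PackedCodeSize header(0x1234);
  header.SetHasShouldDeoptimizeFlag();
  std::cout << std::hex << header.GetCodeSize() << "\n";  // 1234, flag masked out
  std::cout << header.HasShouldDeoptimizeFlag() << "\n";  // 1
}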
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index d1c2293..1ad3f08 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -1142,6 +1142,34 @@
return RetransformClassesWithHook(reinterpret_cast<ArtJvmTiEnv*>(env), classes, hook);
}
+ static jvmtiError RedefineClassDirect(ArtJvmTiEnv* env,
+ jclass klass,
+ jint dex_size,
+ unsigned char* dex_file) {
+ if (!IsValidEnv(env)) {
+ return ERR(INVALID_ENVIRONMENT);
+ }
+ jvmtiError ret = OK;
+ std::string location;
+ if ((ret = GetClassLocation(env, klass, &location)) != OK) {
+ // TODO Do something more here? Maybe give log statements?
+ return ret;
+ }
+ std::string error;
+ ret = Redefiner::RedefineClass(env,
+ art::Runtime::Current(),
+ art::Thread::Current(),
+ klass,
+ location,
+ dex_size,
+ reinterpret_cast<uint8_t*>(dex_file),
+ &error);
+ if (ret != OK) {
+ LOG(ERROR) << "FAILURE TO REDEFINE " << error;
+ }
+ return ret;
+ }
+
// TODO This will be called by the event handler for the art::ti Event Load Event
static jvmtiError RetransformClassesWithHook(ArtJvmTiEnv* env,
const std::vector<jclass>& classes,
@@ -1252,7 +1280,10 @@
reinterpret_cast<void*>(JvmtiFunctions::RetransformClassWithHook),
// nullptr, // reserved1
JvmtiFunctions::SetEventNotificationMode,
- nullptr, // reserved3
+ // SPECIAL FUNCTION: RedefineClassDirect occupies the slot that is normally reserved3.
+ // TODO Remove once we have events working.
+ reinterpret_cast<void*>(JvmtiFunctions::RedefineClassDirect),
+ // nullptr, // reserved3
JvmtiFunctions::GetAllThreads,
JvmtiFunctions::SuspendThread,
JvmtiFunctions::ResumeThread,
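The table change above temporarily smuggles RedefineClassDirect through the reserved3 slot, so a caller that knows the real signature has to cast the stored pointer back before calling it. Below is a standalone sketch of that mechanism with an invented table and signature; the function-pointer/void* round-trip is conditionally supported by the standard but works on the usual platforms.

#include <iostream>

struct FunctionTableSketch {
  void* reserved3;  // normally unused; here it carries an extension function
};

using RedefineFn = int (*)(int klass_id, int dex_size);

int RedefineClassDirectSketch(int klass_id, int dex_size) {
  std::cout << "redefining class " << klass_id << " with " << dex_size << " bytes\n";
  return 0;  // OK
}

int main() {
  FunctionTableSketch table;
  table.reserved3 = reinterpret_cast<void*>(&RedefineClassDirectSketch);

  // Caller side: recover the typed entry point from the reserved slot.
  auto fn = reinterpret_cast<RedefineFn>(table.reserved3);
  return fn(/*klass_id=*/42, /*dex_size=*/1024);
}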
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index 69bd887..d0349b9 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -66,7 +66,8 @@
return map;
}
memcpy(map->Begin(), dex_data, data_len);
- // Make the dex files mmap read only.
+ // Make the dex files mmap read-only. This matches how other DexFiles are mmapped and prevents
+ // programs from corrupting it.
map->Protect(PROT_READ);
return map;
}
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index f3a5834..c819acd 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -68,7 +68,7 @@
public:
// Redefine the given class with the given dex data. Note this function does not take ownership of
// the dex_data pointer. It is not used after this call however and may be freed if desired.
- // The caller is responsible for freeing it. The runtime makes it's own copy of the data.
+ // The caller is responsible for freeing it. The runtime makes its own copy of the data.
static jvmtiError RedefineClass(ArtJvmTiEnv* env,
art::Runtime* runtime,
art::Thread* self,
@@ -146,6 +146,7 @@
// This will check that no constraints are violated (more than 1 class in dex file, any changes in
// number/declaration of methods & fields, changes in access flags, etc.)
bool EnsureRedefinitionIsValid() {
+ LOG(WARNING) << "Redefinition is not checked for validity currently";
return true;
}
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
index f7b8b92..f545125 100644
--- a/runtime/openjdkjvmti/transform.cc
+++ b/runtime/openjdkjvmti/transform.cc
@@ -58,6 +58,21 @@
namespace openjdkjvmti {
+jvmtiError GetClassLocation(ArtJvmTiEnv* env, jclass klass, /*out*/std::string* location) {
+ JNIEnv* jni_env = nullptr;
+ jint ret = env->art_vm->GetEnv(reinterpret_cast<void**>(&jni_env), JNI_VERSION_1_1);
+ if (ret != JNI_OK) {
+ // TODO Different error might be better?
+ return ERR(INTERNAL);
+ }
+ art::ScopedObjectAccess soa(jni_env);
+ art::StackHandleScope<1> hs(art::Thread::Current());
+ art::Handle<art::mirror::Class> hs_klass(hs.NewHandle(soa.Decode<art::mirror::Class>(klass)));
+ const art::DexFile& dex = hs_klass->GetDexFile();
+ *location = dex.GetLocation();
+ return OK;
+}
+
// TODO Move this function somewhere more appropriate.
// Gets the data surrounding the given class.
jvmtiError GetTransformationData(ArtJvmTiEnv* env,
diff --git a/runtime/openjdkjvmti/transform.h b/runtime/openjdkjvmti/transform.h
index 35b990b..0ad5099 100644
--- a/runtime/openjdkjvmti/transform.h
+++ b/runtime/openjdkjvmti/transform.h
@@ -41,6 +41,8 @@
namespace openjdkjvmti {
+jvmtiError GetClassLocation(ArtJvmTiEnv* env, jclass klass, /*out*/std::string* location);
+
// Gets the data surrounding the given class.
jvmtiError GetTransformationData(ArtJvmTiEnv* env,
jclass klass,
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 68b956b..66bb803 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -62,6 +62,7 @@
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
+#include "cha.h"
#include "class_linker-inl.h"
#include "compiler_callbacks.h"
#include "debugger.h"
@@ -349,6 +350,7 @@
delete monitor_list_;
delete monitor_pool_;
delete class_linker_;
+ delete cha_;
delete heap_;
delete intern_table_;
delete oat_file_manager_;
@@ -1203,6 +1205,7 @@
CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
class_linker_ = new ClassLinker(intern_table_);
+ cha_ = new ClassHierarchyAnalysis;
if (GetHeap()->HasBootImageSpace()) {
bool result = class_linker_->InitFromBootImage(&error_msg);
if (!result) {
@@ -1733,9 +1736,24 @@
}
}
+static ArtMethod* CreateRuntimeMethod(ClassLinker* class_linker, LinearAlloc* linear_alloc) {
+ const PointerSize image_pointer_size = class_linker->GetImagePointerSize();
+ const size_t method_alignment = ArtMethod::Alignment(image_pointer_size);
+ const size_t method_size = ArtMethod::Size(image_pointer_size);
+ LengthPrefixedArray<ArtMethod>* method_array = class_linker->AllocArtMethodArray(
+ Thread::Current(),
+ linear_alloc,
+ 1);
+ ArtMethod* method = &method_array->At(0, method_size, method_alignment);
+ CHECK(method != nullptr);
+ method->SetDexMethodIndex(DexFile::kDexNoIndex);
+ CHECK(method->IsRuntimeMethod());
+ return method;
+}
+
ArtMethod* Runtime::CreateImtConflictMethod(LinearAlloc* linear_alloc) {
ClassLinker* const class_linker = GetClassLinker();
- ArtMethod* method = class_linker->CreateRuntimeMethod(linear_alloc);
+ ArtMethod* method = CreateRuntimeMethod(class_linker, linear_alloc);
// When compiling, the code pointer will get set later when the image is loaded.
const PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
if (IsAotCompiler()) {
@@ -1756,7 +1774,7 @@
}
ArtMethod* Runtime::CreateResolutionMethod() {
- auto* method = GetClassLinker()->CreateRuntimeMethod(GetLinearAlloc());
+ auto* method = CreateRuntimeMethod(GetClassLinker(), GetLinearAlloc());
// When compiling, the code pointer will get set later when the image is loaded.
if (IsAotCompiler()) {
PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
@@ -1768,7 +1786,7 @@
}
ArtMethod* Runtime::CreateCalleeSaveMethod() {
- auto* method = GetClassLinker()->CreateRuntimeMethod(GetLinearAlloc());
+ auto* method = CreateRuntimeMethod(GetClassLinker(), GetLinearAlloc());
PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
DCHECK_NE(instruction_set_, kNone);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 4f31887..e6b3128 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -76,6 +76,7 @@
} // namespace verifier
class ArenaPool;
class ArtMethod;
+class ClassHierarchyAnalysis;
class ClassLinker;
class Closure;
class CompilerCallbacks;
@@ -674,6 +675,10 @@
void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
+ ClassHierarchyAnalysis* GetClassHierarchyAnalysis() {
+ return cha_;
+ }
+
NO_RETURN
static void Aborter(const char* abort_message);
@@ -921,6 +926,8 @@
// Generic system-weak holders.
std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;
+ ClassHierarchyAnalysis* cha_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 167a30b..f20aa20 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -648,7 +648,7 @@
return;
}
- uint32_t code_size = OatQuickMethodHeader::FromEntryPoint(code)->code_size_;
+ uint32_t code_size = OatQuickMethodHeader::FromEntryPoint(code)->GetCodeSize();
uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
CHECK(code_start <= pc && pc <= (code_start + code_size))
<< method->PrettyMethod()
diff --git a/runtime/stack.h b/runtime/stack.h
index e879214..d02e4b7 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -66,6 +66,11 @@
struct ShadowFrameDeleter;
using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;
+// Size in bytes of the should_deoptimize flag on the stack.
+// We just need 4 bytes for our purpose regardless of the architecture. Frame size
+// calculation will automatically do alignment for the final frame size.
+static constexpr size_t kShouldDeoptimizeFlagSize = 4;
+
// Counting locks by storing object pointers into a vector. Duplicate entries mark recursive locks.
// The vector will be visited with the ShadowFrame during GC (so all the locked-on objects are
// thread roots).
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 1283cf0..17f5513 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1394,6 +1394,7 @@
os << " | group=\"" << group_name << "\""
<< " sCount=" << thread->tls32_.suspend_count
<< " dsCount=" << thread->tls32_.debug_suspend_count
+ << " flags=" << thread->tls32_.state_and_flags.as_struct.flags
<< " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
<< " self=" << reinterpret_cast<const void*>(thread) << "\n";
}
@@ -3175,4 +3176,8 @@
tlsPtr_.exception = new_exception.Ptr();
}
+bool Thread::IsAotCompiler() {
+ return Runtime::Current()->IsAotCompiler();
+}
+
} // namespace art
diff --git a/runtime/thread.h b/runtime/thread.h
index 35226f2..b80fdc7 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -33,13 +33,13 @@
#include "base/mutex.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc_root.h"
#include "globals.h"
#include "handle_scope.h"
#include "instrumentation.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
-#include "runtime.h"
#include "runtime_stats.h"
#include "stack.h"
#include "thread_state.h"
@@ -87,7 +87,6 @@
class JavaVMExt;
struct JNIEnvExt;
class Monitor;
-class Runtime;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class SingleStepControl;
@@ -949,17 +948,17 @@
}
std::vector<ArtMethod*>* GetStackTraceSample() const {
- DCHECK(!Runtime::Current()->IsAotCompiler());
+ DCHECK(!IsAotCompiler());
return tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
}
void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
- DCHECK(!Runtime::Current()->IsAotCompiler());
+ DCHECK(!IsAotCompiler());
tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample = sample;
}
verifier::VerifierDeps* GetVerifierDeps() const {
- DCHECK(Runtime::Current()->IsAotCompiler());
+ DCHECK(IsAotCompiler());
return tlsPtr_.deps_or_stack_trace_sample.verifier_deps;
}
@@ -967,7 +966,7 @@
// entry in the thread is cleared before destruction of the actual VerifierDeps
// object, or the thread.
void SetVerifierDeps(verifier::VerifierDeps* verifier_deps) {
- DCHECK(Runtime::Current()->IsAotCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(verifier_deps == nullptr || tlsPtr_.deps_or_stack_trace_sample.verifier_deps == nullptr);
tlsPtr_.deps_or_stack_trace_sample.verifier_deps = verifier_deps;
}
@@ -1246,6 +1245,8 @@
// Install the protected region for implicit stack checks.
void InstallImplicitProtection();
+ static bool IsAotCompiler();
+
// 32 bits of atomically changed state and flags. Keeping as 32 bits allows and atomic CAS to
// change from being Suspended to Runnable without a suspend request occurring.
union PACKED(4) StateAndFlags {
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 27fb37a..a6bd83d 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -375,7 +375,7 @@
return count;
}
-size_t ThreadList::RunEmptyCheckpoint() {
+size_t ThreadList::RunEmptyCheckpoint(std::vector<uint32_t>& runnable_thread_ids) {
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertNotExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
@@ -392,6 +392,9 @@
// This thread will run an empty checkpoint (decrement the empty checkpoint barrier)
// some time in the near future.
++count;
+ if (kIsDebugBuild) {
+ runnable_thread_ids.push_back(thread->GetThreadId());
+ }
break;
}
if (thread->GetState() != kRunnable) {
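The kIsDebugBuild guard above keeps the runnable-thread-ID bookkeeping out of release builds while still feeding the timeout dump in debug builds. A tiny standalone sketch of that pattern; the thread data here is faked, and only the constexpr-flag idea carries over.

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

constexpr bool kIsDebugBuild =
#ifdef NDEBUG
    false;
#else
    true;
#endif

size_t CountRunnable(const std::vector<std::pair<uint32_t, bool>>& threads,  // (id, runnable)
                     std::vector<uint32_t>& runnable_thread_ids) {
  size_t count = 0;
  for (const auto& [tid, runnable] : threads) {
    if (runnable) {
      ++count;
      if (kIsDebugBuild) {
        runnable_thread_ids.push_back(tid);  // only recorded for debug dumps
      }
    }
  }
  return count;
}

int main() {
  std::vector<uint32_t> ids;
  size_t n = CountRunnable({{7, true}, {9, false}, {12, true}}, ids);
  std::cout << n << " runnable";
  for (uint32_t tid : ids) std::cout << " " << tid;
  std::cout << "\n";
}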
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 133d430..1acabcb 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -27,6 +27,7 @@
#include <bitset>
#include <list>
+#include <vector>
namespace art {
namespace gc {
@@ -106,7 +107,9 @@
// in-flight mutator heap access (eg. a read barrier.) Runnable threads will respond by
// decrementing the empty checkpoint barrier count. This works even when the weak ref access is
// disabled. Only one concurrent use is currently supported.
- size_t RunEmptyCheckpoint()
+ // In debug builds, runnable_thread_ids will be populated with the thread IDs of the runnable
+ // threads to wait for.
+ size_t RunEmptyCheckpoint(std::vector<uint32_t>& runnable_thread_ids)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 843be92..dabf8c8 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -35,10 +35,12 @@
return (memcmp(version_, kVdexVersion, sizeof(kVdexVersion)) == 0);
}
-VdexFile::Header::Header(uint32_t dex_size,
+VdexFile::Header::Header(uint32_t number_of_dex_files,
+ uint32_t dex_size,
uint32_t verifier_deps_size,
uint32_t quickening_info_size)
- : dex_size_(dex_size),
+ : number_of_dex_files_(number_of_dex_files),
+ dex_size_(dex_size),
verifier_deps_size_(verifier_deps_size),
quickening_info_size_(quickening_info_size) {
memcpy(magic_, kVdexMagic, sizeof(kVdexMagic));
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 75a0d5e..3b114a9 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -43,7 +43,10 @@
public:
struct Header {
public:
- Header(uint32_t dex_size, uint32_t verifier_deps_size, uint32_t quickening_info_size);
+ Header(uint32_t number_of_dex_files_,
+ uint32_t dex_size,
+ uint32_t verifier_deps_size,
+ uint32_t quickening_info_size);
const char* GetMagic() const { return reinterpret_cast<const char*>(magic_); }
const char* GetVersion() const { return reinterpret_cast<const char*>(version_); }
@@ -54,18 +57,22 @@
uint32_t GetDexSize() const { return dex_size_; }
uint32_t GetVerifierDepsSize() const { return verifier_deps_size_; }
uint32_t GetQuickeningInfoSize() const { return quickening_info_size_; }
+ uint32_t GetNumberOfDexFiles() const { return number_of_dex_files_; }
private:
static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' };
- static constexpr uint8_t kVdexVersion[] = { '0', '0', '0', '\0' };
+ static constexpr uint8_t kVdexVersion[] = { '0', '0', '1', '\0' };
uint8_t magic_[4];
uint8_t version_[4];
+ uint32_t number_of_dex_files_;
uint32_t dex_size_;
uint32_t verifier_deps_size_;
uint32_t quickening_info_size_;
};
+ typedef uint32_t VdexChecksum;
+
static VdexFile* Open(const std::string& vdex_filename,
bool writable,
bool low_4gb,
@@ -88,7 +95,7 @@
ArrayRef<const uint8_t> GetVerifierDepsData() const {
return ArrayRef<const uint8_t>(
- Begin() + sizeof(Header) + GetHeader().GetDexSize(), GetHeader().GetVerifierDepsSize());
+ DexBegin() + GetHeader().GetDexSize(), GetHeader().GetVerifierDepsSize());
}
ArrayRef<const uint8_t> GetQuickeningInfo() const {
@@ -107,6 +114,12 @@
// is none.
const uint8_t* GetNextDexFileData(const uint8_t* cursor) const;
+ // Get the location checksum of the dex file number `dex_file_index`.
+ uint32_t GetLocationChecksum(uint32_t dex_file_index) const {
+ DCHECK_LT(dex_file_index, GetHeader().GetNumberOfDexFiles());
+ return reinterpret_cast<const uint32_t*>(Begin() + sizeof(Header))[dex_file_index];
+ }
+
private:
explicit VdexFile(MemMap* mmap) : mmap_(mmap) {}
@@ -115,11 +128,15 @@
}
const uint8_t* DexBegin() const {
- return Begin() + sizeof(Header);
+ return Begin() + sizeof(Header) + GetSizeOfChecksumsSection();
}
const uint8_t* DexEnd() const {
- return Begin() + sizeof(Header) + GetHeader().GetDexSize();
+ return DexBegin() + GetHeader().GetDexSize();
+ }
+
+ size_t GetSizeOfChecksumsSection() const {
+ return sizeof(VdexChecksum) * GetHeader().GetNumberOfDexFiles();
}
std::unique_ptr<MemMap> mmap_;
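With the new number_of_dex_files_ field, the vdex body is laid out as: header, one 32-bit checksum per dex file, the dex section, verifier deps, then quickening info, which is what DexBegin() and GetLocationChecksum() above compute. A sketch of that offset arithmetic follows; the struct below is illustrative only and not binary-compatible with the real format.

#include <cstddef>
#include <cstdint>
#include <iostream>

struct VdexHeaderSketch {
  uint8_t magic[4];
  uint8_t version[4];
  uint32_t number_of_dex_files;
  uint32_t dex_size;
  uint32_t verifier_deps_size;
  uint32_t quickening_info_size;
};

int main() {
  VdexHeaderSketch h{};
  h.number_of_dex_files = 2;
  h.dex_size = 40960;
  h.verifier_deps_size = 512;
  h.quickening_info_size = 128;

  const size_t checksums_size = sizeof(uint32_t) * h.number_of_dex_files;
  const size_t dex_begin = sizeof(VdexHeaderSketch) + checksums_size;
  const size_t deps_begin = dex_begin + h.dex_size;
  const size_t quickening_begin = deps_begin + h.verifier_deps_size;

  std::cout << "checksums at +" << sizeof(VdexHeaderSketch)
            << ", dex at +" << dex_begin
            << ", verifier deps at +" << deps_begin
            << ", quickening info at +" << quickening_begin
            << ", total " << quickening_begin + h.quickening_info_size << "\n";
}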
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 7137db8..ebecc85 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -41,6 +41,7 @@
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
+#include "mirror/method_handle_impl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "reg_type-inl.h"
@@ -410,15 +411,15 @@
result.kind = kSoftFailure;
if (method != nullptr &&
!CanCompilerHandleVerificationFailure(verifier.encountered_failure_types_)) {
- method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother);
+ method->AddAccessFlags(kAccCompileDontBother);
}
}
if (method != nullptr) {
if (verifier.HasInstructionThatWillThrow()) {
- method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother);
+ method->AddAccessFlags(kAccCompileDontBother);
}
if ((verifier.encountered_failure_types_ & VerifyError::VERIFY_ERROR_LOCKING) != 0) {
- method->SetAccessFlags(method->GetAccessFlags() | kAccMustCountLocks);
+ method->AddAccessFlags(kAccMustCountLocks);
}
}
} else {
@@ -1184,6 +1185,11 @@
result = result && CheckWideRegisterIndex(inst->VRegC());
break;
}
+ switch (inst->GetVerifyTypeArgumentH()) {
+ case Instruction::kVerifyRegHPrototype:
+ result = result && CheckPrototypeIndex(inst->VRegH());
+ break;
+ }
switch (inst->GetVerifyExtraFlags()) {
case Instruction::kVerifyArrayData:
result = result && CheckArrayData(code_offset);
@@ -1289,6 +1295,15 @@
return true;
}
+inline bool MethodVerifier::CheckPrototypeIndex(uint32_t idx) {
+ if (idx >= dex_file_->GetHeader().proto_ids_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad prototype index " << idx << " (max "
+ << dex_file_->GetHeader().proto_ids_size_ << ")";
+ return false;
+ }
+ return true;
+}
+
inline bool MethodVerifier::CheckStringIndex(uint32_t idx) {
if (idx >= dex_file_->GetHeader().string_ids_size_) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad string index " << idx << " (max "
@@ -2934,7 +2949,7 @@
* allowing the latter only if the "this" argument is the same as the "this" argument to
* this method (which implies that we're in a constructor ourselves).
*/
- const RegType& this_type = work_line_->GetInvocationThis(this, inst, is_range);
+ const RegType& this_type = work_line_->GetInvocationThis(this, inst);
if (this_type.IsConflict()) // failure.
break;
@@ -3015,7 +3030,7 @@
/* Get the type of the "this" arg, which should either be a sub-interface of called
* interface or Object (see comments in RegType::JoinClass).
*/
- const RegType& this_type = work_line_->GetInvocationThis(this, inst, is_range);
+ const RegType& this_type = work_line_->GetInvocationThis(this, inst);
if (this_type.IsZero()) {
/* null pointer always passes (and always fails at runtime) */
} else {
@@ -3057,10 +3072,37 @@
}
case Instruction::INVOKE_POLYMORPHIC:
case Instruction::INVOKE_POLYMORPHIC_RANGE: {
+ bool is_range = (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
+ ArtMethod* called_method = VerifyInvocationArgs(inst, METHOD_POLYMORPHIC, is_range);
+ if (called_method == nullptr) {
+ // Convert potential soft failures in VerifyInvocationArgs() to hard errors.
+ if (failure_messages_.size() > 0) {
+ std::string message = failure_messages_.back()->str();
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << message;
+ } else {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke-polymorphic verification failure.";
+ }
+ break;
+ }
+ if (!CheckSignaturePolymorphicMethod(called_method) ||
+ !CheckSignaturePolymorphicReceiver(inst)) {
+ break;
+ }
+ const uint32_t proto_idx = (is_range) ? inst->VRegH_4rcc() : inst->VRegH_45cc();
+ const char* descriptor =
+ dex_file_->GetReturnTypeDescriptor(dex_file_->GetProtoId(proto_idx));
+ const RegType& return_type =
+ reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
+ if (!return_type.IsLowHalf()) {
+ work_line_->SetResultRegisterType(this, return_type);
+ } else {
+ work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
+ }
+ // TODO(oth): remove when compiler support is available.
Fail(VERIFY_ERROR_FORCE_INTERPRETER)
- << "instruction is not supported by verifier; skipping verification";
+ << "invoke-polymorphic is not supported by compiler";
have_pending_experimental_failure_ = true;
- return false;
+ break;
}
case Instruction::NEG_INT:
case Instruction::NOT_INT:
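For invoke-polymorphic, the verifier above derives the return type from the proto id in vRegH and then chooses between a single result register and a wide pair. The only input to that choice is the dex type descriptor, where 'J' (long) and 'D' (double) are 64-bit; here is a small sketch with a made-up proto table.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

bool IsWideDescriptor(const std::string& descriptor) {
  return descriptor == "J" || descriptor == "D";  // long or double
}

int main() {
  // Pretend proto-id table: index -> return type descriptor.
  std::vector<std::string> proto_return_descriptors = {"V", "J", "Ljava/lang/Object;"};
  for (uint32_t proto_idx = 0; proto_idx < proto_return_descriptors.size(); ++proto_idx) {
    const std::string& descriptor = proto_return_descriptors[proto_idx];
    if (IsWideDescriptor(descriptor)) {
      std::cout << "proto " << proto_idx << " (" << descriptor
                << "): set wide result register pair\n";
    } else {
      std::cout << "proto " << proto_idx << " (" << descriptor
                << "): set single result register\n";
    }
  }
}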
@@ -3416,8 +3458,6 @@
work_line_->SetResultTypeToUnknown(this);
}
-
-
/*
* Handle "branch". Tag the branch target.
*
@@ -3740,7 +3780,8 @@
} else if (method_type == METHOD_SUPER && is_interface) {
return kInterfaceMethodResolution;
} else {
- DCHECK(method_type == METHOD_VIRTUAL || method_type == METHOD_SUPER);
+ DCHECK(method_type == METHOD_VIRTUAL || method_type == METHOD_SUPER
+ || method_type == METHOD_POLYMORPHIC);
return kVirtualMethodResolution;
}
}
@@ -3868,15 +3909,18 @@
return nullptr;
}
// See if the method type implied by the invoke instruction matches the access flags for the
- // target method.
+ // target method. The flags for METHOD_POLYMORPHIC are based on there being precisely two
+ // signature polymorphic methods supported by the run-time, which are native methods with variable
+ // arguments.
if ((method_type == METHOD_DIRECT && (!res_method->IsDirect() || res_method->IsStatic())) ||
(method_type == METHOD_STATIC && !res_method->IsStatic()) ||
((method_type == METHOD_SUPER ||
method_type == METHOD_VIRTUAL ||
- method_type == METHOD_INTERFACE) && res_method->IsDirect())
- ) {
+ method_type == METHOD_INTERFACE) && res_method->IsDirect()) ||
+ ((method_type == METHOD_POLYMORPHIC) &&
+ (!res_method->IsNative() || !res_method->IsVarargs()))) {
Fail(VERIFY_ERROR_CLASS_CHANGE) << "invoke type (" << method_type << ") does not match method "
- " type of " << res_method->PrettyMethod();
+ "type of " << res_method->PrettyMethod();
return nullptr;
}
return res_method;
@@ -3888,20 +3932,18 @@
// We use vAA as our expected arg count, rather than res_method->insSize, because we need to
// match the call to the signature. Also, we might be calling through an abstract method
// definition (which doesn't have register count values).
- const size_t expected_args = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
+ const size_t expected_args = inst->VRegA();
/* caught by static verifier */
DCHECK(is_range || expected_args <= 5);
- if (expected_args > code_item_->outs_size_) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid argument count (" << expected_args
- << ") exceeds outsSize (" << code_item_->outs_size_ << ")";
- return nullptr;
- }
- uint32_t arg[5];
- if (!is_range) {
- inst->GetVarArgs(arg);
+ // TODO(oth): Enable this path for invoke-polymorphic when b/33099829 is resolved.
+ if (method_type != METHOD_POLYMORPHIC) {
+ if (expected_args > code_item_->outs_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid argument count (" << expected_args
+ << ") exceeds outsSize (" << code_item_->outs_size_ << ")";
+ return nullptr;
+ }
}
- uint32_t sig_registers = 0;
/*
* Check the "this" argument, which must be an instance of the class that declared the method.
@@ -3909,7 +3951,7 @@
* rigorous check here (which is okay since we have to do it at runtime).
*/
if (method_type != METHOD_STATIC) {
- const RegType& actual_arg_type = work_line_->GetInvocationThis(this, inst, is_range);
+ const RegType& actual_arg_type = work_line_->GetInvocationThis(this, inst);
if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
CHECK(have_pending_hard_failure_);
return nullptr;
@@ -3945,7 +3987,7 @@
res_method_class = &FromClass(klass->GetDescriptor(&temp), klass,
klass->CannotBeAssignedFromOtherTypes());
} else {
- const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ const uint32_t method_idx = inst->VRegB();
const dex::TypeIndex class_idx = dex_file_->GetMethodId(method_idx).class_idx_;
res_method_class = &reg_types_.FromDescriptor(
GetClassLoader(),
@@ -3965,13 +4007,17 @@
}
}
}
- sig_registers = 1;
}
+ uint32_t arg[5];
+ if (!is_range) {
+ inst->GetVarArgs(arg);
+ }
+ uint32_t sig_registers = (method_type == METHOD_STATIC) ? 0 : 1;
for ( ; it->HasNext(); it->Next()) {
if (sig_registers >= expected_args) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation, expected " << inst->VRegA() <<
- " arguments, found " << sig_registers << " or more.";
+ " argument registers, method signature has " << sig_registers + 1 << " or more";
return nullptr;
}
@@ -3984,7 +4030,7 @@
}
const RegType& reg_type = reg_types_.FromDescriptor(GetClassLoader(), param_descriptor, false);
- uint32_t get_reg = is_range ? inst->VRegC_3rc() + static_cast<uint32_t>(sig_registers) :
+ uint32_t get_reg = is_range ? inst->VRegC() + static_cast<uint32_t>(sig_registers) :
arg[sig_registers];
if (reg_type.IsIntegralTypes()) {
const RegType& src_type = work_line_->GetRegisterType(this, get_reg);
@@ -4020,7 +4066,7 @@
}
if (expected_args != sig_registers) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation, expected " << expected_args <<
- " arguments, found " << sig_registers;
+ " argument registers, method signature has " << sig_registers;
return nullptr;
}
return res_method;
@@ -4032,11 +4078,10 @@
// As the method may not have been resolved, make this static check against what we expect.
// The main reason for this code block is to fail hard when we find an illegal use, e.g.,
// wrong number of arguments or wrong primitive types, even if the method could not be resolved.
- const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ const uint32_t method_idx = inst->VRegB();
DexFileParameterIterator it(*dex_file_,
dex_file_->GetProtoId(dex_file_->GetMethodId(method_idx).proto_idx_));
- VerifyInvocationArgsFromIterator<DexFileParameterIterator>(&it, inst, method_type, is_range,
- nullptr);
+ VerifyInvocationArgsFromIterator(&it, inst, method_type, is_range, nullptr);
}
class MethodParamListDescriptorIterator {
@@ -4069,8 +4114,7 @@
const Instruction* inst, MethodType method_type, bool is_range) {
// Resolve the method. This could be an abstract or concrete method depending on what sort of call
// we're making.
- const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
-
+ const uint32_t method_idx = inst->VRegB();
ArtMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type);
if (res_method == nullptr) { // error or class is unresolved
// Check what we can statically.
@@ -4133,10 +4177,84 @@
}
}
- // Process the target method's signature. This signature may or may not
- MethodParamListDescriptorIterator it(res_method);
- return VerifyInvocationArgsFromIterator<MethodParamListDescriptorIterator>(&it, inst, method_type,
- is_range, res_method);
+ if (method_type == METHOD_POLYMORPHIC) {
+ // Process the signature of the calling site that is invoking the method handle.
+ DexFileParameterIterator it(*dex_file_, dex_file_->GetProtoId(inst->VRegH()));
+ return VerifyInvocationArgsFromIterator(&it, inst, method_type, is_range, res_method);
+ } else {
+ // Process the target method's signature.
+ MethodParamListDescriptorIterator it(res_method);
+ return VerifyInvocationArgsFromIterator(&it, inst, method_type, is_range, res_method);
+ }
+}
+
+bool MethodVerifier::CheckSignaturePolymorphicMethod(ArtMethod* method) {
+ mirror::Class* klass = method->GetDeclaringClass();
+ if (klass != mirror::MethodHandle::StaticClass()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "Signature polymorphic method must be declared in java.lang.invoke.MethodClass";
+ return false;
+ }
+
+ const char* method_name = method->GetName();
+ if (strcmp(method_name, "invoke") != 0 && strcmp(method_name, "invokeExact") != 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "Signature polymorphic method name invalid: " << method_name;
+ return false;
+ }
+
+ const DexFile::TypeList* types = method->GetParameterTypeList();
+ if (types->Size() != 1) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "Signature polymorphic method has too many arguments " << types->Size() << " != 1";
+ return false;
+ }
+
+ const dex::TypeIndex argument_type_index = types->GetTypeItem(0).type_idx_;
+ const char* argument_descriptor = method->GetTypeDescriptorFromTypeIdx(argument_type_index);
+ if (strcmp(argument_descriptor, "[Ljava/lang/Object;") != 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "Signature polymorphic method has unexpected argument type: " << argument_descriptor;
+ return false;
+ }
+
+ const char* return_descriptor = method->GetReturnTypeDescriptor();
+ if (strcmp(return_descriptor, "Ljava/lang/Object;") != 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "Signature polymorphic method has unexpected return type: " << return_descriptor;
+ return false;
+ }
+
+ return true;
+}
+
+bool MethodVerifier::CheckSignaturePolymorphicReceiver(const Instruction* inst) {
+ const RegType& this_type = work_line_->GetInvocationThis(this, inst);
+ if (this_type.IsZero()) {
+ /* null pointer always passes (and always fails at run time) */
+ return true;
+ } else if (!this_type.IsNonZeroReferenceTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "invoke-polymorphic receiver is not a reference: "
+ << this_type;
+ return false;
+ } else if (this_type.IsUninitializedReference()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "invoke-polymorphic receiver is uninitialized: "
+ << this_type;
+ return false;
+ } else if (!this_type.HasClass()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "invoke-polymorphic receiver has no class: "
+ << this_type;
+ return false;
+ } else if (!this_type.GetClass()->IsSubClass(mirror::MethodHandle::StaticClass())) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "invoke-polymorphic receiver is not a subclass of MethodHandle: "
+ << this_type;
+ return false;
+ }
+ return true;
}
ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
@@ -4146,7 +4264,7 @@
} else {
DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_VIRTUAL_QUICK);
}
- const RegType& actual_arg_type = reg_line->GetInvocationThis(this, inst, is_range, allow_failure);
+ const RegType& actual_arg_type = reg_line->GetInvocationThis(this, inst, allow_failure);
if (!actual_arg_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << actual_arg_type << "'";
return nullptr;
@@ -4208,7 +4326,7 @@
// We use vAA as our expected arg count, rather than res_method->insSize, because we need to
// match the call to the signature. Also, we might be calling through an abstract method
// definition (which doesn't have register count values).
- const RegType& actual_arg_type = work_line_->GetInvocationThis(this, inst, is_range);
+ const RegType& actual_arg_type = work_line_->GetInvocationThis(this, inst);
if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
return nullptr;
}
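
For readers unfamiliar with invoke-polymorphic: the METHOD_POLYMORPHIC path added above verifies the call site against the proto referenced by the instruction's vRegH, rather than against the target method's own (Object[])Object signature, and sets the result register type from that proto's return descriptor. A minimal, hypothetical standalone example of such a call site (class name and values are illustrative only, not part of this change):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class InvokePolymorphicExample {
    public static void main(String[] args) throws Throwable {
        // Resolve a handle of type (String)int for String.length().
        MethodHandle length = MethodHandles.lookup()
                .findVirtual(String.class, "length", MethodType.methodType(int.class));
        // This call compiles to an invoke-polymorphic instruction whose proto
        // (vRegH) describes (String)int; the verifier walks that call-site
        // proto instead of the target method's own parameter list.
        int len = (int) length.invokeExact("hello");
        System.out.println(len);  // prints 5
    }
}
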
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index f3faecd..fa5a698 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -63,7 +63,8 @@
METHOD_STATIC, // static
METHOD_VIRTUAL, // virtual
METHOD_SUPER, // super
- METHOD_INTERFACE // interface
+ METHOD_INTERFACE, // interface
+ METHOD_POLYMORPHIC // polymorphic
};
std::ostream& operator<<(std::ostream& os, const MethodType& rhs);
@@ -474,6 +475,10 @@
// reference isn't for an array class.
bool CheckNewInstance(dex::TypeIndex idx);
+ // Perform static checks on a prototype indexing instruction. All we do here is ensure that the
+ // prototype index is in the valid range.
+ bool CheckPrototypeIndex(uint32_t idx);
+
/* Ensure that the string index is in the valid range. */
bool CheckStringIndex(uint32_t idx);
@@ -512,6 +517,12 @@
// - vA holds word count, vC holds index of first reg.
bool CheckVarArgRangeRegs(uint32_t vA, uint32_t vC);
+ // Checks that the method matches the expectations required to be signature polymorphic.
+ bool CheckSignaturePolymorphicMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Checks that the invoked receiver matches the expectations for signature polymorphic methods.
+ bool CheckSignaturePolymorphicReceiver(const Instruction* inst) REQUIRES_SHARED(Locks::mutator_lock_);
+
// Extract the relative offset from a branch instruction.
// Returns "false" on failure (e.g. this isn't a branch instruction).
bool GetBranchOffset(uint32_t cur_offset, int32_t* pOffset, bool* pConditional,
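
As a reminder of what CheckSignaturePolymorphicMethod() (declared above) accepts: the runtime supports exactly two signature polymorphic methods, MethodHandle.invoke() and MethodHandle.invokeExact(), both declared as native varargs methods. A sketch of that declared shape, for illustration only (this is not the real java.lang.invoke.MethodHandle):

// Illustrative shape only. A varargs method compiles to a single parameter of
// type Object[] and the declared return type is Object, matching the
// descriptor and access-flag checks in CheckSignaturePolymorphicMethod().
abstract class MethodHandleShape {
    public final native Object invoke(Object... args) throws Throwable;
    public final native Object invokeExact(Object... args) throws Throwable;
}
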
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index da3d946..a6088aa 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -44,8 +44,9 @@
}
const RegType& RegisterLine::GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
- bool is_range, bool allow_failure) {
- const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
+ bool allow_failure) {
+ DCHECK(inst->IsInvoke());
+ const size_t args_count = inst->VRegA();
if (args_count < 1) {
if (!allow_failure) {
verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
@@ -53,7 +54,7 @@
return verifier->GetRegTypeCache()->Conflict();
}
/* Get the element type of the array held in vsrc */
- const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ const uint32_t this_reg = inst->VRegC();
const RegType& this_type = GetRegisterType(verifier, this_reg);
if (!this_type.IsReferenceTypes()) {
if (!allow_failure) {
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 7603a79..221aa80 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -217,7 +217,6 @@
*/
const RegType& GetInvocationThis(MethodVerifier* verifier,
const Instruction* inst,
- bool is_range,
bool allow_failure = false)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/test/080-oom-fragmentation/expected.txt b/test/080-oom-fragmentation/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/080-oom-fragmentation/expected.txt
diff --git a/test/080-oom-fragmentation/info.txt b/test/080-oom-fragmentation/info.txt
new file mode 100644
index 0000000..5bcc425
--- /dev/null
+++ b/test/080-oom-fragmentation/info.txt
@@ -0,0 +1,2 @@
+Test that the allocator can go from a full heap to an empty one and is able to allocate a large
+object array.
diff --git a/test/080-oom-fragmentation/src/Main.java b/test/080-oom-fragmentation/src/Main.java
new file mode 100644
index 0000000..cf21139
--- /dev/null
+++ b/test/080-oom-fragmentation/src/Main.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ // Reserve around 1/4 of the RAM for keeping objects live.
+ long maxMem = Runtime.getRuntime().maxMemory();
+ Object[] holder = new Object[(int)(maxMem / 16)];
+ int count = 0;
+ try {
+ while (true) {
+ holder[count++] = new Object[1025]; // A bit over one page.
+ }
+ } catch (OutOfMemoryError e) {}
+ for (int i = 0; i < count; ++i) {
+ holder[i] = null;
+ }
+ // Make sure the heap can handle allocating a large object array. This checks that free
+ // pages are correctly coalesced by the allocator.
+ holder[0] = new Object[(int)(maxMem / 8)];
+ }
+}
diff --git a/test/522-checker-regression-monitor-exit/src/Main.java b/test/522-checker-regression-monitor-exit/src/Main.java
index a5e9512..c4f80fc 100644
--- a/test/522-checker-regression-monitor-exit/src/Main.java
+++ b/test/522-checker-regression-monitor-exit/src/Main.java
@@ -66,7 +66,7 @@
}
try {
- List<Future<Integer>> results = pool.invokeAll(queries, 5, TimeUnit.SECONDS);
+ List<Future<Integer>> results = pool.invokeAll(queries);
int hash = obj.hashCode();
for (int i = 0; i < numThreads; ++i) {
diff --git a/test/616-cha/expected.txt b/test/616-cha/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/616-cha/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/616-cha/info.txt b/test/616-cha/info.txt
new file mode 100644
index 0000000..50e3b0d
--- /dev/null
+++ b/test/616-cha/info.txt
@@ -0,0 +1 @@
+Test for Class Hierarchy Analysis (CHA).
diff --git a/test/616-cha/src/Main.java b/test/616-cha/src/Main.java
new file mode 100644
index 0000000..787318d
--- /dev/null
+++ b/test/616-cha/src/Main.java
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main1 {
+ String getName() {
+ return "Main1";
+ }
+
+ void printError(String msg) {
+ System.out.println(msg);
+ }
+
+ void foo(int i) {
+ if (i != 1) {
+ printError("error1");
+ }
+ }
+
+ int getValue1() {
+ return 1;
+ }
+ int getValue2() {
+ return 2;
+ }
+ int getValue3() {
+ return 3;
+ }
+ int getValue4() {
+ return 4;
+ }
+ int getValue5() {
+ return 5;
+ }
+ int getValue6() {
+ return 6;
+ }
+}
+
+class Main2 extends Main1 {
+ String getName() {
+ return "Main2";
+ }
+
+ void foo(int i) {
+ if (i != 2) {
+ printError("error2");
+ }
+ }
+}
+
+class Main3 extends Main1 {
+ String getName() {
+ return "Main3";
+ }
+}
+
+public class Main {
+ static Main1 sMain1;
+ static Main1 sMain2;
+
+ static boolean sIsOptimizing = true;
+ static boolean sHasJIT = true;
+ static volatile boolean sOtherThreadStarted;
+
+ // sMain1.foo() will always be Main1.foo() before Main2 is loaded/linked.
+ // So sMain1.foo() can be devirtualized to Main1.foo() and be inlined.
+ // After Dummy.createMain2(), which links in Main2, the live testOverride() frames
+ // on the stack should be deoptimized.
+ static void testOverride(boolean createMain2, boolean wait, boolean setHasJIT) {
+ if (setHasJIT) {
+ if (isInterpreted()) {
+ sHasJIT = false;
+ }
+ return;
+ }
+
+ if (createMain2 && (sIsOptimizing || sHasJIT)) {
+ assertIsManaged();
+ }
+
+ sMain1.foo(sMain1.getClass() == Main1.class ? 1 : 2);
+
+ if (createMain2) {
+ // Wait for the other thread to start.
+ while (!sOtherThreadStarted);
+ // Create a Main2 instance and assign it to sMain2.
+ // sMain1 is kept the same.
+ sMain2 = Dummy.createMain2();
+ // Wake up the other thread.
+ synchronized(Main.class) {
+ Main.class.notify();
+ }
+ } else if (wait) {
+ // This is the other thread.
+ synchronized(Main.class) {
+ sOtherThreadStarted = true;
+ // Wait for Main2 to be linked and deoptimization to be triggered.
+ try {
+ Main.class.wait();
+ } catch (Exception e) {
+ }
+ }
+ }
+
+ // There should be a deoptimization here right after Main2 is linked by
+ // calling Dummy.createMain2(), even though sMain1 didn't change.
+ // The behavior here would be different with an inline cache, which
+ // doesn't deoptimize since sMain1 still hits the type cache.
+ sMain1.foo(sMain1.getClass() == Main1.class ? 1 : 2);
+ if ((createMain2 || wait) && sHasJIT && !sIsOptimizing) {
+ // This method should be deoptimized right after Main2 is created.
+ assertIsInterpreted();
+ }
+
+ if (sMain2 != null) {
+ sMain2.foo(sMain2.getClass() == Main1.class ? 1 : 2);
+ }
+ }
+
+ static Main1[] sArray;
+
+ static long calcValue(Main1 m) {
+ return m.getValue1()
+ + m.getValue2() * 2
+ + m.getValue3() * 3
+ + m.getValue4() * 4
+ + m.getValue5() * 5
+ + m.getValue6() * 6;
+ }
+
+ static long testNoOverrideLoop(int count) {
+ long sum = 0;
+ for (int i=0; i<count; i++) {
+ sum += calcValue(sArray[0]);
+ sum += calcValue(sArray[1]);
+ sum += calcValue(sArray[2]);
+ }
+ return sum;
+ }
+
+ static void testNoOverride() {
+ sArray = new Main1[3];
+ sArray[0] = new Main1();
+ sArray[1] = Dummy.createMain2();
+ sArray[2] = Dummy.createMain3();
+ long sum = 0;
+ // Loop enough to get methods JITed.
+ for (int i=0; i<100; i++) {
+ testNoOverrideLoop(1);
+ }
+ ensureJitCompiled(Main.class, "testNoOverrideLoop");
+ ensureJitCompiled(Main.class, "calcValue");
+
+ long t1 = System.currentTimeMillis();
+ sum = testNoOverrideLoop(100000);
+ long t2 = System.currentTimeMillis();
+ if (sum != 27300000L) {
+ System.out.println("Unexpected result.");
+ }
+ }
+
+ private static void assertSingleImplementation(Class<?> clazz, String method_name, boolean b) {
+ if (hasSingleImplementation(clazz, method_name) != b) {
+ System.out.println(clazz + "." + method_name +
+ " doesn't have single implementation value of " + b);
+ }
+ }
+
+ // Test scenarios under which CHA-based devirtualization happens,
+ // and how class loading that overrides a method can invalidate compiled code.
+ // Also test the pure non-overriding case, which is mostly for checking the
+ // generated code form.
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+
+ // Check some boot-image methods.
+ assertSingleImplementation(java.util.ArrayList.class, "size", true);
+ // java.util.LinkedHashMap overrides get().
+ assertSingleImplementation(java.util.HashMap.class, "get", false);
+
+ // We don't set the single-implementation modifier bit for final classes or methods
+ // since we can devirtualize without CHA for those cases. However, hasSingleImplementation()
+ // should still return true for them.
+ assertSingleImplementation(java.lang.String.class, "charAt", true);
+ assertSingleImplementation(java.lang.Thread.class, "join", true);
+ // We don't set single-implementation modifier bit for native methods.
+ assertSingleImplementation(java.lang.Thread.class, "isInterrupted", false);
+
+ if (isInterpreted()) {
+ sIsOptimizing = false;
+ }
+
+ // sMain1 is an instance of Main1. Main2 hasn't been loaded yet.
+ sMain1 = new Main1();
+
+ // Loop enough to get testOverride() JITed.
+ for (int i=0; i<100; i++) {
+ testOverride(false, false, false);
+ }
+
+ ensureJitCompiled(Main.class, "testOverride");
+ testOverride(false, false, true);
+
+ if (sHasJIT && !sIsOptimizing) {
+ assertSingleImplementation(Main1.class, "foo", true);
+ } else {
+ // Main2 is verified ahead-of-time so it's linked in already.
+ }
+ assertSingleImplementation(Main1.class, "getValue1", true);
+
+ // Create another thread that also calls sMain1.foo().
+ // This tries to exercise suspending and deoptimizing another thread.
+ new Thread() {
+ public void run() {
+ testOverride(false, true, false);
+ }
+ }.start();
+
+ // This will create Main2 instance in the middle of testOverride().
+ testOverride(true, false, false);
+ assertSingleImplementation(Main1.class, "foo", false);
+ assertSingleImplementation(Main1.class, "getValue1", true);
+
+ testNoOverride();
+ }
+
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+ private static native void assertIsInterpreted();
+ private static native void assertIsManaged();
+ private static native boolean isInterpreted();
+ private static native boolean hasSingleImplementation(Class<?> clazz, String method_name);
+}
+
+// Do it in another class to avoid class loading due to verifier.
+class Dummy {
+ static Main1 createMain2() {
+ return new Main2();
+ }
+ static Main1 createMain3() {
+ return new Main3();
+ }
+}
diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc
deleted file mode 100644
index b035896..0000000
--- a/test/626-const-class-linking/clear_dex_cache_types.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "jni.h"
-#include "object_lock.h"
-#include "scoped_thread_state_change-inl.h"
-
-namespace art {
-
-extern "C" JNIEXPORT void JNICALL Java_Main_nativeClearResolvedTypes(JNIEnv*, jclass, jclass cls) {
- ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = soa.Decode<mirror::Class>(cls)->GetDexCache();
- for (size_t i = 0, num_types = dex_cache->NumResolvedTypes(); i != num_types; ++i) {
- dex_cache->SetResolvedType(dex::TypeIndex(i), ObjPtr<mirror::Class>(nullptr));
- }
-}
-
-extern "C" JNIEXPORT void JNICALL Java_Main_nativeSkipVerification(JNIEnv*, jclass, jclass cls) {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::Class> klass = hs.NewHandle(soa.Decode<mirror::Class>(cls));
- mirror::Class::Status status = klass->GetStatus();
- if (status == mirror::Class::kStatusResolved) {
- ObjectLock<mirror::Class> lock(soa.Self(), klass);
- klass->SetStatus(klass, mirror::Class::kStatusVerified, soa.Self());
- } else {
- LOG(ERROR) << klass->PrettyClass() << " has unexpected status: " << status;
- }
-}
-
-extern "C" JNIEXPORT void JNICALL Java_Main_nativeDumpClasses(JNIEnv*, jclass, jobjectArray array) {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ObjectArray<mirror::Object>> classes =
- hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(array));
- CHECK(classes.Get() != nullptr);
- for (size_t i = 0, length = classes->GetLength(); i != length; ++i) {
- CHECK(classes->Get(i) != nullptr) << i;
- CHECK(classes->Get(i)->IsClass())
- << i << " " << classes->Get(i)->GetClass()->PrettyDescriptor();
- mirror::Class* as_class = classes->Get(i)->AsClass();
- mirror::ClassLoader* loader = as_class->GetClassLoader();
- LOG(ERROR) << "Class #" << i << ": " << as_class->PrettyDescriptor()
- << " @" << static_cast<const void*>(as_class)
- << " status:" << as_class->GetStatus()
- << " definingLoader:" << static_cast<const void*>(loader)
- << " definingLoaderClass:"
- << (loader != nullptr ? loader->GetClass()->PrettyDescriptor() : "N/A");
- }
-}
-
-} // namespace art
diff --git a/test/626-const-class-linking/expected.txt b/test/626-const-class-linking/expected.txt
deleted file mode 100644
index de1b815..0000000
--- a/test/626-const-class-linking/expected.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-JNI_OnLoad called
-first: Helper1 class loader: DelegatingLoader
-second: Test class loader: DefiningLoader
-first: Helper1 class loader: DelegatingLoader
-second: Test class loader: DefiningLoader
-testClearDexCache done
-first: Helper1 class loader: DelegatingLoader
-second: Test class loader: DefiningLoader
-first: Helper2 class loader: DelegatingLoader
-second: Test class loader: DefiningLoader
-testMultiDex done
-first: Helper1 class loader: RacyLoader
-second: Test class loader: DefiningLoader
-first: Helper1 class loader: RacyLoader
-second: Test class loader: DefiningLoader
-first: Helper1 class loader: RacyLoader
-second: Test class loader: DefiningLoader
-first: Helper1 class loader: RacyLoader
-second: Test class loader: DefiningLoader
-total: 4
- throwables: 0
- classes: 4 (1 unique)
-testRacyLoader done
-first: Helper1 class loader: RacyLoader
-second: Test class loader: DefiningLoader
-first: Helper1 class loader: RacyLoader
-second: Test class loader: DefiningLoader
-first: Helper3 class loader: RacyLoader
-second: Test3 class loader: DefiningLoader
-first: Helper3 class loader: RacyLoader
-second: Test3 class loader: DefiningLoader
-total: 4
- throwables: 0
- classes: 4 (2 unique)
-testRacyLoader2 done
-java.lang.NoClassDefFoundError: Initiating class loader of type MisbehavingLoader returned class Helper2 instead of Test.
-testMisbehavingLoader done
-first: Helper1 class loader: RacyMisbehavingLoader
-second: Test class loader: DefiningLoader
-first: Helper1 class loader: RacyMisbehavingLoader
-second: Test class loader: DefiningLoader
-first: Helper1 class loader: RacyMisbehavingLoader
-second: Test class loader: DefiningLoader
-first: Helper1 class loader: RacyMisbehavingLoader
-second: Test class loader: DefiningLoader
-total: 4
- throwables: 0
- classes: 4 (1 unique)
-testRacyMisbehavingLoader done
-first: Helper1 class loader: RacyMisbehavingLoader
-second: Test class loader: DefiningLoader
-first: Helper1 class loader: RacyMisbehavingLoader
-second: Test class loader: DefiningLoader
-first: Helper1 class loader: RacyMisbehavingLoader
-second: Test class loader: DefiningLoader
-first: Helper1 class loader: RacyMisbehavingLoader
-second: Test class loader: DefiningLoader
-total: 4
- throwables: 0
- classes: 4 (1 unique)
-testRacyMisbehavingLoader2 done
diff --git a/test/626-const-class-linking/info.txt b/test/626-const-class-linking/info.txt
deleted file mode 100644
index 9c19a46..0000000
--- a/test/626-const-class-linking/info.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-Test that once a const-class instruction is linked, it will keep referring
-to the same class even in the presence of custom class loaders even after
-clearing the dex cache type array.
diff --git a/test/626-const-class-linking/multidex.jpp b/test/626-const-class-linking/multidex.jpp
deleted file mode 100644
index c7a6648..0000000
--- a/test/626-const-class-linking/multidex.jpp
+++ /dev/null
@@ -1,27 +0,0 @@
-ClassPair:
- @@com.android.jack.annotations.ForceInMainDex
- class ClassPair
-DefiningLoader:
- @@com.android.jack.annotations.ForceInMainDex
- class DefiningLoader
-DelegatingLoader:
- @@com.android.jack.annotations.ForceInMainDex
- class DelegatingLoader
-Helper1:
- @@com.android.jack.annotations.ForceInMainDex
- class Helper1
-Main:
- @@com.android.jack.annotations.ForceInMainDex
- class Main
-MisbehavingLoader:
- @@com.android.jack.annotations.ForceInMainDex
- class MisbehavingLoader
-RacyLoader:
- @@com.android.jack.annotations.ForceInMainDex
- class RacyLoader
-RacyMisbehavingHelper:
- @@com.android.jack.annotations.ForceInMainDex
- class RacyMisbehavingHelper
-RacyMisbehavingLoader:
- @@com.android.jack.annotations.ForceInMainDex
- class RacyMisbehavingLoader
diff --git a/test/626-const-class-linking/src-multidex/Helper2.java b/test/626-const-class-linking/src-multidex/Helper2.java
deleted file mode 100644
index 5bb31ee..0000000
--- a/test/626-const-class-linking/src-multidex/Helper2.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Helper2 {
- public static ClassPair get() {
- Class<?> helper2_class = Helper2.class;
- Class<?> test_class = Test.class;
- return new ClassPair(helper2_class, test_class);
- }
-}
diff --git a/test/626-const-class-linking/src-multidex/Helper3.java b/test/626-const-class-linking/src-multidex/Helper3.java
deleted file mode 100644
index af996de..0000000
--- a/test/626-const-class-linking/src-multidex/Helper3.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Helper3 {
- public static ClassPair get() {
- Class<?> helper3_class = Helper3.class;
- Class<?> test3_class = Test3.class;
- return new ClassPair(helper3_class, test3_class);
- }
-}
diff --git a/test/626-const-class-linking/src-multidex/Test3.java b/test/626-const-class-linking/src-multidex/Test3.java
deleted file mode 100644
index c4b134d..0000000
--- a/test/626-const-class-linking/src-multidex/Test3.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Test3 {
-}
diff --git a/test/626-const-class-linking/src/ClassPair.java b/test/626-const-class-linking/src/ClassPair.java
deleted file mode 100644
index b07036c..0000000
--- a/test/626-const-class-linking/src/ClassPair.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class ClassPair {
- public Class<?> first;
- public Class<?> second;
-
- public ClassPair(Class<?> first, Class<?> second) {
- this.first = first;
- this.second = second;
- }
-
- public void print() {
- String first_loader_name = first.getClassLoader().getClass().getName();
- System.out.println("first: " + first.getName() + " class loader: " + first_loader_name);
- String second_loader_name = second.getClassLoader().getClass().getName();
- System.out.println("second: " + second.getName() + " class loader: " + second_loader_name);
- }
-}
diff --git a/test/626-const-class-linking/src/DefiningLoader.java b/test/626-const-class-linking/src/DefiningLoader.java
deleted file mode 100644
index b17ab77..0000000
--- a/test/626-const-class-linking/src/DefiningLoader.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Method;
-import java.lang.reflect.InvocationTargetException;
-
-/**
- * A class loader with atypical behavior: we try to load a private
- * class implementation before asking the system or boot loader. This
- * is used to create multiple classes with identical names in a single VM.
- *
- * If DexFile is available, we use that; if not, we assume we're not in
- * Dalvik and instantiate the class with defineClass().
- *
- * The location of the DEX files and class data is dependent upon the
- * test framework.
- */
-public class DefiningLoader extends ClassLoader {
- static {
- // For JVM, register as parallel capable.
- // Android treats all class loaders as parallel capable and makes this a no-op.
- registerAsParallelCapable();
- }
-
- /* this is where the .class files live */
- static final String CLASS_PATH1 = "classes/";
- static final String CLASS_PATH2 = "classes2/";
-
- /* this is the DEX/Jar file */
- static final String DEX_FILE = System.getenv("DEX_LOCATION") + "/626-const-class-linking.jar";
-
- /* on Dalvik, this is a DexFile; otherwise, it's null */
- private Class<?> mDexClass;
-
- private Object mDexFile;
-
- /**
- * Construct DefiningLoader, grabbing a reference to the DexFile class
- * if we're running under Dalvik.
- */
- public DefiningLoader(ClassLoader parent) {
- super(parent);
-
- try {
- mDexClass = parent.loadClass("dalvik.system.DexFile");
- } catch (ClassNotFoundException cnfe) {
- // ignore -- not running Dalvik
- }
- }
-
- /**
- * Finds the class with the specified binary name.
- *
- * We search for a file in CLASS_PATH or pull an entry from DEX_FILE.
- * If we don't find a match, we throw an exception.
- */
- protected Class<?> findClass(String name) throws ClassNotFoundException
- {
- if (mDexClass != null) {
- return findClassDalvik(name);
- } else {
- return findClassNonDalvik(name);
- }
- }
-
- /**
- * Finds the class with the specified binary name, from a DEX file.
- */
- private Class<?> findClassDalvik(String name)
- throws ClassNotFoundException {
-
- if (mDexFile == null) {
- synchronized (DefiningLoader.class) {
- Constructor<?> ctor;
- /*
- * Construct a DexFile object through reflection.
- */
- try {
- ctor = mDexClass.getConstructor(String.class);
- } catch (NoSuchMethodException nsme) {
- throw new ClassNotFoundException("getConstructor failed",
- nsme);
- }
-
- try {
- mDexFile = ctor.newInstance(DEX_FILE);
- } catch (InstantiationException ie) {
- throw new ClassNotFoundException("newInstance failed", ie);
- } catch (IllegalAccessException iae) {
- throw new ClassNotFoundException("newInstance failed", iae);
- } catch (InvocationTargetException ite) {
- throw new ClassNotFoundException("newInstance failed", ite);
- }
- }
- }
-
- /*
- * Call DexFile.loadClass(String, ClassLoader).
- */
- Method meth;
-
- try {
- meth = mDexClass.getMethod("loadClass", String.class, ClassLoader.class);
- } catch (NoSuchMethodException nsme) {
- throw new ClassNotFoundException("getMethod failed", nsme);
- }
-
- try {
- meth.invoke(mDexFile, name, this);
- } catch (IllegalAccessException iae) {
- throw new ClassNotFoundException("loadClass failed", iae);
- } catch (InvocationTargetException ite) {
- throw new ClassNotFoundException("loadClass failed",
- ite.getCause());
- }
-
- return null;
- }
-
- /**
- * Finds the class with the specified binary name, from .class files.
- */
- private Class<?> findClassNonDalvik(String name)
- throws ClassNotFoundException {
-
- String[] pathNames = { CLASS_PATH1 + name + ".class", CLASS_PATH2 + name + ".class" };
-
- String pathName = null;
- RandomAccessFile raf = null;
-
- for (String pn : pathNames) {
- pathName = pn;
- try {
- //System.out.println("--- Defining: looking for " + pathName);
- raf = new RandomAccessFile(new File(pathName), "r");
- break;
- } catch (FileNotFoundException fnfe) {
- }
- }
- if (raf == null) {
- throw new ClassNotFoundException("Not found: " + pathNames[0] + ":" + pathNames[1]);
- }
-
- /* read the entire file in */
- byte[] fileData;
- try {
- fileData = new byte[(int) raf.length()];
- raf.readFully(fileData);
- } catch (IOException ioe) {
- throw new ClassNotFoundException("Read error: " + pathName);
- } finally {
- try {
- raf.close();
- } catch (IOException ioe) {
- // drop
- }
- }
-
- /* create the class */
- //System.out.println("--- Defining: defining " + name);
- try {
- return defineClass(name, fileData, 0, fileData.length);
- } catch (Throwable th) {
- throw new ClassNotFoundException("defineClass failed", th);
- }
- }
-
- /**
- * Load a class.
- *
- * Normally a class loader wouldn't override this, but we want our
- * version of the class to take precedence over an already-loaded
- * version.
- *
- * We still want the system classes (e.g. java.lang.Object) from the
- * bootstrap class loader.
- */
- synchronized protected Class<?> loadClass(String name, boolean resolve)
- throws ClassNotFoundException
- {
- Class<?> res;
-
- /*
- * 1. Invoke findLoadedClass(String) to check if the class has
- * already been loaded.
- *
- * This doesn't change.
- */
- res = findLoadedClass(name);
- if (res != null) {
- // System.out.println("FancyLoader.loadClass: " + name + " already loaded");
- if (resolve)
- resolveClass(res);
- return res;
- }
-
- /*
- * 3. Invoke the findClass(String) method to find the class.
- */
- try {
- res = findClass(name);
- if (resolve)
- resolveClass(res);
- }
- catch (ClassNotFoundException e) {
- // we couldn't find it, so eat the exception and keep going
- }
-
- /*
- * 2. Invoke the loadClass method on the parent class loader. If
- * the parent loader is null the class loader built-in to the
- * virtual machine is used, instead.
- *
- * (Since we're not in java.lang, we can't actually invoke the
- * parent's loadClass() method, but we passed our parent to the
- * super-class which can take care of it for us.)
- */
- res = super.loadClass(name, resolve); // returns class or throws
- return res;
- }
-}
diff --git a/test/626-const-class-linking/src/DelegatingLoader.java b/test/626-const-class-linking/src/DelegatingLoader.java
deleted file mode 100644
index 49955d4..0000000
--- a/test/626-const-class-linking/src/DelegatingLoader.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class DelegatingLoader extends DefiningLoader {
- private DefiningLoader defining_loader;
-
- public DelegatingLoader(ClassLoader parent, DefiningLoader defining_loader) {
- super(parent);
- this.defining_loader = defining_loader;
- }
-
- public void resetDefiningLoader(DefiningLoader defining_loader) {
- this.defining_loader = defining_loader;
- }
-
- protected Class<?> findClass(String name) throws ClassNotFoundException
- {
- if (name.equals("Test")) {
- throw new Error("Unexpected DelegatingLoader.findClass(\"Test\")");
- }
- return super.findClass(name);
- }
-
- protected Class<?> loadClass(String name, boolean resolve)
- throws ClassNotFoundException
- {
- if (name.equals("Test")) {
- return defining_loader.loadClass(name, resolve);
- }
- return super.loadClass(name, resolve);
- }
-}
diff --git a/test/626-const-class-linking/src/Helper1.java b/test/626-const-class-linking/src/Helper1.java
deleted file mode 100644
index ff9cd1a..0000000
--- a/test/626-const-class-linking/src/Helper1.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Helper1 {
- public static ClassPair get() {
- Class<?> helper1_class = Helper1.class;
- Class<?> test_class = Test.class;
- return new ClassPair(helper1_class, test_class);
- }
-}
diff --git a/test/626-const-class-linking/src/Main.java b/test/626-const-class-linking/src/Main.java
deleted file mode 100644
index 0029428..0000000
--- a/test/626-const-class-linking/src/Main.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.lang.ref.WeakReference;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-
-public class Main {
- public static void main(String[] args) throws Exception {
- try {
- System.loadLibrary(args[0]);
- } catch (UnsatisfiedLinkError ule) {
- usingRI = true;
- // Add expected JNI_OnLoad log line to match expected.txt.
- System.out.println("JNI_OnLoad called");
- }
-
- testClearDexCache();
- testMultiDex();
- testRacyLoader();
- testRacyLoader2();
- testMisbehavingLoader();
- testRacyMisbehavingLoader();
- testRacyMisbehavingLoader2();
- }
-
- private static void testClearDexCache() throws Exception {
- DelegatingLoader delegating_loader = createDelegatingLoader();
- Class<?> helper = delegating_loader.loadClass("Helper1");
-
- WeakReference<Class<?>> weak_test1 = wrapHelperGet(helper);
- changeInner(delegating_loader);
- clearResolvedTypes(helper);
- Runtime.getRuntime().gc();
- WeakReference<Class<?>> weak_test2 = wrapHelperGet(helper);
- Runtime.getRuntime().gc();
-
- Class<?> test1 = weak_test1.get();
- if (test1 == null) {
- System.out.println("test1 disappeared");
- }
- Class<?> test2 = weak_test2.get();
- if (test2 == null) {
- System.out.println("test2 disappeared");
- }
- if (test1 != test2) {
- System.out.println("test1 != test2");
- }
-
- System.out.println("testClearDexCache done");
- }
-
- private static void testMultiDex() throws Exception {
- DelegatingLoader delegating_loader = createDelegatingLoader();
-
- Class<?> helper1 = delegating_loader.loadClass("Helper1");
- WeakReference<Class<?>> weak_test1 = wrapHelperGet(helper1);
-
- changeInner(delegating_loader);
-
- Class<?> helper2 = delegating_loader.loadClass("Helper2");
- WeakReference<Class<?>> weak_test2 = wrapHelperGet(helper2);
-
- Runtime.getRuntime().gc();
-
- Class<?> test1 = weak_test1.get();
- if (test1 == null) {
- System.out.println("test1 disappeared");
- }
- Class<?> test2 = weak_test2.get();
- if (test2 == null) {
- System.out.println("test2 disappeared");
- }
- if (test1 != test2) {
- System.out.println("test1 != test2");
- }
-
- System.out.println("testMultiDex done");
- }
-
- private static void testMisbehavingLoader() throws Exception {
- ClassLoader system_loader = ClassLoader.getSystemClassLoader();
- DefiningLoader defining_loader = new DefiningLoader(system_loader);
- MisbehavingLoader misbehaving_loader =
- new MisbehavingLoader(system_loader, defining_loader);
- Class<?> helper = misbehaving_loader.loadClass("Helper1");
-
- try {
- WeakReference<Class<?>> weak_test = wrapHelperGet(helper);
- } catch (InvocationTargetException ite) {
- String message = ite.getCause().getMessage();
- if (usingRI && "Test".equals(message)) {
- // Replace RI message with dalvik message to match expected.txt.
- message = "Initiating class loader of type " +
- misbehaving_loader.getClass().getName() +
- " returned class Helper2 instead of Test.";
- }
- System.out.println(ite.getCause().getClass().getName() + ": " + message);
- }
- System.out.println("testMisbehavingLoader done");
- }
-
- private static void testRacyLoader() throws Exception {
- final ClassLoader system_loader = ClassLoader.getSystemClassLoader();
-
- final Thread[] threads = new Thread[4];
- final Object[] results = new Object[threads.length];
-
- final RacyLoader racy_loader = new RacyLoader(system_loader, threads.length);
- final Class<?> helper1 = racy_loader.loadClass("Helper1");
- skipVerification(helper1); // Avoid class loading during verification.
-
- for (int i = 0; i != threads.length; ++i) {
- final int my_index = i;
- Thread t = new Thread() {
- public void run() {
- try {
- Method get = helper1.getDeclaredMethod("get");
- results[my_index] = get.invoke(null);
- } catch (InvocationTargetException ite) {
- results[my_index] = ite.getCause();
- } catch (Throwable t) {
- results[my_index] = t;
- }
- }
- };
- t.start();
- threads[i] = t;
- }
- for (Thread t : threads) {
- t.join();
- }
- dumpResultStats(results, 1);
- System.out.println("testRacyLoader done");
- }
-
- private static void testRacyLoader2() throws Exception {
- final ClassLoader system_loader = ClassLoader.getSystemClassLoader();
-
- final Thread[] threads = new Thread[4];
- final Object[] results = new Object[threads.length];
-
- final RacyLoader racy_loader = new RacyLoader(system_loader, threads.length);
- final Class<?> helper1 = racy_loader.loadClass("Helper1");
- skipVerification(helper1); // Avoid class loading during verification.
- final Class<?> helper3 = racy_loader.loadClass("Helper3");
- skipVerification(helper3); // Avoid class loading during verification.
-
- for (int i = 0; i != threads.length; ++i) {
- final int my_index = i;
- Thread t = new Thread() {
- public void run() {
- try {
- Class<?> helper = (my_index < threads.length / 2) ? helper1 : helper3;
- Method get = helper.getDeclaredMethod("get");
- results[my_index] = get.invoke(null);
- } catch (InvocationTargetException ite) {
- results[my_index] = ite.getCause();
- } catch (Throwable t) {
- results[my_index] = t;
- }
- }
- };
- t.start();
- threads[i] = t;
- }
- for (Thread t : threads) {
- t.join();
- }
- dumpResultStats(results, 2);
- System.out.println("testRacyLoader2 done");
- }
-
- private static void testRacyMisbehavingLoader() throws Exception {
- final ClassLoader system_loader = ClassLoader.getSystemClassLoader();
-
- final Thread[] threads = new Thread[4];
- final Object[] results = new Object[threads.length];
-
- final RacyMisbehavingLoader racy_loader =
- new RacyMisbehavingLoader(system_loader, threads.length, false);
- final Class<?> helper1 = racy_loader.loadClass("RacyMisbehavingHelper");
- skipVerification(helper1); // Avoid class loading during verification.
-
- for (int i = 0; i != threads.length; ++i) {
- final int my_index = i;
- Thread t = new Thread() {
- public void run() {
- try {
- Method get = helper1.getDeclaredMethod("get");
- results[my_index] = get.invoke(null);
- } catch (InvocationTargetException ite) {
- results[my_index] = ite.getCause();
- } catch (Throwable t) {
- results[my_index] = t;
- }
- }
- };
- t.start();
- threads[i] = t;
- }
- for (Thread t : threads) {
- t.join();
- }
- dumpResultStats(results, 1);
- System.out.println("testRacyMisbehavingLoader done");
- }
-
- private static void testRacyMisbehavingLoader2() throws Exception {
- final ClassLoader system_loader = ClassLoader.getSystemClassLoader();
-
- final Thread[] threads = new Thread[4];
- final Object[] results = new Object[threads.length];
-
- final RacyMisbehavingLoader racy_loader =
- new RacyMisbehavingLoader(system_loader, threads.length, true);
- final Class<?> helper1 = racy_loader.loadClass("RacyMisbehavingHelper");
- skipVerification(helper1); // Avoid class loading during verification.
-
- for (int i = 0; i != threads.length; ++i) {
- final int my_index = i;
- Thread t = new Thread() {
- public void run() {
- try {
- Method get = helper1.getDeclaredMethod("get");
- results[my_index] = get.invoke(null);
- } catch (InvocationTargetException ite) {
- results[my_index] = ite.getCause();
- } catch (Throwable t) {
- results[my_index] = t;
- }
- }
- };
- t.start();
- threads[i] = t;
- }
- for (Thread t : threads) {
- t.join();
- }
- dumpResultStats(results, 1);
- System.out.println("testRacyMisbehavingLoader2 done");
- }
-
- private static void dumpResultStats(Object[] results, int expected_unique) throws Exception {
- int throwables = 0;
- int classes = 0;
- int unique_classes = 0;
- for (int i = 0; i != results.length; ++i) {
- Object r = results[i];
- if (r instanceof Throwable) {
- ++throwables;
- System.out.println(((Throwable) r).getMessage());
- } else if (isClassPair(r)) {
- printPair(r);
- Object ref = getSecond(r);
- ++classes;
- ++unique_classes;
- for (int j = 0; j != i; ++j) {
- Object rj = results[j];
- if (isClassPair(results[j]) && getSecond(results[j]) == ref) {
- --unique_classes;
- break;
- }
- }
- }
- }
- System.out.println("total: " + results.length);
- System.out.println(" throwables: " + throwables);
- System.out.println(" classes: " + classes
- + " (" + unique_classes + " unique)");
- if (expected_unique != unique_classes) {
- System.out.println("MISMATCH with expected_unique: " + expected_unique);
- ArrayList<Class<?>> list = new ArrayList<Class<?>>();
- for (int i = 0; i != results.length; ++i) {
- Object r = results[i];
- if (isClassPair(r)) {
- list.add(getSecond(r));
- }
- }
- nativeDumpClasses(list.toArray());
- }
- }
-
- private static DelegatingLoader createDelegatingLoader() {
- ClassLoader system_loader = ClassLoader.getSystemClassLoader();
- DefiningLoader defining_loader = new DefiningLoader(system_loader);
- return new DelegatingLoader(system_loader, defining_loader);
- }
-
- private static void changeInner(DelegatingLoader delegating_loader) {
- ClassLoader system_loader = ClassLoader.getSystemClassLoader();
- DefiningLoader defining_loader = new DefiningLoader(system_loader);
- delegating_loader.resetDefiningLoader(defining_loader);
- }
-
- private static WeakReference<Class<?>> wrapHelperGet(Class<?> helper) throws Exception {
- Method get = helper.getDeclaredMethod("get");
- Object pair = get.invoke(null);
- printPair(pair);
- return new WeakReference<Class<?>>(getSecond(pair));
- }
-
- private static void printPair(Object pair) throws Exception {
- Method print = pair.getClass().getDeclaredMethod("print");
- print.invoke(pair);
- }
-
- private static Class<?> getSecond(Object pair) throws Exception {
- Field second = pair.getClass().getDeclaredField("second");
- return (Class<?>) second.get(pair);
- }
-
- private static boolean isClassPair(Object r) {
- return r != null && r.getClass().getName().equals("ClassPair");
- }
-
- public static void clearResolvedTypes(Class<?> c) {
- if (!usingRI) {
- nativeClearResolvedTypes(c);
- }
- }
-
- // Skip verification of a class on ART. Verification can cause classes to be loaded
- // while holding a lock on the class being verified and holding that lock can interfere
- // with the intent of the "racy" tests. In these tests we're waiting in the loadClass()
- // for all the tested threads to synchronize and they cannot reach that point if they
- // are waiting for the class lock on ClassLinker::InitializeClass(Helper1/Helper3).
- public static void skipVerification(Class<?> c) {
- if (!usingRI) {
- nativeSkipVerification(c);
- }
- }
-
- public static native void nativeClearResolvedTypes(Class<?> c);
- public static native void nativeSkipVerification(Class<?> c);
- public static native void nativeDumpClasses(Object[] array);
-
- static boolean usingRI = false;
-}
diff --git a/test/626-const-class-linking/src/MisbehavingLoader.java b/test/626-const-class-linking/src/MisbehavingLoader.java
deleted file mode 100644
index ca9783e..0000000
--- a/test/626-const-class-linking/src/MisbehavingLoader.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Class loader that returns Helper2.class when asked to load "Test".
-public class MisbehavingLoader extends DefiningLoader {
- private DefiningLoader defining_loader;
-
- public MisbehavingLoader(ClassLoader parent, DefiningLoader defining_loader) {
- super(parent);
- this.defining_loader = defining_loader;
- }
-
- protected Class<?> findClass(String name) throws ClassNotFoundException
- {
- if (name.equals("Helper1") || name.equals("Helper2")) {
- return super.findClass(name);
- } else if (name.equals("Test")) {
- throw new Error("Unexpected MisbehavingLoader.findClass(\"Test\")");
- }
- return super.findClass(name);
- }
-
- protected Class<?> loadClass(String name, boolean resolve)
- throws ClassNotFoundException
- {
- if (name.equals("Helper1") || name.equals("Helper2")) {
- return super.loadClass(name, resolve);
- } else if (name.equals("Test")) {
- // Ask for a different class.
- return defining_loader.loadClass("Helper2", resolve);
- }
- return super.loadClass(name, resolve);
- }
-}
diff --git a/test/626-const-class-linking/src/RacyLoader.java b/test/626-const-class-linking/src/RacyLoader.java
deleted file mode 100644
index 9c164a3..0000000
--- a/test/626-const-class-linking/src/RacyLoader.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class RacyLoader extends DefiningLoader {
- static {
- // For JVM, register as parallel capable.
- // Android treats all class loaders as parallel capable and makes this a no-op.
- registerAsParallelCapable();
- }
-
- private Object lock = new Object();
- private int index = 0;
- private int count;
-
- private DefiningLoader[] defining_loaders;
-
- public RacyLoader(ClassLoader parent, int count) {
- super(parent);
- this.count = count;
- defining_loaders = new DefiningLoader[2];
- for (int i = 0; i != defining_loaders.length; ++i) {
- defining_loaders[i] = new DefiningLoader(parent);
- }
- }
-
- protected Class<?> findClass(String name) throws ClassNotFoundException
- {
- if (name.equals("Test") || name.equals("Test3")) {
- throw new Error("Unexpected RacyLoader.findClass(\"" + name + "\")");
- }
- return super.findClass(name);
- }
-
- protected Class<?> loadClass(String name, boolean resolve)
- throws ClassNotFoundException
- {
- if (name.equals("Test") || name.equals("Test3")) {
- int my_index = syncWithOtherInstances(count);
- Class<?> result = defining_loaders[my_index & 1].loadClass(name, resolve);
- syncWithOtherInstances(2 * count);
- return result;
- }
- return super.loadClass(name, resolve);
- }
-
- private int syncWithOtherInstances(int limit) {
- int my_index;
- synchronized (lock) {
- my_index = index;
- ++index;
- if (index != limit) {
- do {
- try {
- lock.wait();
- } catch (InterruptedException ie) {
- throw new Error(ie);
- }
- } while (index < limit);
- } else {
- lock.notifyAll();
- }
- }
- return my_index;
- }
-}
diff --git a/test/626-const-class-linking/src/RacyMisbehavingHelper.java b/test/626-const-class-linking/src/RacyMisbehavingHelper.java
deleted file mode 100644
index 4525278..0000000
--- a/test/626-const-class-linking/src/RacyMisbehavingHelper.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.lang.reflect.Method;
-
-public class RacyMisbehavingHelper {
- public static ClassPair get() {
- Class<?> helper1_class = Helper1.class;
- Class<?> test_class = Test.class;
- try {
- // After loading the correct class, allow loading the incorrect class.
- ClassLoader loader = helper1_class.getClassLoader();
- Method reportAfterLoading = loader.getClass().getDeclaredMethod("reportAfterLoading");
- reportAfterLoading.invoke(loader);
- } catch (Throwable t) {
- t.printStackTrace();
- }
- return new ClassPair(helper1_class, test_class);
- }
-}
diff --git a/test/626-const-class-linking/src/RacyMisbehavingLoader.java b/test/626-const-class-linking/src/RacyMisbehavingLoader.java
deleted file mode 100644
index f5bcb4c..0000000
--- a/test/626-const-class-linking/src/RacyMisbehavingLoader.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class RacyMisbehavingLoader extends DefiningLoader {
- static {
- // For JVM, register as parallel capable.
- // Android treats all class loaders as parallel capable and makes this a no-op.
- registerAsParallelCapable();
- }
-
- private Object lock = new Object();
- private int index = 0;
- private int count;
- private boolean throw_error;
-
- private DefiningLoader[] defining_loaders;
-
- public RacyMisbehavingLoader(ClassLoader parent, int count, boolean throw_error) {
- super(parent);
- this.count = count;
- this.throw_error = throw_error;
- defining_loaders = new DefiningLoader[2];
- for (int i = 0; i != defining_loaders.length; ++i) {
- defining_loaders[i] = new DefiningLoader(parent);
- }
- }
-
- public void reportAfterLoading() {
- synchronized (lock) {
- ++index;
- if (index == 2 * count) {
- lock.notifyAll();
- }
- }
- }
-
- protected Class<?> findClass(String name) throws ClassNotFoundException
- {
- if (name.equals("Test")) {
- throw new Error("Unexpected RacyLoader.findClass(\"" + name + "\")");
- }
- return super.findClass(name);
- }
-
- protected Class<?> loadClass(String name, boolean resolve)
- throws ClassNotFoundException
- {
- if (name.equals("Test")) {
- int my_index = syncWithOtherInstances(count);
- Class<?> result;
- if ((my_index & 1) == 0) {
- // Do not delay loading the correct class.
- result = defining_loaders[my_index & 1].loadClass(name, resolve);
- } else {
- // Delay loading the wrong class.
- syncWithOtherInstances(2 * count);
- if (throw_error) {
- throw new Error("RacyMisbehavingLoader throw_error=true");
- }
- result = defining_loaders[my_index & 1].loadClass("Test3", resolve);
- }
- return result;
- }
- return super.loadClass(name, resolve);
- }
-
- private int syncWithOtherInstances(int limit) {
- int my_index;
- synchronized (lock) {
- my_index = index;
- ++index;
- if (index != limit) {
- do {
- try {
- lock.wait();
- } catch (InterruptedException ie) {
- throw new Error(ie);
- }
- } while (index < limit);
- } else {
- lock.notifyAll();
- }
- }
- return my_index;
- }
-}
diff --git a/test/629-vdex-speed/expected.txt b/test/629-vdex-speed/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/629-vdex-speed/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/629-vdex-speed/info.txt b/test/629-vdex-speed/info.txt
new file mode 100644
index 0000000..6d84cb5
--- /dev/null
+++ b/test/629-vdex-speed/info.txt
@@ -0,0 +1,2 @@
+Regression test for vdex that used to not AOT compile
+methods when the VerifierDeps were verified.
diff --git a/test/629-vdex-speed/run b/test/629-vdex-speed/run
new file mode 100644
index 0000000..f1b0a95
--- /dev/null
+++ b/test/629-vdex-speed/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec ${RUN} --vdex "${@}"
diff --git a/test/626-const-class-linking/src-multidex/Test.java b/test/629-vdex-speed/src/Main.java
similarity index 67%
rename from test/626-const-class-linking/src-multidex/Test.java
rename to test/629-vdex-speed/src/Main.java
index 1b0cc2a..470565a 100644
--- a/test/626-const-class-linking/src-multidex/Test.java
+++ b/test/629-vdex-speed/src/Main.java
@@ -14,5 +14,14 @@
* limitations under the License.
*/
-public class Test {
+public class Main {
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ if (!isAotCompiled(Main.class, "main")) {
+ throw new Error("Expected Main.main to be AOT compiled");
+ }
+ }
+
+ private native static boolean isAotCompiled(Class<?> cls, String methodName);
}
+
diff --git a/test/902-hello-transformation/expected.txt b/test/902-hello-transformation/expected.txt
index a826f93..4774b81 100644
--- a/test/902-hello-transformation/expected.txt
+++ b/test/902-hello-transformation/expected.txt
@@ -1,3 +1,2 @@
hello
-modifying class 'Transform'
Goodbye
diff --git a/test/902-hello-transformation/run b/test/902-hello-transformation/run
index 3755d1d..94a8b2d 100755
--- a/test/902-hello-transformation/run
+++ b/test/902-hello-transformation/run
@@ -27,12 +27,11 @@
arg="jvm"
else
arg="art"
-fi
-
-if [[ "$@" != *"--debuggable"* ]]; then
- other_args=" -Xcompiler-option --debuggable "
-else
- other_args=""
+ if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+ else
+ other_args=""
+ fi
fi
./default-run "$@" --experimental agents \
diff --git a/test/902-hello-transformation/src/Main.java b/test/902-hello-transformation/src/Main.java
index 204b6e7..ec47119 100644
--- a/test/902-hello-transformation/src/Main.java
+++ b/test/902-hello-transformation/src/Main.java
@@ -14,7 +14,40 @@
* limitations under the License.
*/
+import java.util.Base64;
public class Main {
+
+ /**
+ * base64 encoded class/dex file for
+ * class Transform {
+ * public void sayHi() {
+ * System.out.println("Goodbye");
+ * }
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAHAoABgAOCQAPABAIABEKABIAEwcAFAcAFQEABjxpbml0PgEAAygpVgEABENvZGUB" +
+ "AA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwA" +
+ "BwAIBwAWDAAXABgBAAdHb29kYnllBwAZDAAaABsBAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2Jq" +
+ "ZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2ph" +
+ "dmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAABQAG" +
+ "AAAAAAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAEQABAAsACAAB" +
+ "AAkAAAAlAAIAAQAAAAmyAAISA7YABLEAAAABAAoAAAAKAAIAAAATAAgAFAABAAwAAAACAA0=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCLXSBQ5FiS3f16krSYZFF8xYZtFVp0GRXMAgAAcAAAAHhWNBIAAAAAAAAAACwCAAAO" +
+ "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACsAQAAIAEAAGIB" +
+ "AABqAQAAcwEAAIABAACXAQAAqwEAAL8BAADTAQAA4wEAAOYBAADqAQAA/gEAAAMCAAAMAgAAAgAA" +
+ "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA" +
+ "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAAB4CAAAA" +
+ "AAAAAQABAAEAAAATAgAABAAAAHAQAwAAAA4AAwABAAIAAAAYAgAACQAAAGIAAAAbAQEAAABuIAIA" +
+ "EAAOAAAAAQAAAAMABjxpbml0PgAHR29vZGJ5ZQALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50" +
+ "U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xh" +
+ "bmcvU3lzdGVtOwAOVHJhbnNmb3JtLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTMuMzYAA291" +
+ "dAAHcHJpbnRsbgAFc2F5SGkAEQAHDgATAAcOhQAAAAEBAICABKACAQG4Ag0AAAAAAAAAAQAAAAAA" +
+ "AAABAAAADgAAAHAAAAACAAAABgAAAKgAAAADAAAAAgAAAMAAAAAEAAAAAQAAANgAAAAFAAAABAAA" +
+ "AOAAAAAGAAAAAQAAAAABAAABIAAAAgAAACABAAABEAAAAQAAAFwBAAACIAAADgAAAGIBAAADIAAA" +
+ "AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=");
+
public static void main(String[] args) {
System.loadLibrary(args[1]);
doTest(new Transform());
@@ -22,10 +55,12 @@
public static void doTest(Transform t) {
t.sayHi();
- doClassTransformation(Transform.class);
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
t.sayHi();
}
// Transforms the class
- private static native void doClassTransformation(Class target);
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] class_file,
+ byte[] dex_file);
}
diff --git a/test/902-hello-transformation/transform.cc b/test/902-hello-transformation/transform.cc
deleted file mode 100644
index 3369dd4..0000000
--- a/test/902-hello-transformation/transform.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <iostream>
-#include <pthread.h>
-#include <stdio.h>
-#include <vector>
-
-#include "art_method-inl.h"
-#include "base/logging.h"
-#include "jni.h"
-#include "openjdkjvmti/jvmti.h"
-#include "ti-agent/common_helper.h"
-#include "ti-agent/common_load.h"
-#include "utils.h"
-
-namespace art {
-namespace Test902HelloTransformation {
-
-static bool RuntimeIsJvm = false;
-
-bool IsJVM() {
- return RuntimeIsJvm;
-}
-
-// base64 encoded class/dex file for
-//
-// class Transform {
-// public void sayHi() {
-// System.out.println("Goodbye");
-// }
-// }
-const char* class_file_base64 =
- "yv66vgAAADQAHAoABgAOCQAPABAIABEKABIAEwcAFAcAFQEABjxpbml0PgEAAygpVgEABENvZGUB"
- "AA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwA"
- "BwAIBwAWDAAXABgBAAdHb29kYnllBwAZDAAaABsBAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2Jq"
- "ZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2ph"
- "dmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAABQAG"
- "AAAAAAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAEQABAAsACAAB"
- "AAkAAAAlAAIAAQAAAAmyAAISA7YABLEAAAABAAoAAAAKAAIAAAATAAgAFAABAAwAAAACAA0=";
-
-const char* dex_file_base64 =
- "ZGV4CjAzNQCLXSBQ5FiS3f16krSYZFF8xYZtFVp0GRXMAgAAcAAAAHhWNBIAAAAAAAAAACwCAAAO"
- "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACsAQAAIAEAAGIB"
- "AABqAQAAcwEAAIABAACXAQAAqwEAAL8BAADTAQAA4wEAAOYBAADqAQAA/gEAAAMCAAAMAgAAAgAA"
- "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA"
- "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAAB4CAAAA"
- "AAAAAQABAAEAAAATAgAABAAAAHAQAwAAAA4AAwABAAIAAAAYAgAACQAAAGIAAAAbAQEAAABuIAIA"
- "EAAOAAAAAQAAAAMABjxpbml0PgAHR29vZGJ5ZQALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50"
- "U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xh"
- "bmcvU3lzdGVtOwAOVHJhbnNmb3JtLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTMuMzYAA291"
- "dAAHcHJpbnRsbgAFc2F5SGkAEQAHDgATAAcOhQAAAAEBAICABKACAQG4Ag0AAAAAAAAAAQAAAAAA"
- "AAABAAAADgAAAHAAAAACAAAABgAAAKgAAAADAAAAAgAAAMAAAAAEAAAAAQAAANgAAAAFAAAABAAA"
- "AOAAAAAGAAAAAQAAAAABAAABIAAAAgAAACABAAABEAAAAQAAAFwBAAACIAAADgAAAGIBAAADIAAA"
- "AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=";
-
-static void JNICALL transformationHook(jvmtiEnv *jvmtienv,
- JNIEnv* jni_env ATTRIBUTE_UNUSED,
- jclass class_being_redefined ATTRIBUTE_UNUSED,
- jobject loader ATTRIBUTE_UNUSED,
- const char* name,
- jobject protection_domain ATTRIBUTE_UNUSED,
- jint class_data_len ATTRIBUTE_UNUSED,
- const unsigned char* class_data ATTRIBUTE_UNUSED,
- jint* new_class_data_len,
- unsigned char** new_class_data) {
- if (strcmp("Transform", name)) {
- return;
- }
- printf("modifying class '%s'\n", name);
- bool is_jvm = IsJVM();
- size_t decode_len = 0;
- unsigned char* new_data;
- std::unique_ptr<uint8_t[]> file_data(
- DecodeBase64((is_jvm) ? class_file_base64 : dex_file_base64, &decode_len));
- jvmtiError ret = JVMTI_ERROR_NONE;
- if ((ret = jvmtienv->Allocate(static_cast<jlong>(decode_len), &new_data)) != JVMTI_ERROR_NONE) {
- printf("Unable to allocate buffer!\n");
- return;
- }
- memcpy(new_data, file_data.get(), decode_len);
- *new_class_data_len = static_cast<jint>(decode_len);
- *new_class_data = new_data;
- return;
-}
-
-using RetransformWithHookFunction = jvmtiError (*)(jvmtiEnv*, jclass, jvmtiEventClassFileLoadHook);
-static void DoClassTransformation(jvmtiEnv* jvmtienv, JNIEnv* jnienv, jclass target) {
- if (IsJVM()) {
- UNUSED(jnienv);
- jvmtienv->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, nullptr);
- jvmtiError ret = jvmtienv->RetransformClasses(1, &target);
- if (ret != JVMTI_ERROR_NONE) {
- char* err;
- jvmtienv->GetErrorName(ret, &err);
- printf("Error transforming: %s\n", err);
- }
- } else {
- RetransformWithHookFunction f =
- reinterpret_cast<RetransformWithHookFunction>(jvmtienv->functions->reserved1);
- if (f(jvmtienv, target, transformationHook) != JVMTI_ERROR_NONE) {
- printf("Failed to tranform class!");
- return;
- }
- }
-}
-
-extern "C" JNIEXPORT void JNICALL Java_Main_doClassTransformation(JNIEnv* env,
- jclass,
- jclass target) {
- JavaVM* vm;
- if (env->GetJavaVM(&vm)) {
- printf("Unable to get javaVM!\n");
- return;
- }
- DoClassTransformation(jvmti_env, env, target);
-}
-
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options,
- void* reserved ATTRIBUTE_UNUSED) {
- RuntimeIsJvm = (strcmp("jvm", options) == 0);
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- if (IsJVM()) {
- jvmtiEventCallbacks cbs;
- memset(&cbs, 0, sizeof(cbs));
- cbs.ClassFileLoadHook = transformationHook;
- jvmti_env->SetEventCallbacks(&cbs, sizeof(jvmtiEventCallbacks));
- }
- return 0;
-}
-
-} // namespace Test902HelloTransformation
-} // namespace art
-
diff --git a/test/902-hello-transformation/transform.h b/test/902-hello-transformation/transform.h
deleted file mode 100644
index 661058d..0000000
--- a/test/902-hello-transformation/transform.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_902_HELLO_TRANSFORMATION_TRANSFORM_H_
-#define ART_TEST_902_HELLO_TRANSFORMATION_TRANSFORM_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test902HelloTransformation {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test902HelloTransformation
-} // namespace art
-
-#endif // ART_TEST_902_HELLO_TRANSFORMATION_TRANSFORM_H_
diff --git a/test/954-invoke-polymorphic-verifier/build b/test/954-invoke-polymorphic-verifier/build
new file mode 100755
index 0000000..a423ca6
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/build
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+if [[ $@ != *"--jvm"* ]]; then
+ # Don't do anything with jvm.
+ export USE_JACK=true
+fi
+
+./default-build "$@" --experimental method-handles
diff --git a/test/954-invoke-polymorphic-verifier/check b/test/954-invoke-polymorphic-verifier/check
new file mode 100755
index 0000000..dc5ddb7
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/check
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Strip out temporary file path information and indices from output.
+sed -e "s/ [(]declaration of.*//" -e "s/\[0x[0-9A-F]*\] //g" "$2" > "$2.tmp"
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/954-invoke-polymorphic-verifier/expected.txt b/test/954-invoke-polymorphic-verifier/expected.txt
new file mode 100644
index 0000000..5df393a
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/expected.txt
@@ -0,0 +1,10 @@
+java.lang.VerifyError: Verifier rejected class MethodHandleNotInvoke: void MethodHandleNotInvoke.<init>() failed to verify: void MethodHandleNotInvoke.<init>(): void MethodHandleNotInvoke.<init>(): couldn't find method java.lang.invoke.MethodHandle.notInvoke ([Ljava/lang/Object;)Ljava/lang/Object;
+java.lang.VerifyError: Verifier rejected class MethodHandleToString: void MethodHandleToString.<init>() failed to verify: void MethodHandleToString.<init>(): void MethodHandleToString.<init>(): invoke type (METHOD_POLYMORPHIC) does not match method type of java.lang.String java.lang.invoke.MethodHandle.toString()
+java.lang.VerifyError: Verifier rejected class NonReference: void NonReference.<init>() failed to verify: void NonReference.<init>(): void NonReference.<init>(): tried to get class from non-reference register v0 (type=Precise Low-half Constant: 0)
+java.lang.VerifyError: Verifier rejected class TooFewArguments: void TooFewArguments.<init>() failed to verify: void TooFewArguments.<init>(): void TooFewArguments.<init>(): Rejecting invocation, expected 2 argument registers, method signature has 3 or more
+java.lang.VerifyError: Verifier rejected class TooManyArguments: void TooManyArguments.<init>() failed to verify: void TooManyArguments.<init>(): void TooManyArguments.<init>(): Rejecting invocation, expected 4 argument registers, method signature has 3
+java.lang.VerifyError: Verifier rejected class BadThis: void BadThis.<init>() failed to verify: void BadThis.<init>(): void BadThis.<init>(): 'this' argument 'Precise Reference: java.lang.String' not instance of 'Reference: java.lang.invoke.MethodHandle'
+java.lang.VerifyError: Verifier rejected class FakeSignaturePolymorphic: void FakeSignaturePolymorphic.<init>() failed to verify: void FakeSignaturePolymorphic.<init>(): void FakeSignaturePolymorphic.<init>(): invoke type (METHOD_POLYMORPHIC) does not match method type of java.lang.Object Main.invoke(java.lang.Object[])
+java.lang.VerifyError: Verifier rejected class BetterFakeSignaturePolymorphic: void BetterFakeSignaturePolymorphic.<init>() failed to verify: void BetterFakeSignaturePolymorphic.<init>(): Signature polymorphic method must be declared in java.lang.invoke.MethodClass
+Passed Subclass test
+java.lang.VerifyError: Verifier rejected class Unresolved: void Unresolved.<init>() failed to verify: void Unresolved.<init>(): invoke-polymorphic receiver has no class: Unresolved Reference: other.thing.Foo
diff --git a/test/954-invoke-polymorphic-verifier/info.txt b/test/954-invoke-polymorphic-verifier/info.txt
new file mode 100644
index 0000000..cb10d42
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/info.txt
@@ -0,0 +1,3 @@
+Test cases that should be rejected by the method verifier.
+
+NOTE: needs to run under ART.
diff --git a/test/954-invoke-polymorphic-verifier/run b/test/954-invoke-polymorphic-verifier/run
new file mode 100755
index 0000000..a9f1822
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/run
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+./default-run "$@" --experimental method-handles
diff --git a/test/954-invoke-polymorphic-verifier/smali/BadThis.smali b/test/954-invoke-polymorphic-verifier/smali/BadThis.smali
new file mode 100644
index 0000000..d9edf67
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/smali/BadThis.smali
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.source "BadThis.smali"
+
+.class public LBadThis;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 4
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ const-string v0, "0"
+ const-string v1, "1"
+ const-string v2, "2"
+ # v0 is a String, not a MethodHandle.
+ invoke-polymorphic {v0, v1, v2}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+ return-void
+.end method
\ No newline at end of file
diff --git a/test/954-invoke-polymorphic-verifier/smali/BetterFakeSignaturePolymorphic.smali b/test/954-invoke-polymorphic-verifier/smali/BetterFakeSignaturePolymorphic.smali
new file mode 100644
index 0000000..631e704
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/smali/BetterFakeSignaturePolymorphic.smali
@@ -0,0 +1,43 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.source "BetterFakeSignaturePolymorphic.smali"
+
+.class public LBetterFakeSignaturePolymorphic;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 4
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ invoke-static {}, LBetterFakeSignaturePolymorphic;->getMain()LMain;
+ move-result-object v0
+ const/4 v1, 0
+ move-object v1, v1
+ # Fail here because Main;->invokeExact is on the wrong class.
+ invoke-polymorphic {v0, v1}, LMain;->invokeExact([Ljava/lang/Object;)Ljava/lang/Object;, ([Ljava/lang/Object;)Ljava/lang/Object;
+ return-void
+.end method
+
+.method public static getMethodHandle()Ljava/lang/invoke/MethodHandle;
+.registers 1
+ const/4 v0, 0
+ return-object v0
+.end method
+
+.method public static getMain()LMain;
+.registers 1
+ const/4 v0, 0
+ return-object v0
+.end method
\ No newline at end of file
diff --git a/test/954-invoke-polymorphic-verifier/smali/FakeSignaturePolymorphic.smali b/test/954-invoke-polymorphic-verifier/smali/FakeSignaturePolymorphic.smali
new file mode 100644
index 0000000..5bd054a
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/smali/FakeSignaturePolymorphic.smali
@@ -0,0 +1,43 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.source "FakeSignaturePolymorphic.smali"
+
+.class public LFakeSignaturePolymorphic;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 4
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ invoke-static {}, LFakeSignaturePolymorphic;->getMain()LMain;
+ move-result-object v0
+ const/4 v1, 0
+ move-object v1, v1
+ # Fail here because Main;->invoke does not have the right flags (i.e. not native or varargs).
+ invoke-polymorphic {v0, v1}, LMain;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, ([Ljava/lang/Object;)Ljava/lang/Object;
+ return-void
+.end method
+
+.method public static getMethodHandle()Ljava/lang/invoke/MethodHandle;
+.registers 1
+ const/4 v0, 0
+ return-object v0
+.end method
+
+.method public static getMain()LMain;
+.registers 1
+ const/4 v0, 0
+ return-object v0
+.end method
\ No newline at end of file
diff --git a/test/954-invoke-polymorphic-verifier/smali/Main.smali b/test/954-invoke-polymorphic-verifier/smali/Main.smali
new file mode 100644
index 0000000..5b5e555
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/smali/Main.smali
@@ -0,0 +1,85 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This is the test suite runner. It is written in smali rather than
+# Java pending support in dx/dxmerge for invoke-polymorphic (b/33191712).
+
+.source "Main.smali"
+
+.class public LMain;
+.super Ljava/lang/Object;
+
+.method public constructor<init>()V
+.registers 1
+ invoke-direct {v0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public static main([Ljava/lang/String;)V
+.registers 1
+ # New tests should be added here.
+ const-string v0, "MethodHandleNotInvoke"
+ invoke-static {v0}, LMain;->test(Ljava/lang/String;)V
+ const-string v0, "MethodHandleToString"
+ invoke-static {v0}, LMain;->test(Ljava/lang/String;)V
+ const-string v0, "NonReference"
+ invoke-static {v0}, LMain;->test(Ljava/lang/String;)V
+ const-string v0, "TooFewArguments"
+ invoke-static {v0}, LMain;->test(Ljava/lang/String;)V
+ const-string v0, "TooManyArguments"
+ invoke-static {v0}, LMain;->test(Ljava/lang/String;)V
+ const-string v0, "BadThis"
+ invoke-static {v0}, LMain;->test(Ljava/lang/String;)V
+ const-string v0, "FakeSignaturePolymorphic"
+ invoke-static {v0}, LMain;->test(Ljava/lang/String;)V
+ const-string v0, "BetterFakeSignaturePolymorphic"
+ invoke-static {v0}, LMain;->test(Ljava/lang/String;)V
+ const-string v0, "Subclass"
+ invoke-static {v0}, LMain;->test(Ljava/lang/String;)V
+ const-string v0, "Unresolved"
+ invoke-static {v0}, LMain;->test(Ljava/lang/String;)V
+ return-void
+.end method
+
+.method public static test(Ljava/lang/String;)V
+.registers 6
+ :try_start_1
+ invoke-static {v5}, Ljava/lang/Class;->forName(Ljava/lang/String;)Ljava/lang/Class;
+ move-result-object v0
+ invoke-virtual {v0}, Ljava/lang/Class;->newInstance()Ljava/lang/Object;
+ :try_end_1
+ .catch Ljava/lang/VerifyError; {:try_start_1 .. :try_end_1} :catch_verifier
+ return-void
+ :catch_verifier
+ move-exception v3
+ invoke-virtual {v3}, Ljava/lang/Exception;->toString()Ljava/lang/String;
+ move-result-object v3
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v2, v3}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ return-void
+.end method
+
+# A test method called "invoke", but on a class other than MethodHandle.
+.method public invoke([Ljava/lang/Object;)Ljava/lang/Object;
+.registers 2
+ const/4 v0, 0
+ aget-object v0, p0, v0
+ return-object v0
+.end method
+
+# A test method called "invokeExact" that is native varargs, but is on a class
+# other than MethodHandle.
+.method public native varargs invokeExact([Ljava/lang/Object;)Ljava/lang/Object;
+.end method
\ No newline at end of file
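The header comment in Main.smali above notes that the runner is written in smali only because dx/dxmerge could not yet handle invoke-polymorphic (b/33191712). As a rough, illustrative Java sketch of the same runner (not part of this change, and assuming the rejected test classes are on the classpath), it would read roughly as:

    public class Main {
      public static void main(String[] args) throws Exception {
        // Every class below except Subclass is expected to be rejected by the
        // verifier in its <init>; Subclass prints its own pass message.
        test("MethodHandleNotInvoke");
        test("MethodHandleToString");
        test("NonReference");
        test("TooFewArguments");
        test("TooManyArguments");
        test("BadThis");
        test("FakeSignaturePolymorphic");
        test("BetterFakeSignaturePolymorphic");
        test("Subclass");
        test("Unresolved");
      }

      public static void test(String className) throws Exception {
        try {
          // Instantiating the class forces verification of its constructor.
          Class.forName(className).newInstance();
        } catch (VerifyError e) {
          System.out.println(e);
        }
      }
    }
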
diff --git a/test/954-invoke-polymorphic-verifier/smali/MethodHandleNotInvoke.smali b/test/954-invoke-polymorphic-verifier/smali/MethodHandleNotInvoke.smali
new file mode 100644
index 0000000..42546d1
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/smali/MethodHandleNotInvoke.smali
@@ -0,0 +1,37 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.source "MethodHandleNotInvoke.smali"
+
+.class public LMethodHandleNotInvoke;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 4
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ invoke-static {}, LMethodHandleNotInvoke;->getMethodHandle()Ljava/lang/invoke/MethodHandle;
+ move-result-object v0
+ const/4 v1, 0
+ move-object v1, v1
+ # Attempt invoke-polymorphic on MethodHandle.notInvoke().
+ invoke-polymorphic {v0, v1}, Ljava/lang/invoke/MethodHandle;->notInvoke([Ljava/lang/Object;)Ljava/lang/Object;, ([Ljava/lang/Object;)Ljava/lang/Object;
+ return-void
+.end method
+
+.method public static getMethodHandle()Ljava/lang/invoke/MethodHandle;
+.registers 1
+ const/4 v0, 0
+ return-object v0
+.end method
diff --git a/test/954-invoke-polymorphic-verifier/smali/MethodHandleToString.smali b/test/954-invoke-polymorphic-verifier/smali/MethodHandleToString.smali
new file mode 100644
index 0000000..c48429c
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/smali/MethodHandleToString.smali
@@ -0,0 +1,35 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.source "MethodHandleToString.smali"
+
+.class public LMethodHandleToString;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ invoke-static {}, LMethodHandleToString;->getMethodHandle()Ljava/lang/invoke/MethodHandle;
+ move-result-object v0
+ # Attempt invoke-polymorphic on MethodHandle.toString().
+ invoke-polymorphic {v0}, Ljava/lang/invoke/MethodHandle;->toString()Ljava/lang/String;, ()Ljava/lang/Object;
+ return-void
+.end method
+
+.method public static getMethodHandle()Ljava/lang/invoke/MethodHandle;
+.registers 1
+ const/4 v0, 0
+ return-object v0
+.end method
diff --git a/test/954-invoke-polymorphic-verifier/smali/NonReference.smali b/test/954-invoke-polymorphic-verifier/smali/NonReference.smali
new file mode 100644
index 0000000..4e1eff2
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/smali/NonReference.smali
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.source "NonReference.smali"
+
+.class public LNonReference;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 4
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ # Set v0 to have incorrect type (not a MethodHandle) and value (not null).
+ const-wide v0, 0
+ const-string v1, "1"
+ const-string v2, "2"
+ invoke-polymorphic {v0, v1, v2}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+ return-void
+.end method
diff --git a/test/954-invoke-polymorphic-verifier/smali/Subclass.smali b/test/954-invoke-polymorphic-verifier/smali/Subclass.smali
new file mode 100644
index 0000000..7ef61be
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/smali/Subclass.smali
@@ -0,0 +1,45 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.source "Subclass.smali"
+
+.class public LSubclass;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 3
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ goto :happy
+ # Get a MethodHandleImpl instance (subclass of MethodHandle).
+ invoke-static {}, LSubclass;->getMethodHandleSubclassInstance()Ljava/lang/invoke/MethodHandleImpl;
+ move-result-object v0
+ const-string v1, "1"
+ const-string v2, "2"
+ # Calling MethodHandle.invoke() on MethodHandleImpl instance (subclass of MethodHandle) => Okay
+ invoke-polymorphic {v0, v1, v2}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+ # Calling MethodHandleImpl.invoke() rather than MethodHandle.invoke() [ declaring class is okay ] => Okay
+ invoke-polymorphic {v0, v1, v2}, Ljava/lang/invoke/MethodHandleImpl;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+:happy
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v2, "Passed Subclass test"
+ invoke-virtual {v1, v2}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ return-void
+.end method
+
+.method public static getMethodHandleSubclassInstance()Ljava/lang/invoke/MethodHandleImpl;
+.registers 1
+ const/4 v0, 0
+ return-object v0
+.end method
diff --git a/test/954-invoke-polymorphic-verifier/smali/TooFewArguments.smali b/test/954-invoke-polymorphic-verifier/smali/TooFewArguments.smali
new file mode 100644
index 0000000..da29c6f
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/smali/TooFewArguments.smali
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.source "TooFewArguments.smali"
+
+.class public LTooFewArguments;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 4
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ # Set up v0 as a null MethodHandle
+ const/4 v0, 0
+ move-object v0, v0
+ invoke-virtual {v0}, Ljava/lang/invoke/MethodHandle;->asFixedArity()Ljava/lang/invoke/MethodHandle;
+ move-result-object v0
+ const-string v1, "1"
+ # Invoke with one argument too few for prototype.
+ invoke-polymorphic {v0, v1}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+ return-void
+.end method
diff --git a/test/954-invoke-polymorphic-verifier/smali/TooManyArguments.smali b/test/954-invoke-polymorphic-verifier/smali/TooManyArguments.smali
new file mode 100644
index 0000000..bc0135e
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/smali/TooManyArguments.smali
@@ -0,0 +1,35 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.source "TooManyArguments.smali"
+
+.class public LTooManyArguments;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 4
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ # Set up v0 as a null MethodHandle
+ const/4 v0, 0
+ move-object v0, v0
+ invoke-virtual {v0}, Ljava/lang/invoke/MethodHandle;->asFixedArity()Ljava/lang/invoke/MethodHandle;
+ move-result-object v0
+ const-string v1, "1"
+ const-string v2, "2"
+ const-string v3, "3"
+ # Invoke with one argument too many for prototype.
+ invoke-polymorphic {v0, v1, v2, v3}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+ return-void
+.end method
diff --git a/test/954-invoke-polymorphic-verifier/smali/Unresolved.smali b/test/954-invoke-polymorphic-verifier/smali/Unresolved.smali
new file mode 100644
index 0000000..882f0e9
--- /dev/null
+++ b/test/954-invoke-polymorphic-verifier/smali/Unresolved.smali
@@ -0,0 +1,40 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.source "Unresolved.smali"
+
+.class public LUnresolved;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 3
+.line 23
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ # Get an unresolvable instance (abstract class)
+ invoke-static {}, LAbstract;->getUnresolvedInstance()Lother/thing/Foo;
+ move-result-object v0
+ const-string v1, "1"
+ const-string v2, "2"
+ # Calling MethodHandle.invoke() on unresolved receiver.
+ invoke-polymorphic {v0, v1, v2}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+ return-void
+.end method
+
+.method public static getUnresolvedInstance()Lother/thing/Foo;
+.registers 1
+.line 37
+ const/4 v0, 0
+ return-object v0
+.end method
diff --git a/test/Android.arm_vixl.mk b/test/Android.arm_vixl.mk
index 5ae961a..c89eb4a 100644
--- a/test/Android.arm_vixl.mk
+++ b/test/Android.arm_vixl.mk
@@ -16,6 +16,5 @@
# Known broken tests for the ARM VIXL backend.
TEST_ART_BROKEN_OPTIMIZING_ARM_VIXL_RUN_TESTS := \
- 488-checker-inline-recursive-calls \
- 552-checker-sharpening \
562-checker-no-intermediate \
+ 624-checker-stringops \
diff --git a/test/Android.bp b/test/Android.bp
index 39a4059..44c64c1 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -244,8 +244,8 @@
defaults: ["libartagent-defaults"],
srcs: [
"ti-agent/common_load.cc",
+ "ti-agent/common_helper.cc",
"901-hello-ti-agent/basics.cc",
- "902-hello-transformation/transform.cc",
"903-hello-tagging/tagging.cc",
"904-object-allocation/tracking.cc",
"905-object-free/tracking_free.cc",
@@ -314,7 +314,6 @@
"595-profile-saving/profile-saving.cc",
"596-app-images/app_images.cc",
"597-deopt-new-string/deopt.cc",
- "626-const-class-linking/clear_dex_cache_types.cc",
],
shared_libs: [
"libbacktrace",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 96b984d..d7dfe5a 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -360,8 +360,10 @@
TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
# Temporarily disable some broken tests when forcing access checks in interpreter b/22414682
+# 629 requires compilation.
TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS := \
- 137-cfi
+ 137-cfi \
+ 629-vdex-speed
ifneq (,$(filter interp-ac,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -470,12 +472,14 @@
# This test dynamically enables tracing to force a deoptimization. This makes the test meaningless
# when already tracing, and writes an error message that we do not want to check for.
# 130 occasional timeout b/32383962.
+# 629 requires compilation.
TEST_ART_BROKEN_TRACING_RUN_TESTS := \
087-gc-after-link \
130-hprof \
137-cfi \
141-class-unload \
570-checker-osr \
+ 629-vdex-speed \
802-deoptimization
ifneq (,$(filter trace stream,$(TRACE_TYPES)))
@@ -486,9 +490,11 @@
# Known broken tests for the interpreter.
# CFI unwinding expects managed frames.
+# 629 requires compilation.
TEST_ART_BROKEN_INTERPRETER_RUN_TESTS := \
137-cfi \
- 554-jit-profile-file
+ 554-jit-profile-file \
+ 629-vdex-speed
ifneq (,$(filter interpreter,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -504,8 +510,10 @@
# Test 906 iterates the heap filtering with different options. No instances should be created
# between those runs to be able to have precise checks.
# Test 902 hits races with the JIT compiler. b/32821077
+# Test 629 requires compilation.
TEST_ART_BROKEN_JIT_RUN_TESTS := \
137-cfi \
+ 629-vdex-speed \
902-hello-transformation \
904-object-allocation \
906-iterate-heap \
@@ -660,6 +668,15 @@
endif
endif
+# Tests disabled for GSS.
+TEST_ART_BROKEN_GSS_RUN_TESTS := 080-oom-fragmentation
+ifeq ($(ART_DEFAULT_GC_TYPE),GSS)
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
+ $(PREBUILD_TYPES),$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \
+ $(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
+ $(TEST_ART_BROKEN_GSS_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS :=
TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS :=
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index a2eb370..285f3aa 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -119,6 +119,24 @@
return JNI_TRUE;
}
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isAotCompiled(JNIEnv* env,
+ jclass,
+ jclass cls,
+ jstring method_name) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ ScopedUtfChars chars(env, method_name);
+ CHECK(chars.c_str() != nullptr);
+ ArtMethod* method = soa.Decode<mirror::Class>(cls)->FindDeclaredDirectMethodByName(
+ chars.c_str(), kRuntimePointerSize);
+ const void* code = method->GetOatMethodQuickCode(kRuntimePointerSize);
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr && jit->GetCodeCache()->ContainsPc(code)) {
+ return true;
+ }
+ return code != nullptr;
+}
+
extern "C" JNIEXPORT void JNICALL Java_Main_ensureJitCompiled(JNIEnv* env,
jclass,
jclass cls,
@@ -157,4 +175,17 @@
}
}
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasSingleImplementation(JNIEnv* env,
+ jclass,
+ jclass cls,
+ jstring method_name) {
+ ArtMethod* method = nullptr;
+ ScopedObjectAccess soa(Thread::Current());
+ ScopedUtfChars chars(env, method_name);
+ CHECK(chars.c_str() != nullptr);
+ method = soa.Decode<mirror::Class>(cls)->FindDeclaredVirtualMethodByName(
+ chars.c_str(), kRuntimePointerSize);
+ return method->HasSingleImplementation();
+}
+
} // namespace art
diff --git a/test/etc/default-build b/test/etc/default-build
index 408dcfd..f2b5078 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -71,7 +71,7 @@
declare -A JACK_EXPERIMENTAL_ARGS
JACK_EXPERIMENTAL_ARGS["default-methods"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
JACK_EXPERIMENTAL_ARGS["lambdas"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
-JACK_EXPERIMENTAL_ARGS["method-handles"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=26"
+JACK_EXPERIMENTAL_ARGS["method-handles"]="-D jack.java.source.version=1.7 -D jack.android.min-api-level=o-b1"
declare -A SMALI_EXPERIMENTAL_ARGS
SMALI_EXPERIMENTAL_ARGS["default-methods"]="--api-level 24"
@@ -273,10 +273,8 @@
fi
# Create a single jar with two dex files for multidex.
-if [ ${NEED_DEX} = "true" ]; then
- if [ ${HAS_SRC_MULTIDEX} = "true" ] || [ ${HAS_SMALI_MULTIDEX} = "true" ]; then
- zip $TEST_NAME.jar classes.dex classes2.dex
- else
- zip $TEST_NAME.jar classes.dex
- fi
+if [ ${HAS_SRC_MULTIDEX} = "true" ] || [ ${HAS_SMALI_MULTIDEX} = "true" ]; then
+ zip $TEST_NAME.jar classes.dex classes2.dex
+elif [ ${NEED_DEX} = "true" ]; then
+ zip $TEST_NAME.jar classes.dex
fi
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index f0abb44..bb3a3ad 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -316,8 +316,7 @@
if [ "$USE_JVM" = "y" ]; then
export LD_LIBRARY_PATH=${ANDROID_HOST_OUT}/lib64
# Xmx is necessary since we don't pass down the ART flags to JVM.
- # We pass the classes2 path whether it's used (src-multidex) or not.
- cmdline="${JAVA} ${DEBUGGER_OPTS} ${JVM_VERIFY_ARG} -Xmx256m -classpath classes:classes2 ${FLAGS} $MAIN $@ ${ARGS}"
+ cmdline="${JAVA} ${DEBUGGER_OPTS} ${JVM_VERIFY_ARG} -Xmx256m -classpath classes ${FLAGS} $MAIN $@ ${ARGS}"
if [ "$DEV_MODE" = "y" ]; then
echo $cmdline
fi
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
new file mode 100644
index 0000000..3e2b168
--- /dev/null
+++ b/test/ti-agent/common_helper.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ti-agent/common_helper.h"
+
+#include <stdio.h>
+
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ti-agent/common_load.h"
+#include "utils.h"
+
+namespace art {
+bool RuntimeIsJVM;
+
+bool IsJVM() {
+ return RuntimeIsJVM;
+}
+
+void SetAllCapabilities(jvmtiEnv* env) {
+ jvmtiCapabilities caps;
+ env->GetPotentialCapabilities(&caps);
+ env->AddCapabilities(&caps);
+}
+
+namespace common_redefine {
+
+using RedefineDirectFunction = jvmtiError (*)(jvmtiEnv*, jclass, jint, const unsigned char*);
+static void DoClassTransformation(jvmtiEnv* jvmti_env, JNIEnv* env,
+ jclass target,
+ jbyteArray class_file_bytes,
+ jbyteArray dex_file_bytes) {
+ jbyteArray desired_array = IsJVM() ? class_file_bytes : dex_file_bytes;
+ jint len = static_cast<jint>(env->GetArrayLength(desired_array));
+ const unsigned char* redef_bytes = reinterpret_cast<const unsigned char*>(
+ env->GetByteArrayElements(desired_array, nullptr));
+ jvmtiError res;
+ if (IsJVM()) {
+ jvmtiClassDefinition def;
+ def.klass = target;
+ def.class_byte_count = static_cast<jint>(len);
+ def.class_bytes = redef_bytes;
+ res = jvmti_env->RedefineClasses(1, &def);
+ } else {
+ RedefineDirectFunction f =
+ reinterpret_cast<RedefineDirectFunction>(jvmti_env->functions->reserved3);
+ res = f(jvmti_env, target, len, redef_bytes);
+ }
+ if (res != JVMTI_ERROR_NONE) {
+ printf("Redefinition failed!");
+ }
+}
+
+// Magic JNI export that classes can use for redefining classes.
+// To use it, classes should declare this as a native function with signature (Ljava/lang/Class;[B[B)V
+extern "C" JNIEXPORT void JNICALL Java_Main_doCommonClassRedefinition(JNIEnv* env,
+ jclass,
+ jclass target,
+ jbyteArray class_file_bytes,
+ jbyteArray dex_file_bytes) {
+ DoClassTransformation(jvmti_env, env, target, class_file_bytes, dex_file_bytes);
+}
+
+// Don't do anything
+jint OnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ SetAllCapabilities(jvmti_env);
+ return 0;
+}
+
+} // namespace common_redefine
+
+} // namespace art
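As the comment on Java_Main_doCommonClassRedefinition notes, a test opts into class redefinition simply by declaring a matching native method. A minimal Java-side sketch (illustrative only, mirroring the declaration used by 902-hello-transformation's Main.java earlier in this change) looks like:

    public class Main {
      // Must live on a class named Main so that it binds to the
      // Java_Main_doCommonClassRedefinition export in common_helper.cc.
      private static native void doCommonClassRedefinition(Class<?> target,
                                                           byte[] classFile,
                                                           byte[] dexFile);

      public static void main(String[] args) {
        // Load the ti-agent library; the agent uses classFile bytes on the RI
        // and dexFile bytes on ART when redefining the target class.
        System.loadLibrary(args[1]);
        // doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
      }
    }
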
diff --git a/test/ti-agent/common_helper.h b/test/ti-agent/common_helper.h
index 84997f3..76543fe 100644
--- a/test/ti-agent/common_helper.h
+++ b/test/ti-agent/common_helper.h
@@ -18,9 +18,19 @@
#define ART_TEST_TI_AGENT_COMMON_HELPER_H_
#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
#include "ScopedLocalRef.h"
namespace art {
+namespace common_redefine {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace common_redefine
+
+extern bool RuntimeIsJVM;
+
+bool IsJVM();
template <typename T>
static jobjectArray CreateObjectArray(JNIEnv* env,
@@ -53,11 +63,7 @@
return ret.release();
}
-static void SetAllCapabilities(jvmtiEnv* env) {
- jvmtiCapabilities caps;
- env->GetPotentialCapabilities(&caps);
- env->AddCapabilities(&caps);
-}
+void SetAllCapabilities(jvmtiEnv* env);
} // namespace art
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index a959482..2795cbc 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -23,9 +23,9 @@
#include "base/logging.h"
#include "base/macros.h"
#include "common_load.h"
+#include "common_helper.h"
#include "901-hello-ti-agent/basics.h"
-#include "902-hello-transformation/transform.h"
#include "903-hello-tagging/tagging.h"
#include "904-object-allocation/tracking.h"
#include "905-object-free/tracking_free.h"
@@ -54,7 +54,7 @@
// A list of all the agents we have for testing.
AgentLib agents[] = {
{ "901-hello-ti-agent", Test901HelloTi::OnLoad, nullptr },
- { "902-hello-transformation", Test902HelloTransformation::OnLoad, nullptr },
+ { "902-hello-transformation", common_redefine::OnLoad, nullptr },
{ "903-hello-tagging", Test903HelloTagging::OnLoad, nullptr },
{ "904-object-allocation", Test904ObjectAllocation::OnLoad, nullptr },
{ "905-object-free", Test905ObjectFree::OnLoad, nullptr },
@@ -95,6 +95,10 @@
return true;
}
+static void SetIsJVM(char* options) {
+ RuntimeIsJVM = strncmp(options, "jvm", 3) == 0;
+}
+
extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
char* remaining_options = nullptr;
char* name_option = nullptr;
@@ -112,6 +116,7 @@
printf("agent: %s does not include an OnLoad method.\n", name_option);
return -3;
}
+ SetIsJVM(remaining_options);
return lib->load(vm, remaining_options, reserved);
}
@@ -132,6 +137,7 @@
printf("agent: %s does not include an OnAttach method.\n", name_option);
return -3;
}
+ SetIsJVM(remaining_options);
return lib->attach(vm, remaining_options, reserved);
}
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 53fe8fe..ae18f41 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -173,12 +173,6 @@
bug: 25178637
},
{
- description: "Non-deterministic test because of a dependency on weak ref collection.",
- result: EXEC_FAILED,
- names: ["org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet"],
- bug: 25437292
-},
-{
description: "Missing resource in classpath",
result: EXEC_FAILED,
modes: [device],