Merge "Revert "Remove unused code for patching oat files.""
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index e41d9bd..28c009e 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -770,8 +770,6 @@
existing = existing | ExperimentalFlags::kAgents;
} else if (option == "runtime-plugins") {
existing = existing | ExperimentalFlags::kRuntimePlugins;
- } else if (option == "method-handles") {
- existing = existing | ExperimentalFlags::kMethodHandles;
} else {
return Result::Failure(std::string("Unknown option '") + option + "'");
}
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index bbf9eee..e2a0942 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -176,6 +176,7 @@
kCallRelative, // NOTE: Actual patching is instruction_set-dependent.
kType,
kTypeRelative, // NOTE: Actual patching is instruction_set-dependent.
+ kTypeBssEntry, // NOTE: Actual patching is instruction_set-dependent.
kString,
kStringRelative, // NOTE: Actual patching is instruction_set-dependent.
kStringBssEntry, // NOTE: Actual patching is instruction_set-dependent.
@@ -228,6 +229,16 @@
return patch;
}
+ static LinkerPatch TypeBssEntryPatch(size_t literal_offset,
+ const DexFile* target_dex_file,
+ uint32_t pc_insn_offset,
+ uint32_t target_type_idx) {
+ LinkerPatch patch(literal_offset, Type::kTypeBssEntry, target_dex_file);
+ patch.type_idx_ = target_type_idx;
+ patch.pc_insn_offset_ = pc_insn_offset;
+ return patch;
+ }
+
static LinkerPatch StringPatch(size_t literal_offset,
const DexFile* target_dex_file,
uint32_t target_string_idx) {
@@ -282,6 +293,7 @@
switch (GetType()) {
case Type::kCallRelative:
case Type::kTypeRelative:
+ case Type::kTypeBssEntry:
case Type::kStringRelative:
case Type::kStringBssEntry:
case Type::kDexCacheArray:
@@ -299,12 +311,16 @@
}
const DexFile* TargetTypeDexFile() const {
- DCHECK(patch_type_ == Type::kType || patch_type_ == Type::kTypeRelative);
+ DCHECK(patch_type_ == Type::kType ||
+ patch_type_ == Type::kTypeRelative ||
+ patch_type_ == Type::kTypeBssEntry);
return target_dex_file_;
}
dex::TypeIndex TargetTypeIndex() const {
- DCHECK(patch_type_ == Type::kType || patch_type_ == Type::kTypeRelative);
+ DCHECK(patch_type_ == Type::kType ||
+ patch_type_ == Type::kTypeRelative ||
+ patch_type_ == Type::kTypeBssEntry);
return dex::TypeIndex(type_idx_);
}
@@ -334,6 +350,7 @@
uint32_t PcInsnOffset() const {
DCHECK(patch_type_ == Type::kTypeRelative ||
+ patch_type_ == Type::kTypeBssEntry ||
patch_type_ == Type::kStringRelative ||
patch_type_ == Type::kStringBssEntry ||
patch_type_ == Type::kDexCacheArray);
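The new kTypeBssEntry patch follows the same pattern as kStringBssEntry: the code generator records, for every instruction that references the Class .bss slot, its literal offset together with the dex file, the type index, and the offset of the anchor instruction the PC-relative arithmetic is based on. A minimal sketch of how a backend might emit such a patch pair, assuming the LinkerPatch API shown above (the helper name and offsets are illustrative, not part of this change):

#include <cstdint>
#include <vector>
#include "compiled_method.h"  // assumed to provide art::LinkerPatch and art::DexFile

// One patch per instruction touching the .bss slot; both share the same
// pc_insn_offset, which identifies the anchor instruction (e.g. the ADRP on
// arm64 or the MOVW on arm).
static void AddTypeBssEntryPatches(std::vector<art::LinkerPatch>* patches,
                                   const art::DexFile* dex_file,
                                   uint32_t type_idx,
                                   uint32_t anchor_offset,
                                   uint32_t load_offset) {
  patches->push_back(
      art::LinkerPatch::TypeBssEntryPatch(anchor_offset, dex_file, anchor_offset, type_idx));
  patches->push_back(
      art::LinkerPatch::TypeBssEntryPatch(load_offset, dex_file, anchor_offset, type_idx));
}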
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 2950266..5da59f3 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1950,66 +1950,82 @@
DCHECK(!it.HasNext());
}
-void CompilerDriver::Verify(jobject jclass_loader,
- const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings) {
+bool CompilerDriver::FastVerify(jobject jclass_loader,
+ const std::vector<const DexFile*>& dex_files,
+ TimingLogger* timings) {
verifier::VerifierDeps* verifier_deps =
Runtime::Current()->GetCompilerCallbacks()->GetVerifierDeps();
// If there is an existing `VerifierDeps`, try to use it for fast verification.
- if (verifier_deps != nullptr) {
- TimingLogger::ScopedTiming t("Fast Verify", timings);
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<2> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
- MutableHandle<mirror::Class> cls(hs.NewHandle<mirror::Class>(nullptr));
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- if (verifier_deps->ValidateDependencies(class_loader, soa.Self())) {
- // We successfully validated the dependencies, now update class status
- // of verified classes. Note that the dependencies also record which classes
- // could not be fully verified; we could try again, but that would hurt verification
- // time. So instead we assume these classes still need to be verified at
- // runtime.
- for (const DexFile* dex_file : dex_files) {
- // Fetch the list of unverified classes and turn it into a set for faster
- // lookups.
- const std::vector<dex::TypeIndex>& unverified_classes =
- verifier_deps->GetUnverifiedClasses(*dex_file);
- std::set<dex::TypeIndex> set(unverified_classes.begin(), unverified_classes.end());
- for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
- if (set.find(class_def.class_idx_) == set.end()) {
- if (!GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
- // Just update the compiled_classes_ map. The compiler doesn't need to resolve
- // the type.
- compiled_classes_.Overwrite(
- ClassReference(dex_file, i), new CompiledClass(mirror::Class::kStatusVerified));
- } else {
- // Resolve the type, so later compilation stages know they don't need to verify
- // the class.
- const char* descriptor = dex_file->GetClassDescriptor(class_def);
- cls.Assign(class_linker->FindClass(soa.Self(), descriptor, class_loader));
- if (cls.Get() != nullptr) {
- ObjectLock<mirror::Class> lock(soa.Self(), cls);
- mirror::Class::SetStatus(cls, mirror::Class::kStatusVerified, soa.Self());
- } else {
- DCHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
- }
- // Create `VerifiedMethod`s for each methods, the compiler expects one for
- // quickening or compiling.
- // Note that this means:
- // - We're only going to compile methods that did verify.
- // - Quickening will not do checkcast ellision.
- // TODO(ngeoffray): Reconsider this once we refactor compiler filters.
- PopulateVerifiedMethods(*dex_file, i, verification_results_);
+ if (verifier_deps == nullptr) {
+ return false;
+ }
+ TimingLogger::ScopedTiming t("Fast Verify", timings);
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
+ MutableHandle<mirror::Class> cls(hs.NewHandle<mirror::Class>(nullptr));
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (!verifier_deps->ValidateDependencies(class_loader, soa.Self())) {
+ return false;
+ }
+
+ // We successfully validated the dependencies; now update the class status
+ // of verified classes. Note that the dependencies also record which classes
+ // could not be fully verified; we could try again, but that would hurt verification
+ // time. So instead we assume these classes still need to be verified at
+ // runtime.
+ for (const DexFile* dex_file : dex_files) {
+ // Fetch the list of unverified classes and turn it into a set for faster
+ // lookups.
+ const std::vector<dex::TypeIndex>& unverified_classes =
+ verifier_deps->GetUnverifiedClasses(*dex_file);
+ std::set<dex::TypeIndex> set(unverified_classes.begin(), unverified_classes.end());
+ for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ if (set.find(class_def.class_idx_) == set.end()) {
+ if (!GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
+ // Just update the compiled_classes_ map. The compiler doesn't need to resolve
+ // the type.
+ compiled_classes_.Overwrite(
+ ClassReference(dex_file, i), new CompiledClass(mirror::Class::kStatusVerified));
+ } else {
+ // Resolve the type, so later compilation stages know they don't need to verify
+ // the class.
+ const char* descriptor = dex_file->GetClassDescriptor(class_def);
+ cls.Assign(class_linker->FindClass(soa.Self(), descriptor, class_loader));
+ if (cls.Get() != nullptr) {
+ // Check that the class is resolved with the current dex file. We might get
+ // a boot image class, or a class in a different dex file for multidex, and
+ // we should not update the status in that case.
+ if (&cls->GetDexFile() == dex_file) {
+ ObjectLock<mirror::Class> lock(soa.Self(), cls);
+ mirror::Class::SetStatus(cls, mirror::Class::kStatusVerified, soa.Self());
}
+ } else {
+ DCHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
}
+ // Create `VerifiedMethod`s for each method; the compiler expects one for
+ // quickening or compiling.
+ // Note that this means:
+ // - We're only going to compile methods that did verify.
+ // - Quickening will not do checkcast elision.
+ // TODO(ngeoffray): Reconsider this once we refactor compiler filters.
+ PopulateVerifiedMethods(*dex_file, i, verification_results_);
}
}
- return;
}
}
+ return true;
+}
+
+void CompilerDriver::Verify(jobject jclass_loader,
+ const std::vector<const DexFile*>& dex_files,
+ TimingLogger* timings) {
+ if (FastVerify(jclass_loader, dex_files, timings)) {
+ return;
+ }
// If there is no existing `verifier_deps` (because of non-existing vdex), or
// the existing `verifier_deps` is not valid anymore, create a new one for
@@ -2017,7 +2033,7 @@
// Then dex2oat can update the vdex file with these new dependencies.
if (!GetCompilerOptions().IsBootImage()) {
// Create the main VerifierDeps, and set it to this thread.
- verifier_deps = new verifier::VerifierDeps(dex_files);
+ verifier::VerifierDeps* verifier_deps = new verifier::VerifierDeps(dex_files);
Runtime::Current()->GetCompilerCallbacks()->SetVerifierDeps(verifier_deps);
Thread::Current()->SetVerifierDeps(verifier_deps);
// Create per-thread VerifierDeps to avoid contention on the main one.
@@ -2026,6 +2042,7 @@
worker->GetThread()->SetVerifierDeps(new verifier::VerifierDeps(dex_files));
}
}
+
// Note: verification should not be pulling in classes anymore when compiling the boot image,
// as all should have been resolved before. As such, doing this in parallel should still
// be deterministic.
@@ -2041,6 +2058,7 @@
if (!GetCompilerOptions().IsBootImage()) {
// Merge all VerifierDeps into the main one.
+ verifier::VerifierDeps* verifier_deps = Thread::Current()->GetVerifierDeps();
for (ThreadPoolWorker* worker : parallel_thread_pool_->GetWorkers()) {
verifier::VerifierDeps* thread_deps = worker->GetThread()->GetVerifierDeps();
worker->GetThread()->SetVerifierDeps(nullptr);
@@ -2061,7 +2079,10 @@
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *manager_->GetDexFile();
if (!manager_->GetCompiler()->ShouldVerifyClassBasedOnProfile(dex_file, class_def_index)) {
- // Skip verification since the class is not in the profile.
+ // Skip verification since the class is not in the profile, and let the VerifierDeps know
+ // that the class will need to be verified at runtime.
+ verifier::VerifierDeps::MaybeRecordVerificationStatus(
+ dex_file, dex::TypeIndex(class_def_index), verifier::MethodVerifier::kSoftFailure);
return;
}
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
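The restructuring above reduces Verify() to: try VerifierDeps-based fast verification, and only fall back to full verification when no usable dependencies exist or they fail to validate. A simplified, standalone model of the per-class decision FastVerify() makes (hypothetical types; the real code walks DexFile class defs and updates compiled_classes_ or the mirror::Class status):

#include <cstdint>
#include <set>
#include <vector>

enum class ClassStatus { kVerified, kNeedsRuntimeVerification };

// `unverified_types` stands in for VerifierDeps::GetUnverifiedClasses() for one
// dex file; every class def whose type index is not listed there is promoted
// to "verified" without re-running the verifier.
std::vector<ClassStatus> FastVerifyStatuses(const std::vector<uint32_t>& class_def_type_indices,
                                            const std::vector<uint32_t>& unverified_types) {
  std::set<uint32_t> unverified(unverified_types.begin(), unverified_types.end());
  std::vector<ClassStatus> statuses;
  statuses.reserve(class_def_type_indices.size());
  for (uint32_t type_idx : class_def_type_indices) {
    statuses.push_back(unverified.count(type_idx) != 0
                           ? ClassStatus::kNeedsRuntimeVerification
                           : ClassStatus::kVerified);
  }
  return statuses;
}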
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 2e3b7c8..6bfdd4d 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -433,12 +433,18 @@
TimingLogger* timings)
REQUIRES(!Locks::mutator_lock_);
+ // Do fast verification through VerifierDeps if possible. Return whether
+ // verification was successful.
// NO_THREAD_SAFETY_ANALYSIS as the method accesses a guarded value in a
// single-threaded way.
+ bool FastVerify(jobject class_loader,
+ const std::vector<const DexFile*>& dex_files,
+ TimingLogger* timings)
+ NO_THREAD_SAFETY_ANALYSIS;
+
void Verify(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings)
- NO_THREAD_SAFETY_ANALYSIS;
+ TimingLogger* timings);
void VerifyDexFile(jobject class_loader,
const DexFile& dex_file,
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 4a9de7f..79e1785 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -224,6 +224,7 @@
} else {
// LDR/STR 32-bit or 64-bit with imm12 == 0 (unset).
DCHECK(patch.GetType() == LinkerPatch::Type::kDexCacheArray ||
+ patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
DCHECK_EQ(insn & 0xbfbffc00, 0xb9000000) << std::hex << insn;
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index a9da09c..de5af97 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -296,6 +296,7 @@
bss_start_(0u),
bss_size_(0u),
bss_roots_offset_(0u),
+ bss_type_entries_(),
bss_string_entries_(),
oat_data_offset_(0u),
oat_header_(nullptr),
@@ -585,7 +586,7 @@
}
oat_size_ = offset;
- if (!HasBootImage()) {
+ {
TimingLogger::ScopedTiming split("InitBssLayout", timings_);
InitBssLayout(instruction_set);
}
@@ -847,6 +848,10 @@
if (!patch.IsPcRelative()) {
writer_->absolute_patch_locations_.push_back(base_loc + patch.LiteralOffset());
}
+ if (patch.GetType() == LinkerPatch::Type::kTypeBssEntry) {
+ TypeReference ref(patch.TargetTypeDexFile(), patch.TargetTypeIndex());
+ writer_->bss_type_entries_.Overwrite(ref, /* placeholder */ 0u);
+ }
if (patch.GetType() == LinkerPatch::Type::kStringBssEntry) {
StringReference ref(patch.TargetStringDexFile(), patch.TargetStringIndex());
writer_->bss_string_entries_.Overwrite(ref, /* placeholder */ 0u);
@@ -1185,6 +1190,15 @@
target_offset);
break;
}
+ case LinkerPatch::Type::kTypeBssEntry: {
+ TypeReference ref(patch.TargetTypeDexFile(), patch.TargetTypeIndex());
+ uint32_t target_offset = writer_->bss_type_entries_.Get(ref);
+ writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
+ patch,
+ offset_ + literal_offset,
+ target_offset);
+ break;
+ }
case LinkerPatch::Type::kCall: {
uint32_t target_offset = GetTargetOffset(patch);
PatchCodeAddress(&patched_code_, literal_offset, target_offset);
@@ -1619,20 +1633,34 @@
}
void OatWriter::InitBssLayout(InstructionSet instruction_set) {
- DCHECK(!HasBootImage());
+ if (HasBootImage()) {
+ DCHECK(bss_string_entries_.empty());
+ if (bss_type_entries_.empty()) {
+ // Nothing to put in the .bss section.
+ return;
+ }
+ }
// Allocate space for app dex cache arrays in the .bss section.
bss_start_ = RoundUp(oat_size_, kPageSize);
- PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set);
bss_size_ = 0u;
- for (const DexFile* dex_file : *dex_files_) {
- dex_cache_arrays_offsets_.Put(dex_file, bss_start_ + bss_size_);
- DexCacheArraysLayout layout(pointer_size, dex_file);
- bss_size_ += layout.Size();
+ if (!HasBootImage()) {
+ PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set);
+ for (const DexFile* dex_file : *dex_files_) {
+ dex_cache_arrays_offsets_.Put(dex_file, bss_start_ + bss_size_);
+ DexCacheArraysLayout layout(pointer_size, dex_file);
+ bss_size_ += layout.Size();
+ }
}
bss_roots_offset_ = bss_size_;
+ // Prepare offsets for .bss Class entries.
+ for (auto& entry : bss_type_entries_) {
+ DCHECK_EQ(entry.second, 0u);
+ entry.second = bss_start_ + bss_size_;
+ bss_size_ += sizeof(GcRoot<mirror::Class>);
+ }
// Prepare offsets for .bss String entries.
for (auto& entry : bss_string_entries_) {
DCHECK_EQ(entry.second, 0u);
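The resulting .bss layout is: app dex-cache arrays (skipped for boot images), then one GC-root-sized slot per Class entry, then one per String entry, with bss_roots_offset_ marking where the GC roots begin. A standalone sketch of that offset assignment, with offsets kept relative to the start of .bss (the real code stores bss_start_ + offset so the value can be used directly as a patch target):

#include <cstddef>
#include <map>
#include <string>

// The map keys stand in for TypeReference/StringReference; each value receives
// the offset of its .bss slot, in map iteration order, mirroring InitBssLayout().
struct BssLayout {
  size_t roots_offset;  // where the GC roots begin within .bss
  size_t size;          // total .bss size
};

inline BssLayout LayOutBss(size_t dex_cache_arrays_size,  // 0u when compiling a boot image
                           std::map<std::string, size_t>* type_entries,
                           std::map<std::string, size_t>* string_entries,
                           size_t gc_root_size) {  // sizeof(GcRoot<...>), typically 4u
  size_t bss_size = dex_cache_arrays_size;
  const size_t roots_offset = bss_size;
  for (auto& entry : *type_entries) {    // Class entries first...
    entry.second = bss_size;
    bss_size += gc_root_size;
  }
  for (auto& entry : *string_entries) {  // ...then String entries.
    entry.second = bss_size;
    bss_size += gc_root_size;
  }
  return BssLayout{roots_offset, bss_size};
}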
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 8d087f4..db84166 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -31,6 +31,7 @@
#include "os.h"
#include "safe_map.h"
#include "string_reference.h"
+#include "utils/type_reference.h"
namespace art {
@@ -372,6 +373,11 @@
// The offset of the GC roots in .bss section.
size_t bss_roots_offset_;
+ // Map for allocating Class entries in .bss. Indexed by TypeReference for the source
+ // type in the dex file with the "type value comparator" for deduplication. The value
+ // is the target offset for patching, starting at `bss_start_ + bss_roots_offset_`.
+ SafeMap<TypeReference, size_t, TypeReferenceValueComparator> bss_type_entries_;
+
// Map for allocating String entries in .bss. Indexed by StringReference for the source
// string in the dex file with the "string value comparator" for deduplication. The value
// is the target offset for patching, starting at `bss_start_ + bss_roots_offset_`.
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index f896f11..8cf4089 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -63,7 +63,8 @@
driver,
interpreter_metadata,
compiler_stats,
- dex_cache) {}
+ dex_cache,
+ handles) {}
// Only for unit testing.
HGraphBuilder(HGraph* graph,
@@ -90,7 +91,8 @@
/* compiler_driver */ nullptr,
/* interpreter_metadata */ nullptr,
/* compiler_stats */ nullptr,
- null_dex_cache_) {}
+ null_dex_cache_,
+ handles) {}
GraphAnalysisResult BuildGraph();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index f00648f..70c2738 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -367,6 +367,12 @@
InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}
+void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) {
+ MoveConstant(invoke->GetLocations()->GetTemp(0), static_cast<int32_t>(invoke->GetType()));
+ QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
+ InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
+}
+
void CodeGenerator::CreateUnresolvedFieldLocationSummary(
HInstruction* field_access,
Primitive::Type field_type,
@@ -491,30 +497,33 @@
}
}
-// TODO: Remove argument `code_generator_supports_read_barrier` when
-// all code generators have read barrier support.
-void CodeGenerator::CreateLoadClassLocationSummary(HLoadClass* cls,
- Location runtime_type_index_location,
- Location runtime_return_location,
- bool code_generator_supports_read_barrier) {
- ArenaAllocator* allocator = cls->GetBlock()->GetGraph()->GetArena();
- LocationSummary::CallKind call_kind = cls->NeedsAccessCheck()
- ? LocationSummary::kCallOnMainOnly
- : (((code_generator_supports_read_barrier && kEmitCompilerReadBarrier) ||
- cls->CanCallRuntime())
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall);
- LocationSummary* locations = new (allocator) LocationSummary(cls, call_kind);
- if (cls->NeedsAccessCheck()) {
- locations->SetInAt(0, Location::NoLocation());
- locations->AddTemp(runtime_type_index_location);
- locations->SetOut(runtime_return_location);
- } else {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
- }
+void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
+ Location runtime_type_index_location,
+ Location runtime_return_location) {
+ DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod);
+ DCHECK_EQ(cls->InputCount(), 1u);
+ LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetArena()) LocationSummary(
+ cls, LocationSummary::kCallOnMainOnly);
+ locations->SetInAt(0, Location::NoLocation());
+ locations->AddTemp(runtime_type_index_location);
+ locations->SetOut(runtime_return_location);
}
+void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
+ DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod);
+ LocationSummary* locations = cls->GetLocations();
+ MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
+ if (cls->NeedsAccessCheck()) {
+ CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+ InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
+ } else if (cls->MustGenerateClinitCheck()) {
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ InvokeRuntime(kQuickInitializeStaticStorage, cls, cls->GetDexPc());
+ } else {
+ CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ InvokeRuntime(kQuickInitializeType, cls, cls->GetDexPc());
+ }
+}
void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
// The DCHECKS below check that a register is not specified twice in
@@ -927,10 +936,10 @@
if (environment->GetParent() != nullptr) {
// We emit the parent environment first.
EmitEnvironment(environment->GetParent(), slow_path);
- stack_map_stream_.BeginInlineInfoEntry(environment->GetMethodIdx(),
+ stack_map_stream_.BeginInlineInfoEntry(environment->GetMethod(),
environment->GetDexPc(),
- environment->GetInvokeType(),
- environment->Size());
+ environment->Size(),
+ &graph_->GetDexFile());
}
// Walk over the environment, and record the location of dex registers.
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 6366b98..38d532e 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -426,12 +426,12 @@
}
- // Perfoms checks pertaining to an InvokeRuntime call.
+ // Performs checks pertaining to an InvokeRuntime call.
void ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
SlowPathCode* slow_path);
- // Perfoms checks pertaining to an InvokeRuntimeWithoutRecordingPcInfo call.
+ // Performs checks pertaining to an InvokeRuntimeWithoutRecordingPcInfo call.
static void ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
SlowPathCode* slow_path);
@@ -495,6 +495,8 @@
void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);
+ void GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke);
+
void CreateUnresolvedFieldLocationSummary(
HInstruction* field_access,
Primitive::Type field_type,
@@ -507,11 +509,10 @@
uint32_t dex_pc,
const FieldAccessCallingConvention& calling_convention);
- // TODO: This overlaps a bit with MoveFromReturnRegister. Refactor for a better design.
- static void CreateLoadClassLocationSummary(HLoadClass* cls,
- Location runtime_type_index_location,
- Location runtime_return_location,
- bool code_generator_supports_read_barrier = false);
+ static void CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
+ Location runtime_type_index_location,
+ Location runtime_return_location);
+ void GenerateLoadClassRuntimeCall(HLoadClass* cls);
static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke);
@@ -521,7 +522,7 @@
virtual void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path) = 0;
+ SlowPathCode* slow_path = nullptr) = 0;
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 3009103..07b1746 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -371,22 +371,23 @@
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCodeARM(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeARM(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- arm_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ arm_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -400,6 +401,23 @@
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the .bss entry address in the fast path,
+ // so that we can avoid another calculation here.
+ CodeGeneratorARM::PcRelativePatchInfo* labels =
+ arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ __ BindTrackedLabel(&labels->movw_label);
+ __ movw(IP, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->movt_label);
+ __ movt(IP, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->add_pc_label);
+ __ add(IP, IP, ShifterOperand(PC));
+ __ str(locations->Out().AsRegister<Register>(), Address(IP));
+ }
__ b(GetExitLabel());
}
@@ -409,10 +427,6 @@
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -430,7 +444,7 @@
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = load->GetStringIndex().index_;
+ const dex::StringIndex string_index = load->GetStringIndex();
Register out = locations->Out().AsRegister<Register>();
Register temp = locations->GetTemp(0).AsRegister<Register>();
constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
@@ -449,7 +463,7 @@
__ mov(entry_address, ShifterOperand(temp));
}
- __ LoadImmediate(calling_convention.GetRegisterAt(0), string_index);
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), string_index.index_);
arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -1208,6 +1222,7 @@
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
@@ -2370,6 +2385,14 @@
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderARM::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
void LocationsBuilderARM::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -3936,7 +3959,6 @@
} else {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(Location::RegisterLocation(R0));
}
@@ -3954,7 +3976,7 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
@@ -5709,17 +5731,11 @@
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kJitTableAddress:
- break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
- // We disable pc-relative load when there is an irreducible loop, as the optimization
- // is incompatible with it.
- // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- if (GetGraph()->HasIrreducibleLoops()) {
- return HLoadClass::LoadKind::kDexCacheViaMethod;
- }
+ break;
+ case HLoadClass::LoadKind::kJitTableAddress:
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
@@ -5728,15 +5744,16 @@
}
void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(R0),
- /* code_generator_supports_read_barrier */ true);
+ Location::RegisterLocation(R0));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -5747,24 +5764,23 @@
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod ||
- load_kind == HLoadClass::LoadKind::kDexCachePcRelative) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
@@ -5772,7 +5788,7 @@
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -5786,12 +5802,14 @@
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ LoadLiteral(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
CodeGeneratorARM::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
@@ -5805,41 +5823,36 @@
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ CodeGeneratorARM::PcRelativePatchInfo* labels =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ __ BindTrackedLabel(&labels->movw_label);
+ __ movw(out, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->movt_label);
+ __ movt(out, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->add_pc_label);
+ __ add(out, out, ShifterOperand(PC));
+ GenerateGcRootFieldLoad(cls, out_loc, out, 0, kCompilerReadBarrierOption);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
__ LoadLiteral(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
- cls->GetAddress()));
+ cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- Register base_reg = locations->InAt(0).AsRegister<Register>();
- HArmDexCacheArraysBase* base = cls->InputAt(0)->AsArmDexCacheArraysBase();
- int32_t offset = cls->GetDexCacheElementOffset() - base->GetElementOffset();
- // /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- Register current_method = locations->InAt(0).AsRegister<Register>();
- __ LoadFromOffset(kLoadWord,
- out,
- current_method,
- ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- }
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -5947,6 +5960,7 @@
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out, codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
load->GetStringIndex()));
return; // No dex cache slow path.
@@ -5954,7 +5968,7 @@
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorARM::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
__ BindTrackedLabel(&labels->movw_label);
__ movw(out, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->movt_label);
@@ -5974,7 +5988,7 @@
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
Register temp = locations->GetTemp(0).AsRegister<Register>();
CodeGeneratorARM::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
__ BindTrackedLabel(&labels->movw_label);
__ movw(temp, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->movt_label);
@@ -7280,8 +7294,8 @@
}
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeStringPatch(
- const DexFile& dex_file, uint32_t string_index) {
- return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeTypePatch(
@@ -7289,6 +7303,11 @@
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
}
+CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewTypeBssEntryPatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
+}
+
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -7332,8 +7351,9 @@
Literal* CodeGeneratorARM::DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -7367,6 +7387,7 @@
/* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
boot_image_address_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
@@ -7381,12 +7402,17 @@
target_string.string_index.index_));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
Literal* literal = entry.second;
@@ -7396,8 +7422,6 @@
target_type.dex_file,
target_type.type_index.index_));
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
- linker_patches);
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
Literal* literal = entry.second;
@@ -7405,6 +7429,7 @@
uint32_t literal_offset = literal->GetLabel()->Position();
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
+ DCHECK_EQ(size, linker_patches->size());
}
Literal* CodeGeneratorARM::DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map) {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index d5968e0..52d1857 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -481,8 +481,10 @@
Label add_pc_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
@@ -494,7 +496,7 @@
Handle<mirror::String> handle);
Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address);
+ Handle<mirror::Class> handle);
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
@@ -635,8 +637,10 @@
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
TypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4b6a9be..b094e54 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -276,22 +276,23 @@
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCodeARM64(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeARM64(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ Mov(calling_convention.GetRegisterAt(0).W(), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- arm64_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ arm64_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -302,11 +303,32 @@
Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
- Primitive::Type type = at_->GetType();
+ Primitive::Type type = instruction_->GetType();
arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
-
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ UseScratchRegisterScope temps(arm64_codegen->GetVIXLAssembler());
+ Register temp = temps.AcquireX();
+ const DexFile& dex_file = cls_->GetDexFile();
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the ADRP in the fast path, so that we
+ // can avoid the ADRP here.
+ vixl::aarch64::Label* adrp_label =
+ arm64_codegen->NewBssEntryTypePatch(dex_file, type_index);
+ arm64_codegen->EmitAdrpPlaceholder(adrp_label, temp);
+ vixl::aarch64::Label* strp_label =
+ arm64_codegen->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
+ {
+ SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
+ __ Bind(strp_label);
+ __ str(RegisterFrom(locations->Out(), Primitive::kPrimNot),
+ MemOperand(temp, /* offset placeholder */ 0));
+ }
+ }
__ B(GetExitLabel());
}
@@ -316,10 +338,6 @@
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -349,8 +367,8 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
- __ Mov(calling_convention.GetRegisterAt(0).W(), string_index);
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
+ __ Mov(calling_convention.GetRegisterAt(0).W(), string_index.index_);
arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
Primitive::Type type = instruction_->GetType();
@@ -1154,6 +1172,7 @@
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
@@ -3994,7 +4013,7 @@
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
// Add ADRP with its PC-relative DexCache access patch.
- const DexFile& dex_file = invoke->GetDexFile();
+ const DexFile& dex_file = invoke->GetDexFileForPcRelativeDexCache();
uint32_t element_offset = invoke->GetDexCacheArrayOffset();
vixl::aarch64::Label* adrp_label = NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
@@ -4080,11 +4099,20 @@
__ Blr(lr);
}
+void LocationsBuilderARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeStringPatch(
const DexFile& dex_file,
- uint32_t string_index,
+ dex::StringIndex string_index,
vixl::aarch64::Label* adrp_label) {
- return NewPcRelativePatch(dex_file, string_index, adrp_label, &pc_relative_string_patches_);
+ return
+ NewPcRelativePatch(dex_file, string_index.index_, adrp_label, &pc_relative_string_patches_);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeTypePatch(
@@ -4094,6 +4122,13 @@
return NewPcRelativePatch(dex_file, type_index.index_, adrp_label, &pc_relative_type_patches_);
}
+vixl::aarch64::Label* CodeGeneratorARM64::NewBssEntryTypePatch(
+ const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ vixl::aarch64::Label* adrp_label) {
+ return NewPcRelativePatch(dex_file, type_index.index_, adrp_label, &type_bss_entry_patches_);
+}
+
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file,
uint32_t element_offset,
@@ -4146,8 +4181,9 @@
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
- const DexFile& dex_file, dex::TypeIndex type_index, uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+ const DexFile& dex_file, dex::TypeIndex type_index, Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -4200,6 +4236,7 @@
pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
pc_relative_type_patches_.size() +
+ type_bss_entry_patches_.size() +
boot_image_address_patches_.size();
linker_patches->reserve(size);
for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
@@ -4216,12 +4253,17 @@
target_string.string_index.index_));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
vixl::aarch64::Literal<uint32_t>* literal = entry.second;
@@ -4229,13 +4271,12 @@
target_type.dex_file,
target_type.type_index.index_));
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
- linker_patches);
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
vixl::aarch64::Literal<uint32_t>* literal = entry.second;
linker_patches->push_back(LinkerPatch::RecordPosition(literal->GetOffset()));
}
+ DCHECK_EQ(size, linker_patches->size());
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value,
@@ -4298,12 +4339,12 @@
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
+ case HLoadClass::LoadKind::kBssEntry:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJitCompilation());
- break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -4311,15 +4352,16 @@
}
void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
LocationFrom(calling_convention.GetRegisterAt(0)),
- LocationFrom(vixl::aarch64::x0),
- /* code_generator_supports_read_barrier */ true);
+ LocationFrom(vixl::aarch64::x0));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -4330,21 +4372,21 @@
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(cls->GetLocations()->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
Location out_loc = cls->GetLocations()->Out();
Register out = OutputRegister(cls);
@@ -4353,7 +4395,7 @@
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -4387,14 +4429,35 @@
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK(cls->GetAddress() != 0u && IsUint<32>(cls->GetAddress()));
- __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(cls->GetAddress()));
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
+ __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ break;
+ }
+ case HLoadClass::LoadKind::kBssEntry: {
+ // Add ADRP with its PC-relative Class .bss entry patch.
+ const DexFile& dex_file = cls->GetDexFile();
+ dex::TypeIndex type_index = cls->GetTypeIndex();
+ vixl::aarch64::Label* adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
+ codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
+ // Add LDR with its PC-relative Class patch.
+ vixl::aarch64::Label* ldr_label =
+ codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
+ // /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
+ GenerateGcRootFieldLoad(cls,
+ cls->GetLocations()->Out(),
+ out.X(),
+ /* placeholder */ 0u,
+ ldr_label,
+ kCompilerReadBarrierOption);
+ generate_null_check = true;
break;
}
case HLoadClass::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
- cls->GetAddress()));
+ cls->GetClass()));
GenerateGcRootFieldLoad(cls,
out_loc,
out.X(),
@@ -4403,43 +4466,9 @@
kCompilerReadBarrierOption);
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- // Add ADRP with its PC-relative DexCache access patch.
- const DexFile& dex_file = cls->GetDexFile();
- uint32_t element_offset = cls->GetDexCacheElementOffset();
- vixl::aarch64::Label* adrp_label =
- codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
- codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
- // Add LDR with its PC-relative DexCache access patch.
- vixl::aarch64::Label* ldr_label =
- codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
- // /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
- GenerateGcRootFieldLoad(cls,
- out_loc,
- out.X(),
- /* offset placeholder */ 0,
- ldr_label,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- MemberOffset resolved_types_offset =
- ArtMethod::DexCacheResolvedTypesOffset(kArm64PointerSize);
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- Register current_method = InputRegisterAt(cls, 0);
- __ Ldr(out.X(), MemOperand(current_method, resolved_types_offset.Int32Value()));
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- GenerateGcRootFieldLoad(cls,
- out_loc,
- out.X(),
- CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_),
- /* fixup_label */ nullptr,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -4494,11 +4523,11 @@
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ break;
}
return desired_string_load_kind;
}
@@ -4542,7 +4571,7 @@
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
// Add ADRP with its PC-relative String patch.
const DexFile& dex_file = load->GetDexFile();
- uint32_t string_index = load->GetStringIndex().index_;
+ const dex::StringIndex string_index = load->GetStringIndex();
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
@@ -4562,7 +4591,7 @@
case HLoadString::LoadKind::kBssEntry: {
// Add ADRP with its PC-relative String .bss entry patch.
const DexFile& dex_file = load->GetDexFile();
- uint32_t string_index = load->GetStringIndex().index_;
+ const dex::StringIndex string_index = load->GetStringIndex();
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
Register temp = temps.AcquireX();
@@ -4744,7 +4773,6 @@
locations->AddTemp(LocationFrom(kArtMethodRegister));
} else {
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -4762,7 +4790,7 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index d6a5f9d..a9dca92 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -540,7 +540,7 @@
// ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
// to the associated ADRP patch label).
vixl::aarch64::Label* NewPcRelativeStringPatch(const DexFile& dex_file,
- uint32_t string_index,
+ dex::StringIndex string_index,
vixl::aarch64::Label* adrp_label = nullptr);
// Add a new PC-relative type patch for an instruction and return the label
@@ -551,6 +551,14 @@
dex::TypeIndex type_index,
vixl::aarch64::Label* adrp_label = nullptr);
+ // Add a new .bss entry type patch for an instruction and return the label
+ // to be bound before the instruction. The instruction will be either the
+ // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
+ // to the associated ADRP patch label).
+ vixl::aarch64::Label* NewBssEntryTypePatch(const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ vixl::aarch64::Label* adrp_label = nullptr);
+
// Add a new PC-relative dex cache array patch for an instruction and return
// the label to be bound before the instruction. The instruction will be
// either the ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label`
@@ -571,7 +579,7 @@
Handle<mirror::String> handle);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex string_index,
- uint64_t address);
+ Handle<mirror::Class> handle);
void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg);
void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
@@ -744,8 +752,10 @@
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
TypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
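
The new NewBssEntryTypePatch() declaration and type_bss_entry_patches_ member support the HLoadClass::LoadKind::kBssEntry path used throughout this change: the compiled code loads a zero-initialized .bss slot, null-checks it (generate_null_check = true), and the slow path stores the resolved class back into the slot. A self-contained sketch of that shape, with hypothetical names (in the real code the slot lives in the oat file's .bss and its address comes from linker patches, not a C++ global):

    #include <cstdio>

    struct Class { const char* descriptor; };

    static Class* type_bss_entry = nullptr;  // zero-initialized .bss slot

    Class* ResolveTypeSlowPath() {
      static Class resolved{"LFoo;"};        // pretend resolution result
      type_bss_entry = &resolved;            // slow path stores back to the entry
      return &resolved;
    }

    Class* LoadClassBssEntry() {
      Class* klass = type_bss_entry;         // fast path: one PC-relative load
      if (klass == nullptr) {                // generate_null_check = true
        klass = ResolveTypeSlowPath();
      }
      return klass;
    }

    int main() {
      std::printf("%s\n", LoadClassBssEntry()->descriptor);  // resolves and caches
      std::printf("%s\n", LoadClassBssEntry()->descriptor);  // hits the cached entry
      return 0;
    }
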
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index b1f6d59..05a76e1 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -394,22 +394,23 @@
class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
public:
LoadClassSlowPathARMVIXL(HLoadClass* cls, HInstruction* at, uint32_t dex_pc, bool do_clinit)
- : SlowPathCodeARMVIXL(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeARMVIXL(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- __ Mov(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ Mov(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- arm_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ arm_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -423,6 +424,18 @@
arm_codegen->Move32(locations->Out(), LocationFrom(r0));
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the .bss entry address in the fast path,
+ // so that we can avoid another calculation here.
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ arm_codegen->EmitMovwMovtPlaceholder(labels, ip);
+ __ Str(OutputRegister(cls_), MemOperand(ip));
+ }
__ B(GetExitLabel());
}
@@ -432,10 +445,6 @@
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -454,7 +463,7 @@
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = load->GetStringIndex().index_;
+ const dex::StringIndex string_index = load->GetStringIndex();
vixl32::Register out = OutputRegister(load);
vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
@@ -473,7 +482,7 @@
__ Mov(entry_address, temp);
}
- __ Mov(calling_convention.GetRegisterAt(0), string_index);
+ __ Mov(calling_convention.GetRegisterAt(0), string_index.index_);
arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -1252,6 +1261,7 @@
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
@@ -2445,6 +2455,14 @@
}
}
+void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -3948,7 +3966,6 @@
} else {
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(LocationFrom(r0));
}
@@ -3970,7 +3987,7 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
@@ -5790,17 +5807,11 @@
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kJitTableAddress:
- break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
- // We disable pc-relative load when there is an irreducible loop, as the optimization
- // is incompatible with it.
- // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- if (GetGraph()->HasIrreducibleLoops()) {
- return HLoadClass::LoadKind::kDexCacheViaMethod;
- }
+ break;
+ case HLoadClass::LoadKind::kJitTableAddress:
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
@@ -5809,15 +5820,16 @@
}
void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
LocationFrom(calling_convention.GetRegisterAt(0)),
- LocationFrom(r0),
- /* code_generator_supports_read_barrier */ true);
+ LocationFrom(r0));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -5828,24 +5840,23 @@
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod ||
- load_kind == HLoadClass::LoadKind::kDexCachePcRelative) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
vixl32::Register out = OutputRegister(cls);
@@ -5853,7 +5864,7 @@
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -5867,12 +5878,14 @@
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ Ldr(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
@@ -5881,43 +5894,31 @@
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ GenerateGcRootFieldLoad(cls, out_loc, out, 0, kCompilerReadBarrierOption);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
- cls->GetAddress()));
+ cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- vixl32::Register base_reg = InputRegisterAt(cls, 0);
- HArmDexCacheArraysBase* base = cls->InputAt(0)->AsArmDexCacheArraysBase();
- int32_t offset = cls->GetDexCacheElementOffset() - base->GetElementOffset();
- // /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- vixl32::Register current_method = InputRegisterAt(cls, 0);
- const int32_t resolved_types_offset =
- ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value();
- GetAssembler()->LoadFromOffset(kLoadWord, out, current_method, resolved_types_offset);
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- default:
- TODO_VIXL32(FATAL);
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -6039,7 +6040,7 @@
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitMovwMovtPlaceholder(labels, out);
return; // No dex cache slow path.
}
@@ -6054,7 +6055,7 @@
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitMovwMovtPlaceholder(labels, temp);
GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
@@ -7398,8 +7399,8 @@
}
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeStringPatch(
- const DexFile& dex_file, uint32_t string_index) {
- return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeTypePatch(
@@ -7407,6 +7408,11 @@
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
}
+CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewTypeBssEntryPatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
+}
+
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -7463,8 +7469,9 @@
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() {
@@ -7500,6 +7507,7 @@
/* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
boot_image_address_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
@@ -7514,12 +7522,17 @@
target_string.string_index.index_));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
VIXLUInt32Literal* literal = entry.second;
@@ -7529,8 +7542,6 @@
target_type.dex_file,
target_type.type_index.index_));
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
- linker_patches);
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
VIXLUInt32Literal* literal = entry.second;
@@ -7538,6 +7549,7 @@
uint32_t literal_offset = literal->GetLocation();
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
+ DCHECK_EQ(size, linker_patches->size());
}
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal(
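
The reworked EmitLinkerPatches() above reserves the exact patch count up front, emits RelativeType patches only for boot-image compiles (app compiles must have none and go through TypeBssEntry patches instead), and checks the final size. A simplified sketch of that bookkeeping with placeholder types, not the real CodeGeneratorARMVIXL interface:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    enum class PatchKind { kRelativeType, kTypeBssEntry };

    void EmitTypePatches(bool compiling_boot_image,
                         const std::vector<unsigned>& pc_relative_type_offsets,
                         const std::vector<unsigned>& type_bss_entry_offsets,
                         std::vector<PatchKind>* linker_patches) {
      const size_t size = pc_relative_type_offsets.size() + type_bss_entry_offsets.size();
      linker_patches->reserve(size);
      if (!compiling_boot_image) {
        // Mirrors DCHECK(pc_relative_type_patches_.empty()).
        assert(pc_relative_type_offsets.empty());
      } else {
        linker_patches->insert(linker_patches->end(),
                               pc_relative_type_offsets.size(),
                               PatchKind::kRelativeType);
      }
      linker_patches->insert(linker_patches->end(),
                             type_bss_entry_offsets.size(),
                             PatchKind::kTypeBssEntry);
      // Mirrors DCHECK_EQ(size, linker_patches->size()).
      assert(size == linker_patches->size());
    }

    int main() {
      std::vector<PatchKind> patches;
      EmitTypePatches(/* compiling_boot_image */ false, {}, {0x10u, 0x40u}, &patches);
      assert(patches.size() == 2u);
      return 0;
    }
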
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 200a463..be65353 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -562,8 +562,10 @@
vixl::aarch32::Label add_pc_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
VIXLUInt32Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
@@ -577,7 +579,7 @@
Handle<mirror::String> handle);
VIXLUInt32Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address);
+ Handle<mirror::Class> handle);
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
@@ -731,8 +733,10 @@
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
TypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
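
Several interfaces in these headers also move from a raw uint32_t string index to dex::StringIndex. A minimal sketch of what such a wrapper buys (the struct below is a hypothetical stand-in, not ART's dex::StringIndex): string indices, type indices and dex cache offsets can no longer be swapped at a call site without a compile error.

    #include <cstdint>
    #include <cstdio>

    struct StringIndex {
      explicit StringIndex(uint32_t index) : index_(index) {}
      uint32_t index_;
    };

    void NewPcRelativeStringPatch(StringIndex string_index) {
      std::printf("string patch for index %u\n", string_index.index_);
    }

    int main() {
      NewPcRelativeStringPatch(StringIndex(42u));
      // NewPcRelativeStringPatch(42u);  // no longer compiles: explicit constructor
      return 0;
    }
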
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 9af03e8..24234e1 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -213,23 +213,24 @@
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCodeMIPS(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeMIPS(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- mips_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ mips_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -240,11 +241,26 @@
Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
- Primitive::Type type = at_->GetType();
+ Primitive::Type type = instruction_->GetType();
mips_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the .bss entry address in the fast path,
+ // so that we can avoid another calculation here.
+ bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
+ Register base = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
+ DCHECK_NE(out.AsRegister<Register>(), AT);
+ CodeGeneratorMIPS::PcRelativePatchInfo* info =
+ mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ mips_codegen->EmitPcRelativeAddressPlaceholder(info, TMP, base);
+ __ StoreToOffset(kStoreWord, out.AsRegister<Register>(), TMP, 0);
+ }
__ B(GetExitLabel());
}
@@ -254,10 +270,6 @@
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -281,8 +293,8 @@
InvokeRuntimeCallingConvention calling_convention;
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = load->GetStringIndex().index_;
- __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
+ const dex::StringIndex string_index = load->GetStringIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
Primitive::Type type = instruction_->GetType();
@@ -465,6 +477,7 @@
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
clobbered_ra_(false) {
@@ -1007,6 +1020,7 @@
pc_relative_dex_cache_patches_.size() +
pc_relative_string_patches_.size() +
pc_relative_type_patches_.size() +
+ type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
boot_image_type_patches_.size() +
boot_image_address_patches_.size();
@@ -1014,13 +1028,16 @@
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
for (const auto& entry : boot_image_string_patches_) {
const StringReference& target_string = entry.first;
@@ -1047,11 +1064,12 @@
uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
+ DCHECK_EQ(size, linker_patches->size());
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeStringPatch(
- const DexFile& dex_file, uint32_t string_index) {
- return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeTypePatch(
@@ -1059,6 +1077,11 @@
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
}
+CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewTypeBssEntryPatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
+}
+
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -5154,6 +5177,14 @@
}
}
+void LocationsBuilderMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
if (invoke->GetLocations()->Intrinsified()) {
IntrinsicCodeGeneratorMIPS intrinsic(codegen);
@@ -5186,14 +5217,14 @@
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- fallback_load = false;
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
// TODO: implement.
fallback_load = true;
break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ fallback_load = false;
+ break;
}
if (fallback_load) {
desired_string_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
@@ -5222,15 +5253,13 @@
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
+ case HLoadClass::LoadKind::kBssEntry:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
fallback_load = true;
break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJitCompilation());
- // TODO: Create as many MipsDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
fallback_load = false;
break;
@@ -5427,34 +5456,32 @@
}
void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(V0),
- /* code_generator_supports_read_barrier */ false); // TODO: revisit this bool.
+ Location::RegisterLocation(V0));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadClass::LoadKind::kBootImageAddress:
+ case HLoadClass::LoadKind::kBssEntry:
if (codegen_->GetInstructionSetFeatures().IsR6()) {
break;
}
FALLTHROUGH_INTENDED;
- // We need an extra register for PC-relative dex cache accesses.
- case HLoadClass::LoadKind::kDexCachePcRelative:
case HLoadClass::LoadKind::kReferrersClass:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
locations->SetInAt(0, Location::RequiresRegister());
break;
default:
@@ -5463,16 +5490,17 @@
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
Register base_or_current_method_reg;
@@ -5480,12 +5508,11 @@
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadClass::LoadKind::kBootImageAddress:
+ case HLoadClass::LoadKind::kBssEntry:
base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
break;
- // We need an extra register for PC-relative dex cache accesses.
- case HLoadClass::LoadKind::kDexCachePcRelative:
case HLoadClass::LoadKind::kReferrersClass:
case HLoadClass::LoadKind::kDexCacheViaMethod:
base_or_current_method_reg = locations->InAt(0).AsRegister<Register>();
@@ -5508,14 +5535,14 @@
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
@@ -5523,38 +5550,29 @@
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK(!kEmitCompilerReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ CodeGeneratorMIPS::PcRelativePatchInfo* info =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
+ __ LoadFromOffset(kLoadWord, out, out, 0);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
LOG(FATAL) << "Unimplemented";
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- HMipsDexCacheArraysBase* base = cls->InputAt(0)->AsMipsDexCacheArraysBase();
- int32_t offset =
- cls->GetDexCacheElementOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset;
- // /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(cls, out_loc, base_or_current_method_reg, offset);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- __ LoadFromOffset(kLoadWord,
- out,
- base_or_current_method_reg,
- ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset);
- generate_null_check = !cls->IsInDexCache();
- }
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -5649,6 +5667,7 @@
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
@@ -5657,7 +5676,7 @@
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
return; // No dex cache slow path.
}
@@ -5673,7 +5692,7 @@
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
__ LoadFromOffset(kLoadWord, out, out, 0);
SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
@@ -5903,7 +5922,6 @@
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -5920,7 +5938,7 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
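
The MIPS hunks above keep routing boot-image classes, strings and addresses through the Deduplicate*Literal helpers, which (as the jit_class_patches_.GetOrCreate call earlier in this change shows) boil down to a get-or-create lookup keyed by (dex file, index). A sketch of that pattern with placeholder key and literal types:

    #include <cstdio>
    #include <map>
    #include <string>

    // Each key gets exactly one literal, created on first request and shared
    // by every later load of the same class or string.
    template <typename Key, typename Value, typename Factory>
    Value* GetOrCreate(std::map<Key, Value>* map, const Key& key, Factory factory) {
      auto it = map->lower_bound(key);
      if (it == map->end() || map->key_comp()(key, it->first)) {
        it = map->emplace_hint(it, key, factory());
      }
      return &it->second;
    }

    int main() {
      std::map<std::string, int> boot_image_type_literals;
      int* first = GetOrCreate(&boot_image_type_literals, std::string("LFoo;"), [] { return 100; });
      int* again = GetOrCreate(&boot_image_type_literals, std::string("LFoo;"), [] { return 200; });
      std::printf("%d %d deduplicated=%d\n", *first, *again, first == again);  // 100 100 1
      return 0;
    }
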
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 7b0812c..c8fd325 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -452,8 +452,10 @@
MipsLabel pc_rel_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
@@ -504,8 +506,10 @@
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
BootTypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 046d59c..fc8fb7a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -167,22 +167,23 @@
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCodeMIPS64(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeMIPS64(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- mips64_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ mips64_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -193,11 +194,24 @@
Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
- Primitive::Type type = at_->GetType();
+ Primitive::Type type = instruction_->GetType();
mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the .bss entry address in the fast path,
+ // so that we can avoid another calculation here.
+ DCHECK_NE(out.AsRegister<GpuRegister>(), AT);
+ CodeGeneratorMIPS64::PcRelativePatchInfo* info =
+ mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info, AT);
+ __ Sw(out.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ }
__ Bc(GetExitLabel());
}
@@ -207,10 +221,6 @@
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -234,8 +244,8 @@
InvokeRuntimeCallingConvention calling_convention;
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
- __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
instruction_->GetDexPc(),
@@ -422,6 +432,7 @@
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Save RA (containing the return address) to mimic Quick.
@@ -922,6 +933,7 @@
pc_relative_dex_cache_patches_.size() +
pc_relative_string_patches_.size() +
pc_relative_type_patches_.size() +
+ type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
boot_image_type_patches_.size() +
boot_image_address_patches_.size();
@@ -929,13 +941,16 @@
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
for (const auto& entry : boot_image_string_patches_) {
const StringReference& target_string = entry.first;
@@ -962,11 +977,12 @@
uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
+ DCHECK_EQ(size, linker_patches->size());
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeStringPatch(
- const DexFile& dex_file, uint32_t string_index) {
- return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeTypePatch(
@@ -974,6 +990,11 @@
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
}
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewTypeBssEntryPatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
+}
+
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -3095,7 +3116,7 @@
Location root,
GpuRegister obj,
uint32_t offset) {
- // When handling HLoadClass::LoadKind::kDexCachePcRelative, the caller calls
+ // When handling PC-relative loads, the caller calls
// EmitPcRelativeAddressPlaceholderHigh() and then GenerateGcRootFieldLoad().
// The relative patcher expects the two methods to emit the following patchable
// sequence of instructions in this case:
@@ -3256,6 +3277,14 @@
HandleInvoke(invoke);
}
+void LocationsBuilderMIPS64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
if (invoke->GetLocations()->Intrinsified()) {
IntrinsicCodeGeneratorMIPS64 intrinsic(codegen);
@@ -3314,14 +3343,14 @@
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
+ case HLoadClass::LoadKind::kBssEntry:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
// TODO: implement.
fallback_load = true;
break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJitCompilation());
- break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -3366,7 +3395,7 @@
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
uint32_t offset = invoke->GetDexCacheArrayOffset();
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset);
+ NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset);
EmitPcRelativeAddressPlaceholderHigh(info, AT);
__ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
break;
@@ -3474,38 +3503,38 @@
}
void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- calling_convention.GetReturnLocation(Primitive::kPrimNot),
- /* code_generator_supports_read_barrier */ false);
+ calling_convention.GetReturnLocation(Primitive::kPrimNot));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
GpuRegister out = out_loc.AsRegister<GpuRegister>();
GpuRegister current_method_reg = ZERO;
@@ -3526,14 +3555,14 @@
ArtMethod::DeclaringClassOffset().Int32Value());
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
@@ -3542,39 +3571,29 @@
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK(!kEmitCompilerReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ CodeGeneratorMIPS64::PcRelativePatchInfo* info =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
+ __ Lwu(out, AT, /* placeholder */ 0x5678);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
LOG(FATAL) << "Unimplemented";
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- uint32_t element_offset = cls->GetDexCacheElementOffset();
- CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeDexCacheArrayPatch(cls->GetDexFile(), element_offset);
- codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
- // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(cls, out_loc, AT, /* placeholder */ 0x5678);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- __ LoadFromOffset(kLoadDoubleword,
- out,
- current_method_reg,
- ArtMethod::DexCacheResolvedTypesOffset(kMips64PointerSize).Int32Value());
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset);
- generate_null_check = !cls->IsInDexCache();
- }
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -3638,6 +3657,7 @@
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
@@ -3646,7 +3666,7 @@
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
__ Daddiu(out, AT, /* placeholder */ 0x5678);
return; // No dex cache slow path.
@@ -3663,7 +3683,7 @@
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
__ Lwu(out, AT, /* placeholder */ 0x5678);
SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
@@ -3844,7 +3864,6 @@
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -3862,7 +3881,7 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
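
The MIPS64 fast paths above pair EmitPcRelativeAddressPlaceholderHigh() with a sign-extended 16-bit /* placeholder */ 0x5678 low half that the linker later overwrites. A sketch of the usual hi/lo split such a scheme relies on (helper names made up; the +0x8000 bias compensates for the sign extension of the low half):

    #include <cassert>
    #include <cstdint>

    uint32_t HighHalf(uint32_t offset) {
      return (offset + 0x8000u) >> 16;
    }

    int16_t LowHalf(uint32_t offset) {
      return static_cast<int16_t>(offset & 0xFFFFu);
    }

    int main() {
      for (uint32_t offset : {0x12345678u, 0x0000FFFFu, 0xFFFF8000u}) {
        uint32_t rebuilt = (HighHalf(offset) << 16) +
                           static_cast<uint32_t>(static_cast<int32_t>(LowHalf(offset)));
        assert(rebuilt == offset);  // hi/lo pair recombines to the original offset
      }
      return 0;
    }
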
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 8ac919f..52b780c 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -411,8 +411,10 @@
Mips64Label pc_rel_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
PcRelativePatchInfo* NewPcRelativeCallPatch(const DexFile& dex_file,
@@ -469,8 +471,10 @@
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
BootTypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f13b60a..cc727d2 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -225,8 +225,8 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
- __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index));
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index.index_));
x86_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
@@ -254,21 +254,24 @@
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCode(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex().index_));
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(type_index.index_));
x86_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType,
- at_, dex_pc_, this);
+ instruction_,
+ dex_pc_,
+ this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -281,8 +284,17 @@
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
x86_codegen->Move32(out, Location::RegisterLocation(EAX));
}
-
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ Register method_address = locations->InAt(0).AsRegister<Register>();
+ __ movl(Address(method_address, CodeGeneratorX86::kDummy32BitOffset),
+ locations->Out().AsRegister<Register>());
+ Label* fixup_label = x86_codegen->NewTypeBssEntryPatch(cls_);
+ __ Bind(fixup_label);
+ }
__ jmp(GetExitLabel());
}
@@ -292,10 +304,6 @@
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
- // The dex PC of `at_`.
+ // The dex PC of `instruction_`.
const uint32_t dex_pc_;
@@ -1009,7 +1017,8 @@
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
constant_area_start_(-1),
@@ -2244,6 +2253,14 @@
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderX86::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
void LocationsBuilderX86::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -4150,7 +4167,6 @@
} else {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
}
@@ -4166,7 +4182,7 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
DCHECK(!codegen_->IsLeafMethod());
}
}
@@ -4505,7 +4521,7 @@
__ movl(temp.AsRegister<Register>(), Address(base_reg, kDummy32BitOffset));
// Bind a new fixup label at the end of the "movl" insn.
uint32_t offset = invoke->GetDexCacheArrayOffset();
- __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset));
+ __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
@@ -4594,9 +4610,15 @@
__ Bind(&string_patches_.back().label);
}
-void CodeGeneratorX86::RecordTypePatch(HLoadClass* load_class) {
- type_patches_.emplace_back(load_class->GetDexFile(), load_class->GetTypeIndex().index_);
- __ Bind(&type_patches_.back().label);
+void CodeGeneratorX86::RecordBootTypePatch(HLoadClass* load_class) {
+ boot_image_type_patches_.emplace_back(load_class->GetDexFile(),
+ load_class->GetTypeIndex().index_);
+ __ Bind(&boot_image_type_patches_.back().label);
+}
+
+Label* CodeGeneratorX86::NewTypeBssEntryPatch(HLoadClass* load_class) {
+ type_bss_entry_patches_.emplace_back(load_class->GetDexFile(), load_class->GetTypeIndex().index_);
+ return &type_bss_entry_patches_.back().label;
}
Label* CodeGeneratorX86::NewStringBssEntryPatch(HLoadString* load_string) {
@@ -4633,7 +4655,8 @@
pc_relative_dex_cache_patches_.size() +
simple_patches_.size() +
string_patches_.size() +
- type_patches_.size();
+ boot_image_type_patches_.size() +
+ type_bss_entry_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
@@ -4642,24 +4665,26 @@
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(boot_image_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
} else if (GetCompilerOptions().GetCompilePic()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(boot_image_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(string_patches_, linker_patches);
} else {
+ for (const PatchInfo<Label>& info : boot_image_type_patches_) {
+ uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
+ linker_patches->push_back(LinkerPatch::TypePatch(literal_offset, &info.dex_file, info.index));
+ }
for (const PatchInfo<Label>& info : string_patches_) {
uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
linker_patches->push_back(
LinkerPatch::StringPatch(literal_offset, &info.dex_file, info.index));
}
}
- if (GetCompilerOptions().GetCompilePic()) {
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(type_patches_, linker_patches);
- } else {
- for (const PatchInfo<Label>& info : type_patches_) {
- uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::TypePatch(literal_offset, &info.dex_file, info.index));
- }
- }
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
+ DCHECK_EQ(size, linker_patches->size());
}
void CodeGeneratorX86::MarkGCCard(Register temp,
@@ -5978,7 +6003,7 @@
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
DCHECK(GetCompilerOptions().GetCompilePic());
FALLTHROUGH_INTENDED;
- case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation()); // Note: boot image is also non-JIT.
// We disable pc-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
@@ -6000,15 +6025,16 @@
}
void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(EAX),
- /* code_generator_supports_read_barrier */ true);
+ Location::RegisterLocation(EAX));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -6019,11 +6045,9 @@
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod ||
load_kind == HLoadClass::LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == HLoadClass::LoadKind::kDexCachePcRelative) {
+ load_kind == HLoadClass::LoadKind::kBssEntry) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
@@ -6031,23 +6055,26 @@
Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
dex::TypeIndex dex_index,
- uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), address);
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
// Add a patch entry and return the label.
jit_class_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_class_patches_.back();
return &info->label;
}
-void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
@@ -6055,7 +6082,7 @@
const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -6070,63 +6097,48 @@
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ movl(out, Immediate(/* placeholder */ 0));
- codegen_->RecordTypePatch(cls);
+ codegen_->RecordBootTypePatch(cls);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
Register method_address = locations->InAt(0).AsRegister<Register>();
__ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
- codegen_->RecordTypePatch(cls);
+ codegen_->RecordBootTypePatch(cls);
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ movl(out, Immediate(address));
codegen_->RecordSimplePatch();
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ Register method_address = locations->InAt(0).AsRegister<Register>();
+ Address address(method_address, CodeGeneratorX86::kDummy32BitOffset);
+ Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset);
Label* fixup_label = codegen_->NewJitRootClassPatch(
- cls->GetDexFile(), cls->GetTypeIndex(), cls->GetAddress());
+ cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
// /* GcRoot<mirror::Class> */ out = *address
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- Register base_reg = locations->InAt(0).AsRegister<Register>();
- uint32_t offset = cls->GetDexCacheElementOffset();
- Label* fixup_label = codegen_->NewPcRelativeDexCacheArrayPatch(cls->GetDexFile(), offset);
- // /* GcRoot<mirror::Class> */ out = *(base + offset) /* PC-relative */
- GenerateGcRootFieldLoad(cls,
- out_loc,
- Address(base_reg, CodeGeneratorX86::kDummy32BitOffset),
- fixup_label,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- Register current_method = locations->InAt(0).AsRegister<Register>();
- __ movl(out, Address(current_method,
- ArtMethod::DexCacheResolvedTypesOffset(kX86PointerSize).Int32Value()));
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- GenerateGcRootFieldLoad(cls,
- out_loc,
- Address(out,
- CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_)),
- /* fixup_label */ nullptr,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -6196,11 +6208,11 @@
break;
case HLoadString::LoadKind::kBootImageAddress:
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ break;
}
return desired_string_load_kind;
}
@@ -6251,11 +6263,13 @@
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ movl(out, Immediate(/* placeholder */ 0));
codegen_->RecordBootStringPatch(load);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
Register method_address = locations->InAt(0).AsRegister<Register>();
__ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
codegen_->RecordBootStringPatch(load);
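
The x86 changes introduce HLoadClass::LoadKind::kBssEntry: the main path loads the class from a linker-patched .bss slot (NewTypeBssEntryPatch) and null-checks it, while LoadClassSlowPathX86 now writes the resolved class back into that slot. Below is a minimal, self-contained C++ model of this caching scheme; the slot array, the resolver function and all other names are illustrative stand-ins, not ART code.

#include <array>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <string>

// Stand-in for a resolved class object.
struct Class { std::string descriptor; };

// Stand-in for the oat file's .bss class slots, one pointer per type, initially
// null. In ART the slot address comes from a kTypeBssEntry linker patch.
static std::array<Class*, 16> g_bss_class_slots{};

// Stand-in for the runtime type-resolution entrypoint used by the slow path.
Class* ResolveClassSlowPath(uint32_t type_idx) {
  std::cout << "slow path: resolving type " << type_idx << "\n";
  static std::array<Class, 16> storage;
  storage[type_idx].descriptor = "LType" + std::to_string(type_idx) + ";";
  return &storage[type_idx];
}

// Fast path emitted for kBssEntry: load the slot; only a null result takes the
// slow path, which then stores the resolved class back into the same slot.
Class* LoadClassBssEntry(uint32_t type_idx) {
  Class* klass = g_bss_class_slots[type_idx];  // load from the patched slot
  if (klass == nullptr) {                      // generate_null_check = true
    klass = ResolveClassSlowPath(type_idx);
    g_bss_class_slots[type_idx] = klass;       // slow path writes the slot
  }
  return klass;
}

int main() {
  Class* first = LoadClassBssEntry(3);   // resolves and fills the slot
  Class* second = LoadClassBssEntry(3);  // served directly from the slot
  assert(first == second);
  std::cout << second->descriptor << "\n";
  return 0;
}
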
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index dd1628c..9eb9765 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -412,13 +412,16 @@
void RecordSimplePatch();
void RecordBootStringPatch(HLoadString* load_string);
- void RecordTypePatch(HLoadClass* load_class);
+ void RecordBootTypePatch(HLoadClass* load_class);
+ Label* NewTypeBssEntryPatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
Label* NewJitRootStringPatch(const DexFile& dex_file,
dex::StringIndex dex_index,
Handle<mirror::String> handle);
- Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
+ Label* NewJitRootClassPatch(const DexFile& dex_file,
+ dex::TypeIndex dex_index,
+ Handle<mirror::Class> handle);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
@@ -621,8 +624,10 @@
ArenaDeque<Label> simple_patches_;
// String patch locations; type depends on configuration (app .bss or boot image PIC/non-PIC).
ArenaDeque<PatchInfo<Label>> string_patches_;
- // Type patch locations.
- ArenaDeque<PatchInfo<Label>> type_patches_;
+ // Type patch locations for boot image; type depends on configuration (boot image PIC/non-PIC).
+ ArenaDeque<PatchInfo<Label>> boot_image_type_patches_;
+ // Type patch locations for kBssEntry.
+ ArenaDeque<PatchInfo<Label>> type_bss_entry_patches_;
// Patches for string root accesses in JIT compiled code.
ArenaDeque<PatchInfo<Label>> jit_string_patches_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 89f4ae0..9adedab 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -234,12 +234,12 @@
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCode(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
@@ -249,7 +249,7 @@
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)),
Immediate(cls_->GetTypeIndex().index_));
x86_64_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage : kQuickInitializeType,
- at_,
+ instruction_,
dex_pc_,
this);
if (do_clinit_) {
@@ -266,6 +266,15 @@
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ __ movl(Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false),
+ locations->Out().AsRegister<CpuRegister>());
+ Label* fixup_label = x86_64_codegen->NewTypeBssEntryPatch(cls_);
+ __ Bind(fixup_label);
+ }
__ jmp(GetExitLabel());
}
@@ -275,10 +284,6 @@
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
- // The dex PC of `at_`.
+ // The dex PC of `instruction_`.
const uint32_t dex_pc_;
@@ -300,9 +305,9 @@
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
// Custom calling convention: RAX serves as both input and output.
- __ movl(CpuRegister(RAX), Immediate(string_index));
+ __ movl(CpuRegister(RAX), Immediate(string_index.index_));
x86_64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
instruction_->GetDexPc(),
@@ -986,7 +991,7 @@
Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
// Bind a new fixup label at the end of the "movl" insn.
uint32_t offset = invoke->GetDexCacheArrayOffset();
- __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset));
+ __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
@@ -1079,9 +1084,15 @@
__ Bind(&string_patches_.back().label);
}
-void CodeGeneratorX86_64::RecordTypePatch(HLoadClass* load_class) {
- type_patches_.emplace_back(load_class->GetDexFile(), load_class->GetTypeIndex().index_);
- __ Bind(&type_patches_.back().label);
+void CodeGeneratorX86_64::RecordBootTypePatch(HLoadClass* load_class) {
+ boot_image_type_patches_.emplace_back(load_class->GetDexFile(),
+ load_class->GetTypeIndex().index_);
+ __ Bind(&boot_image_type_patches_.back().label);
+}
+
+Label* CodeGeneratorX86_64::NewTypeBssEntryPatch(HLoadClass* load_class) {
+ type_bss_entry_patches_.emplace_back(load_class->GetDexFile(), load_class->GetTypeIndex().index_);
+ return &type_bss_entry_patches_.back().label;
}
Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) {
@@ -1118,7 +1129,8 @@
pc_relative_dex_cache_patches_.size() +
simple_patches_.size() +
string_patches_.size() +
- type_patches_.size();
+ boot_image_type_patches_.size() +
+ type_bss_entry_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
@@ -1127,13 +1139,17 @@
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(boot_image_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
} else {
- // These are always PC-relative, see GetSupportedLoadStringKind().
+ // These are always PC-relative, see GetSupportedLoadClassKind()/GetSupportedLoadStringKind().
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(boot_image_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(string_patches_, linker_patches);
}
- // These are always PC-relative, see GetSupportedLoadClassKind().
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(type_patches_, linker_patches);
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
+ DCHECK_EQ(size, linker_patches->size());
}
void CodeGeneratorX86_64::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -1214,7 +1230,8 @@
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
@@ -2423,6 +2440,14 @@
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderX86_64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86_64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
void LocationsBuilderX86_64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -4038,7 +4063,6 @@
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(Location::RegisterLocation(RAX));
}
@@ -4055,7 +4079,7 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
DCHECK(!codegen_->IsLeafMethod());
}
}
@@ -5417,11 +5441,12 @@
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kJitTableAddress:
- break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kJitTableAddress:
+ DCHECK(Runtime::Current()->UseJitCompilation());
+ break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -5429,15 +5454,16 @@
}
void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(RAX),
- /* code_generator_supports_read_barrier */ true);
+ Location::RegisterLocation(RAX));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -5448,9 +5474,7 @@
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
@@ -5458,23 +5482,26 @@
Label* CodeGeneratorX86_64::NewJitRootClassPatch(const DexFile& dex_file,
dex::TypeIndex dex_index,
- uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), address);
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(
+ TypeReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
// Add a patch entry and return the label.
jit_class_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_class_patches_.back();
return &info->label;
}
-void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
@@ -5482,7 +5509,7 @@
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -5497,54 +5524,38 @@
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
- codegen_->RecordTypePatch(cls);
+ codegen_->RecordBootTypePatch(cls);
break;
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ movl(out, Immediate(address)); // Zero-extended.
codegen_->RecordSimplePatch();
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
+ /* no_rip */ false);
+ Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
+ // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
/* no_rip */ true);
Label* fixup_label =
- codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetAddress());
+ codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
// /* GcRoot<mirror::Class> */ out = *address
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- uint32_t offset = cls->GetDexCacheElementOffset();
- Label* fixup_label = codegen_->NewPcRelativeDexCacheArrayPatch(cls->GetDexFile(), offset);
- Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ false);
- // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
- __ movq(out,
- Address(current_method,
- ArtMethod::DexCacheResolvedTypesOffset(kX86_64PointerSize).Int32Value()));
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- GenerateGcRootFieldLoad(
- cls,
- out_loc,
- Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_)),
- /* fixup_label */ nullptr,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
default:
LOG(FATAL) << "Unexpected load kind: " << cls->GetLoadKind();
UNREACHABLE();
@@ -5600,11 +5611,11 @@
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ break;
}
return desired_string_load_kind;
}
@@ -5650,6 +5661,7 @@
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
codegen_->RecordBootStringPatch(load);
return; // No dex cache slow path.
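
Both EmitLinkerPatches implementations now compute size as the sum of every per-kind patch container, reserve that many entries, and finish with DCHECK_EQ(size, linker_patches->size()), so a newly added patch kind that is counted but never emitted (or the reverse) fails immediately. A rough, self-contained sketch of that accounting pattern; Patch and the container names are simplified stand-ins for ART's LinkerPatch and ArenaDeque fields.

#include <cassert>
#include <cstddef>
#include <deque>
#include <vector>

struct Patch { int kind; size_t literal_offset; };

// Per-kind recording containers, analogous to boot_image_type_patches_,
// type_bss_entry_patches_ and string_patches_ in the code generators.
std::deque<Patch> boot_image_type_patches;
std::deque<Patch> type_bss_entry_patches;
std::deque<Patch> string_patches;

void EmitLinkerPatches(std::vector<Patch>* linker_patches) {
  // Reserve exactly as many entries as we intend to emit...
  const size_t size = boot_image_type_patches.size() +
                      type_bss_entry_patches.size() +
                      string_patches.size();
  linker_patches->reserve(size);

  for (const auto* container : {&boot_image_type_patches,
                                &type_bss_entry_patches,
                                &string_patches}) {
    for (const Patch& patch : *container) {
      linker_patches->push_back(patch);
    }
  }

  // ...and verify that no patch kind was silently dropped or double-counted.
  assert(size == linker_patches->size());
}

int main() {
  boot_image_type_patches.push_back({/*kind=*/0, /*literal_offset=*/16});
  type_bss_entry_patches.push_back({/*kind=*/1, /*literal_offset=*/32});
  string_patches.push_back({/*kind=*/2, /*literal_offset=*/48});
  std::vector<Patch> out;
  EmitLinkerPatches(&out);
  assert(out.size() == 3u);
  return 0;
}
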
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 32d006c..3438b81 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -409,13 +409,16 @@
void RecordSimplePatch();
void RecordBootStringPatch(HLoadString* load_string);
- void RecordTypePatch(HLoadClass* load_class);
+ void RecordBootTypePatch(HLoadClass* load_class);
+ Label* NewTypeBssEntryPatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
Label* NewJitRootStringPatch(const DexFile& dex_file,
dex::StringIndex dex_index,
Handle<mirror::String> handle);
- Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
+ Label* NewJitRootClassPatch(const DexFile& dex_file,
+ dex::TypeIndex dex_index,
+ Handle<mirror::Class> handle);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
@@ -604,8 +607,10 @@
ArenaDeque<Label> simple_patches_;
// String patch locations; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PatchInfo<Label>> string_patches_;
- // Type patch locations.
- ArenaDeque<PatchInfo<Label>> type_patches_;
+ // Type patch locations for boot image (always PIC).
+ ArenaDeque<PatchInfo<Label>> boot_image_type_patches_;
+ // Type patch locations for kBssEntry.
+ ArenaDeque<PatchInfo<Label>> type_bss_entry_patches_;
// Fixups for jump tables need to be handled specially.
ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.cc b/compiler/optimizing/dex_cache_array_fixups_arm.cc
index 10a36c6..9ddcd56 100644
--- a/compiler/optimizing/dex_cache_array_fixups_arm.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.cc
@@ -59,29 +59,15 @@
}
private:
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
- // If this is a load with PC-relative access to the dex cache types array,
- // we need to add the dex cache arrays base as the special input.
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCachePcRelative) {
- // Initialize base for target dex file if needed.
- const DexFile& dex_file = load_class->GetDexFile();
- HArmDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(dex_file);
- // Update the element offset in base.
- DexCacheArraysLayout layout(kArmPointerSize, &dex_file);
- base->UpdateElementOffset(layout.TypeOffset(load_class->GetTypeIndex()));
- // Add the special argument base to the load.
- load_class->AddSpecialInput(base);
- }
- }
-
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
// If this is an invoke with PC-relative access to the dex cache methods array,
// we need to add the dex cache arrays base as the special input.
if (invoke->HasPcRelativeDexCache() &&
!IsCallFreeIntrinsic<IntrinsicLocationsBuilderARMType>(invoke, codegen_)) {
- HArmDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(invoke->GetDexFile());
+ HArmDexCacheArraysBase* base =
+ GetOrCreateDexCacheArrayBase(invoke->GetDexFileForPcRelativeDexCache());
// Update the element offset in base.
- DexCacheArraysLayout layout(kArmPointerSize, &invoke->GetDexFile());
+ DexCacheArraysLayout layout(kArmPointerSize, &invoke->GetDexFileForPcRelativeDexCache());
base->UpdateElementOffset(layout.MethodOffset(invoke->GetDexMethodIndex()));
// Add the special argument base to the method.
DCHECK(!invoke->HasCurrentMethodInput());
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.cc b/compiler/optimizing/dex_cache_array_fixups_mips.cc
index 31fff26..04a4294 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.cc
@@ -53,30 +53,16 @@
}
private:
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
- // If this is a load with PC-relative access to the dex cache types array,
- // we need to add the dex cache arrays base as the special input.
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCachePcRelative) {
- // Initialize base for target dex file if needed.
- const DexFile& dex_file = load_class->GetDexFile();
- HMipsDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(dex_file);
- // Update the element offset in base.
- DexCacheArraysLayout layout(kMipsPointerSize, &dex_file);
- base->UpdateElementOffset(layout.TypeOffset(load_class->GetTypeIndex()));
- // Add the special argument base to the load.
- load_class->AddSpecialInput(base);
- }
- }
-
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
// If this is an invoke with PC-relative access to the dex cache methods array,
// we need to add the dex cache arrays base as the special input.
if (invoke->HasPcRelativeDexCache() &&
!IsCallFreeIntrinsic<IntrinsicLocationsBuilderMIPS>(invoke, codegen_)) {
// Initialize base for target method dex file if needed.
- HMipsDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(invoke->GetDexFile());
+ HMipsDexCacheArraysBase* base =
+ GetOrCreateDexCacheArrayBase(invoke->GetDexFileForPcRelativeDexCache());
// Update the element offset in base.
- DexCacheArraysLayout layout(kMipsPointerSize, &invoke->GetDexFile());
+ DexCacheArraysLayout layout(kMipsPointerSize, &invoke->GetDexFileForPcRelativeDexCache());
base->UpdateElementOffset(layout.MethodOffset(invoke->GetDexMethodIndex()));
// Add the special argument base to the method.
DCHECK(!invoke->HasCurrentMethodInput());
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 09dcefa..f6fba88 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -464,6 +464,11 @@
StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
}
+ void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+ VisitInvoke(invoke);
+ StartAttributeStream("invoke_type") << "InvokePolymorphic";
+ }
+
void VisitInstanceFieldGet(HInstanceFieldGet* iget) OVERRIDE {
StartAttributeStream("field_name") <<
iget->GetFieldInfo().GetDexFile().PrettyField(iget->GetFieldInfo().GetFieldIndex(),
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index d5c4c2f..6d8ae75 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -368,10 +368,14 @@
}
}
-bool InductionVarRange::IsFinite(HLoopInformation* loop) const {
+bool InductionVarRange::IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const {
HInductionVarAnalysis::InductionInfo *trip =
induction_analysis_->LookupInfo(loop, GetLoopControl(loop));
- return trip != nullptr && !IsUnsafeTripCount(trip);
+ if (trip != nullptr && !IsUnsafeTripCount(trip)) {
+ IsConstant(trip->op_a, kExact, tc);
+ return true;
+ }
+ return false;
}
//
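
InductionVarRange::IsFinite now also reports the exact trip count through an out-parameter when it is a known constant, which SimplifyInnerLoop later uses to treat a loop that iterates exactly once as removable after unrolling. A small stand-alone sketch of the same out-parameter idiom for a canonical counted loop; the trip-count formula here applies only to this toy loop shape and is not ART's induction analysis.

#include <cassert>
#include <cstdint>
#include <iostream>

// A canonical counted loop: for (i = lower; i < upper; i += step), step > 0.
struct CountedLoop { int64_t lower; int64_t upper; int64_t step; };

// Returns whether the loop terminates; on success also writes the exact
// trip count into *tc (0 if the body never runs).
bool IsFinite(const CountedLoop& loop, /*out*/ int64_t* tc) {
  if (loop.step <= 0) {
    return false;  // Not provably finite; leave *tc untouched.
  }
  int64_t iterations = 0;
  if (loop.upper > loop.lower) {
    iterations = (loop.upper - loop.lower + loop.step - 1) / loop.step;
  }
  *tc = iterations;
  return true;
}

int main() {
  int64_t tc = 0;
  assert(IsFinite({0, 10, 3}, &tc) && tc == 4);   // i = 0, 3, 6, 9
  assert(IsFinite({5, 5, 1}, &tc) && tc == 0);    // body never entered
  assert(!IsFinite({0, 10, 0}, &tc));             // cannot prove termination
  std::cout << "trip counts ok\n";
  return 0;
}
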
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index ba14847..6c424b7 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -150,9 +150,9 @@
}
/**
- * Checks if header logic of a loop terminates.
+ * Checks if header logic of a loop terminates. Sets trip-count tc if known.
*/
- bool IsFinite(HLoopInformation* loop) const;
+ bool IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const;
private:
/*
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index e5d05e9..50aa442 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -308,8 +308,10 @@
}
bool HInliner::TryInline(HInvoke* invoke_instruction) {
- if (invoke_instruction->IsInvokeUnresolved()) {
- return false; // Don't bother to move further if we know the method is unresolved.
+ if (invoke_instruction->IsInvokeUnresolved() ||
+ invoke_instruction->IsInvokePolymorphic()) {
+ return false; // Don't bother to move further if we know the method is unresolved or an
+ // invoke-polymorphic.
}
ScopedObjectAccess soa(Thread::Current());
@@ -472,10 +474,10 @@
HInstruction* receiver = invoke_instruction->InputAt(0);
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- Handle<mirror::Class> handle = handles_->NewHandle(GetMonomorphicType(classes));
+ Handle<mirror::Class> monomorphic_type = handles_->NewHandle(GetMonomorphicType(classes));
if (!TryInlineAndReplace(invoke_instruction,
resolved_method,
- ReferenceTypeInfo::Create(handle, /* is_exact */ true),
+ ReferenceTypeInfo::Create(monomorphic_type, /* is_exact */ true),
/* do_rtp */ false,
/* cha_devirtualize */ false)) {
return false;
@@ -486,7 +488,7 @@
cursor,
bb_cursor,
class_index,
- GetMonomorphicType(classes),
+ monomorphic_type,
invoke_instruction,
/* with_deoptimization */ true);
@@ -531,11 +533,9 @@
HInstruction* cursor,
HBasicBlock* bb_cursor,
dex::TypeIndex class_index,
- mirror::Class* klass,
+ Handle<mirror::Class> klass,
HInstruction* invoke_instruction,
bool with_deoptimization) {
- ScopedAssertNoThreadSuspension sants("Adding compiler type guard");
-
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
HInstanceFieldGet* receiver_class = BuildGetReceiverClass(
class_linker, receiver, invoke_instruction->GetDexPc());
@@ -546,19 +546,20 @@
}
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
- bool is_referrer = (klass == outermost_graph_->GetArtMethod()->GetDeclaringClass());
+ bool is_referrer = (klass.Get() == outermost_graph_->GetArtMethod()->GetDeclaringClass());
// Note that we will just compare the classes, so we don't need Java semantics access checks.
// Note that the type index and the dex file are relative to the method this type guard is
// inlined into.
HLoadClass* load_class = new (graph_->GetArena()) HLoadClass(graph_->GetCurrentMethod(),
class_index,
caller_dex_file,
+ klass,
is_referrer,
invoke_instruction->GetDexPc(),
/* needs_access_check */ false);
bb_cursor->InsertInstructionAfter(load_class, receiver_class);
// Sharpen after adding the instruction, as the sharpening may remove inputs.
- HSharpening::SharpenClass(load_class, klass, handles_, codegen_, compiler_driver_);
+ HSharpening::SharpenClass(load_class, codegen_, compiler_driver_);
// TODO: Extend reference type propagation to understand the guard.
HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
@@ -635,7 +636,7 @@
cursor,
bb_cursor,
class_index,
- handle.Get(),
+ handle,
invoke_instruction,
deoptimize);
if (deoptimize) {
@@ -1428,15 +1429,6 @@
return false;
}
- if (current->IsNewInstance() &&
- (current->AsNewInstance()->GetEntrypoint() == kQuickAllocObjectWithAccessCheck)) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it is using an entrypoint"
- << " with access checks";
- // Allocation entrypoint does not handle inlined frames.
- return false;
- }
-
if (current->IsNewArray() &&
(current->AsNewArray()->GetEntrypoint() == kQuickAllocArrayWithAccessCheck)) {
VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 4c0b990..11aacab 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -170,7 +170,7 @@
HInstruction* cursor,
HBasicBlock* bb_cursor,
dex::TypeIndex class_index,
- mirror::Class* klass,
+ Handle<mirror::Class> klass,
HInstruction* invoke_instruction,
bool with_deoptimization)
REQUIRES_SHARED(Locks::mutator_lock_);
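
Across the inliner, AddTypeGuard now takes Handle<mirror::Class> rather than a raw mirror::Class*, and the ScopedAssertNoThreadSuspension goes away: a handle stays valid across points where a moving collector may relocate the object, while a bare pointer does not. The sketch below is only a toy model of that design choice; Heap, ClassHandle and the relocation trigger are invented stand-ins, not ART's Handle/HandleScope machinery.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct Class { std::string name; };

// Toy "heap" whose objects may be relocated by a moving collector.
class Heap {
 public:
  size_t Allocate(const std::string& name) {
    objects_.push_back({name});
    return objects_.size() - 1;  // stable object id
  }
  Class* Address(size_t id) { return &objects_[id]; }
  // Simulate a moving GC: force a reallocation so every object gets a new address.
  void MoveEverything() { objects_.reserve(objects_.capacity() * 2 + 16); }
 private:
  std::vector<Class> objects_;
};

// Toy handle: remembers the object id and re-resolves the address on every use,
// so it survives relocation (ART's Handle<> is a GC root with the same effect).
class ClassHandle {
 public:
  ClassHandle(Heap* heap, size_t id) : heap_(heap), id_(id) {}
  Class* Get() const { return heap_->Address(id_); }
 private:
  Heap* heap_;
  size_t id_;
};

int main() {
  Heap heap;
  size_t id = heap.Allocate("java.lang.String");

  Class* raw = heap.Address(id);   // like passing mirror::Class* around
  ClassHandle handle(&heap, id);   // like passing Handle<mirror::Class>

  heap.MoveEverything();           // relocation point (e.g. thread suspension)

  // `raw` may now dangle; `handle.Get()` is recomputed and stays correct.
  std::cout << "handle sees: " << handle.Get()->name << "\n";
  (void)raw;  // must not be dereferenced after relocation
  return 0;
}
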
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 768b1d8..8ed0e7f 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -207,10 +208,8 @@
HEnvironment* environment = new (arena_) HEnvironment(
arena_,
current_locals_->size(),
- graph_->GetDexFile(),
- graph_->GetMethodIdx(),
+ graph_->GetArtMethod(),
instruction->GetDexPc(),
- graph_->GetInvokeType(),
instruction);
environment->CopyFrom(*current_locals_);
instruction->SetRawEnvironment(environment);
@@ -906,51 +905,69 @@
false /* is_unresolved */);
}
+bool HInstructionBuilder::BuildInvokePolymorphic(const Instruction& instruction ATTRIBUTE_UNUSED,
+ uint32_t dex_pc,
+ uint32_t method_idx,
+ uint32_t proto_idx,
+ uint32_t number_of_vreg_arguments,
+ bool is_range,
+ uint32_t* args,
+ uint32_t register_index) {
+ const char* descriptor = dex_file_->GetShorty(proto_idx);
+ DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), number_of_vreg_arguments);
+ Primitive::Type return_type = Primitive::GetType(descriptor[0]);
+ size_t number_of_arguments = strlen(descriptor);
+ HInvoke* invoke = new (arena_) HInvokePolymorphic(arena_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx);
+ return HandleInvoke(invoke,
+ number_of_vreg_arguments,
+ args,
+ register_index,
+ is_range,
+ descriptor,
+ nullptr /* clinit_check */,
+ false /* is_unresolved */);
+}
+
bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index)));
- const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
- bool finalizable;
- bool needs_access_check = NeedsAccessCheck(type_index, dex_cache, &finalizable);
-
- // Only the non-resolved entrypoint handles the finalizable class case. If we
- // need access checks, then we haven't resolved the method and the class may
- // again be finalizable.
- QuickEntrypointEnum entrypoint = (finalizable || needs_access_check)
- ? kQuickAllocObject
- : kQuickAllocObjectInitialized;
-
if (outer_dex_cache.Get() != dex_cache.Get()) {
// We currently do not support inlining allocations across dex files.
return false;
}
- HLoadClass* load_class = new (arena_) HLoadClass(
- graph_->GetCurrentMethod(),
- type_index,
- outer_dex_file,
- IsOutermostCompilingClass(type_index),
- dex_pc,
- needs_access_check);
+ HLoadClass* load_class = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
- AppendInstruction(load_class);
HInstruction* cls = load_class;
- if (!IsInitialized(resolved_class)) {
+ Handle<mirror::Class> klass = load_class->GetClass();
+
+ if (!IsInitialized(klass)) {
cls = new (arena_) HClinitCheck(load_class, dex_pc);
AppendInstruction(cls);
}
+ // Only the access check entrypoint handles the finalizable class case. If we
+ // need access checks, then we haven't resolved the method and the class may
+ // again be finalizable.
+ QuickEntrypointEnum entrypoint = kQuickAllocObjectInitialized;
+ if (load_class->NeedsAccessCheck() || klass->IsFinalizable() || !klass->IsInstantiable()) {
+ entrypoint = kQuickAllocObjectWithChecks;
+ }
+
+ // Consider classes we haven't resolved as potentially finalizable.
+ bool finalizable = (klass.Get() == nullptr) || klass->IsFinalizable();
+
AppendInstruction(new (arena_) HNewInstance(
cls,
- graph_->GetCurrentMethod(),
dex_pc,
type_index,
*dex_compilation_unit_->GetDexFile(),
- needs_access_check,
finalizable,
entrypoint));
return true;
@@ -991,7 +1008,6 @@
ArtMethod* resolved_method,
uint32_t method_idx,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
- const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Thread* self = Thread::Current();
StackHandleScope<2> hs(self);
Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
@@ -1019,15 +1035,9 @@
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
} else if (storage_index.IsValid()) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
- HLoadClass* load_class = new (arena_) HLoadClass(
- graph_->GetCurrentMethod(),
- storage_index,
- outer_dex_file,
- is_outer_class,
- dex_pc,
- /*needs_access_check*/ false);
- AppendInstruction(load_class);
- clinit_check = new (arena_) HClinitCheck(load_class, dex_pc);
+ HLoadClass* cls = BuildLoadClass(
+ storage_index, dex_pc, /* check_access */ false, /* outer */ true);
+ clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
AppendInstruction(clinit_check);
}
return clinit_check;
@@ -1349,7 +1359,6 @@
}
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
- const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
@@ -1377,16 +1386,10 @@
}
}
- HLoadClass* constant = new (arena_) HLoadClass(graph_->GetCurrentMethod(),
- storage_index,
- outer_dex_file,
- is_outer_class,
- dex_pc,
- /*needs_access_check*/ false);
- AppendInstruction(constant);
+ HLoadClass* constant = BuildLoadClass(
+ storage_index, dex_pc, /* check_access */ false, /* outer */ true);
HInstruction* cls = constant;
-
Handle<mirror::Class> klass(hs.NewHandle(resolved_field->GetDeclaringClass()));
if (!IsInitialized(klass)) {
cls = new (arena_) HClinitCheck(constant, dex_pc);
@@ -1633,33 +1636,53 @@
}
}
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
+ uint32_t dex_pc,
+ bool check_access,
+ bool outer) {
+ ScopedObjectAccess soa(Thread::Current());
+ const DexCompilationUnit* compilation_unit =
+ outer ? outer_compilation_unit_ : dex_compilation_unit_;
+ const DexFile& dex_file = *compilation_unit->GetDexFile();
+ Handle<mirror::DexCache> dex_cache = compilation_unit->GetDexCache();
+ bool is_accessible = false;
+ Handle<mirror::Class> klass = handles_->NewHandle(dex_cache->GetResolvedType(type_index));
+ if (!check_access) {
+ is_accessible = true;
+ } else if (klass.Get() != nullptr) {
+ if (klass->IsPublic()) {
+ is_accessible = true;
+ } else {
+ mirror::Class* compiling_class = GetCompilingClass();
+ if (compiling_class != nullptr && compiling_class->CanAccess(klass.Get())) {
+ is_accessible = true;
+ }
+ }
+ }
+
+ HLoadClass* load_class = new (arena_) HLoadClass(
+ graph_->GetCurrentMethod(),
+ type_index,
+ dex_file,
+ klass,
+ klass.Get() != nullptr && (klass.Get() == GetOutermostCompilingClass()),
+ dex_pc,
+ !is_accessible);
+
+ AppendInstruction(load_class);
+ return load_class;
+}
+
void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
uint8_t destination,
uint8_t reference,
dex::TypeIndex type_index,
uint32_t dex_pc) {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index)));
-
- bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(),
- dex_cache,
- type_index);
-
HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
- HLoadClass* cls = new (arena_) HLoadClass(
- graph_->GetCurrentMethod(),
- type_index,
- dex_file,
- IsOutermostCompilingClass(type_index),
- dex_pc,
- !can_access);
- AppendInstruction(cls);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
- TypeCheckKind check_kind = ComputeTypeCheckKind(resolved_class);
+ ScopedObjectAccess soa(Thread::Current());
+ TypeCheckKind check_kind = ComputeTypeCheckKind(cls->GetClass());
if (instruction.Opcode() == Instruction::INSTANCE_OF) {
AppendInstruction(new (arena_) HInstanceOf(object, cls, check_kind, dex_pc));
UpdateLocal(destination, current_block_->GetLastInstruction());
@@ -1916,6 +1939,37 @@
break;
}
+ case Instruction::INVOKE_POLYMORPHIC: {
+ uint16_t method_idx = instruction.VRegB_45cc();
+ uint16_t proto_idx = instruction.VRegH_45cc();
+ uint32_t number_of_vreg_arguments = instruction.VRegA_45cc();
+ uint32_t args[5];
+ instruction.GetVarArgs(args);
+ return BuildInvokePolymorphic(instruction,
+ dex_pc,
+ method_idx,
+ proto_idx,
+ number_of_vreg_arguments,
+ false,
+ args,
+ -1);
+ }
+
+ case Instruction::INVOKE_POLYMORPHIC_RANGE: {
+ uint16_t method_idx = instruction.VRegB_4rcc();
+ uint16_t proto_idx = instruction.VRegH_4rcc();
+ uint32_t number_of_vreg_arguments = instruction.VRegA_4rcc();
+ uint32_t register_index = instruction.VRegC_4rcc();
+ return BuildInvokePolymorphic(instruction,
+ dex_pc,
+ method_idx,
+ proto_idx,
+ number_of_vreg_arguments,
+ true,
+ nullptr,
+ register_index);
+ }
+
case Instruction::NEG_INT: {
Unop_12x<HNeg>(instruction, Primitive::kPrimInt, dex_pc);
break;
@@ -2632,21 +2686,7 @@
case Instruction::CONST_CLASS: {
dex::TypeIndex type_index(instruction.VRegB_21c());
- // `CanAccessTypeWithoutChecks` will tell whether the method being
- // built is trying to access its own class, so that the generated
- // code can optimize for this case. However, the optimization does not
- // work for inlining, so we use `IsOutermostCompilingClass` instead.
- ScopedObjectAccess soa(Thread::Current());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(), dex_cache, type_index);
- AppendInstruction(new (arena_) HLoadClass(
- graph_->GetCurrentMethod(),
- type_index,
- *dex_file_,
- IsOutermostCompilingClass(type_index),
- dex_pc,
- !can_access));
+ BuildLoadClass(type_index, dex_pc, /* check_access */ true);
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
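
The new HInstructionBuilder::BuildLoadClass computes accessibility inline instead of calling CompilerDriver::CanAccessTypeWithoutChecks: the type is accessible when no check was requested, when the resolved class is public, or when the compiling class can access it; otherwise the HLoadClass is created with its access-check flag set. Here is a compact stand-alone restatement of that decision with toy types; the CanAccess rule is a deliberate simplification of the real runtime check.

#include <cassert>
#include <string>

// Toy stand-ins for resolved classes; nullptr models an unresolved type.
struct Klass {
  std::string package;
  bool is_public;
  bool CanAccess(const Klass& other) const {
    // Simplified: same package, or the target is public.
    return other.is_public || other.package == package;
  }
};

// Mirrors the is_accessible computation in BuildLoadClass: the resulting
// HLoadClass gets needs_access_check = !is_accessible.
bool NeedsAccessCheck(const Klass* klass,            // resolved type (may be null)
                      const Klass* compiling_class,  // may be null
                      bool check_access) {
  bool is_accessible = false;
  if (!check_access) {
    is_accessible = true;
  } else if (klass != nullptr) {
    if (klass->is_public) {
      is_accessible = true;
    } else if (compiling_class != nullptr && compiling_class->CanAccess(*klass)) {
      is_accessible = true;
    }
  }
  return !is_accessible;
}

int main() {
  Klass pub{"java.lang", /*is_public=*/true};
  Klass hidden{"libcore.internal", /*is_public=*/false};
  Klass caller{"libcore.internal", /*is_public=*/true};

  assert(!NeedsAccessCheck(&pub, &caller, /*check_access=*/true));
  assert(!NeedsAccessCheck(&hidden, &caller, /*check_access=*/true));  // same package
  assert(NeedsAccessCheck(&hidden, nullptr, /*check_access=*/true));   // unknown caller
  assert(NeedsAccessCheck(nullptr, &caller, /*check_access=*/true));   // unresolved type
  assert(!NeedsAccessCheck(nullptr, nullptr, /*check_access=*/false)); // caller opted out
  return 0;
}
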
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index f29e522..5efe950 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -46,9 +46,11 @@
CompilerDriver* driver,
const uint8_t* interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
- Handle<mirror::DexCache> dex_cache)
+ Handle<mirror::DexCache> dex_cache,
+ VariableSizedHandleScope* handles)
: arena_(graph->GetArena()),
graph_(graph),
+ handles_(handles),
dex_file_(dex_file),
code_item_(code_item),
return_type_(return_type),
@@ -175,6 +177,17 @@
uint32_t* args,
uint32_t register_index);
+ // Builds an invocation node for invoke-polymorphic and returns whether the
+ // instruction is supported.
+ bool BuildInvokePolymorphic(const Instruction& instruction,
+ uint32_t dex_pc,
+ uint32_t method_idx,
+ uint32_t proto_idx,
+ uint32_t number_of_vreg_arguments,
+ bool is_range,
+ uint32_t* args,
+ uint32_t register_index);
+
// Builds a new array node and the instructions that fill it.
void BuildFilledNewArray(uint32_t dex_pc,
dex::TypeIndex type_index,
@@ -212,6 +225,14 @@
// Builds an instruction sequence for a switch statement.
void BuildSwitch(const Instruction& instruction, uint32_t dex_pc);
+ // Builds a `HLoadClass` loading the given `type_index`. If `outer` is true,
+ // this method will use the outer class's dex file to lookup the type at
+ // `type_index`.
+ HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
+ uint32_t dex_pc,
+ bool check_access,
+ bool outer = false);
+
// Returns the outer-most compiling method's class.
mirror::Class* GetOutermostCompilingClass() const;
@@ -271,6 +292,7 @@
ArenaAllocator* const arena_;
HGraph* const graph_;
+ VariableSizedHandleScope* handles_;
// The dex file where the method being compiled is, and the bytecode data.
const DexFile* const dex_file_;
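
BuildInvokePolymorphic derives everything it needs from the proto's shorty: descriptor[0] is the return type, strlen(descriptor) is used as the argument count (the return character standing in, count-wise, for the implicit MethodHandle receiver), and the DCHECK compares 1 + ArtMethod::NumArgRegisters(descriptor) against the vreg count. A stand-alone sketch of that decoding under the standard dex shorty encoding; the helper names and the example shorty are illustrative only.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>

// One dex shorty character -> number of virtual registers it occupies.
uint32_t VRegsForShortyChar(char c) {
  return (c == 'J' || c == 'D') ? 2u : 1u;  // long and double are wide
}

struct PolymorphicCallShape {
  char return_type;              // shorty[0]
  uint32_t number_of_arguments;  // receiver + declared arguments
  uint32_t number_of_vregs;      // receiver + vregs of declared arguments
};

PolymorphicCallShape DecodeShorty(const std::string& shorty) {
  PolymorphicCallShape shape;
  shape.return_type = shorty[0];
  // Like strlen(descriptor) in the builder: the return character's slot stands
  // in, count-wise, for the implicit MethodHandle receiver.
  shape.number_of_arguments = static_cast<uint32_t>(shorty.size());
  shape.number_of_vregs = 1u;  // the receiver is a reference: one vreg
  for (size_t i = 1; i < shorty.size(); ++i) {
    shape.number_of_vregs += VRegsForShortyChar(shorty[i]);
  }
  return shape;
}

int main() {
  // e.g. MethodHandle.invoke(long, Object) returning int -> shorty "IJL".
  PolymorphicCallShape shape = DecodeShorty("IJL");
  assert(shape.return_type == 'I');
  assert(shape.number_of_arguments == 3u);  // matches strlen("IJL")
  assert(shape.number_of_vregs == 4u);      // 1 (receiver) + 2 (long) + 1 (ref)
  return 0;
}
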
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index fc6ff7b..17d683f 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -145,7 +145,7 @@
if (!CheckInvokeType(intrinsic, invoke)) {
LOG(WARNING) << "Found an intrinsic with unexpected invoke type: "
<< intrinsic << " for "
- << invoke->GetDexFile().PrettyMethod(invoke->GetDexMethodIndex())
+ << art_method->PrettyMethod()
<< invoke->DebugName();
} else {
invoke->SetIntrinsic(intrinsic,
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index cda3185..f1ae549 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -752,8 +752,9 @@
FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
FRegister out = locations->Out().AsFpuRegister<FRegister>();
- // As a "quality of implementation", rather than pure "spec compliance", it is required that
- // Math.abs() clears the sign bit (but changes nothing else) for all numbers, including NaN.
+ // Note, as a "quality of implementation", rather than pure "spec compliance", we require that
+ // Math.abs() clears the sign bit (but changes nothing else) for all numbers, including NaN
+ // (signaling NaN may become quiet though).
//
// The ABS.fmt instructions (abs.s and abs.d) do exactly that when NAN2008=1 (R6). For this case,
// both regular floating point numbers and NAN values are treated alike, only the sign bit is
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 2856c3e..2d3c00f 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -943,6 +943,10 @@
HandleInvoke(invoke);
}
+ void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
HandleInvoke(clinit);
}
@@ -975,7 +979,7 @@
}
if (ref_info->IsSingletonAndRemovable() &&
!new_instance->IsFinalizable() &&
- !new_instance->NeedsAccessCheck()) {
+ !new_instance->NeedsChecks()) {
singleton_new_instances_.push_back(new_instance);
}
ArenaVector<HInstruction*>& heap_values =
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 9d73e29..9583838 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -161,26 +161,27 @@
void HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
for ( ; node != nullptr; node = node->next) {
+ // Visit inner loops first.
int current_induction_simplification_count = induction_simplication_count_;
if (node->inner != nullptr) {
TraverseLoopsInnerToOuter(node->inner);
}
- // Visit loop after its inner loops have been visited. If the induction of any inner
- // loop has been simplified, recompute the induction information of this loop first.
+ // Recompute induction information of this loop if the induction
+ // of any inner loop has been simplified.
if (current_induction_simplification_count != induction_simplication_count_) {
induction_range_.ReVisit(node->loop_info);
}
- // Repeat simplifications until no more changes occur. Note that since
- // each simplification consists of eliminating code (without introducing
- // new code), this process is always finite.
+ // Repeat simplifications in the body of this loop until no more changes occur.
+ // Note that since each simplification consists of eliminating code (without
+ // introducing new code), this process is always finite.
do {
simplified_ = false;
- SimplifyBlocks(node);
SimplifyInduction(node);
+ SimplifyBlocks(node);
} while (simplified_);
- // Remove inner loops when empty.
+ // Simplify inner loop.
if (node->inner == nullptr) {
- RemoveIfEmptyInnerLoop(node);
+ SimplifyInnerLoop(node);
}
}
}
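
TraverseLoopsInnerToOuter now runs SimplifyInduction followed by SimplifyBlocks in a do/while loop until nothing changes; the comment's termination argument is that every simplification only eliminates code. A toy, self-contained model of that "repeat until a fixed point" driver; the dead-instruction rule below is invented purely for illustration and is not ART's SimplifyInduction/SimplifyBlocks.

#include <cassert>
#include <cstddef>
#include <vector>

// Toy IR: an instruction is dead when its value has no remaining uses.
struct Insn { int uses; };

// One simplification pass: remove dead instructions; for illustration, each
// removal also drops one use from the instruction that produced its input.
bool SimplifyOnce(std::vector<Insn>* body) {
  bool simplified = false;
  for (size_t i = 0; i < body->size();) {
    if ((*body)[i].uses == 0) {
      body->erase(body->begin() + i);
      if (i > 0) {
        (*body)[i - 1].uses--;  // its producer just lost its consumer
      }
      simplified = true;
    } else {
      ++i;
    }
  }
  return simplified;
}

int main() {
  // A chain where each instruction's only consumer is the next one: everything
  // eventually dies, but only across several passes.
  std::vector<Insn> body = {{1}, {1}, {1}, {0}};
  int passes = 0;
  bool simplified;
  do {  // mirrors: do { ... } while (simplified_);
    simplified = SimplifyOnce(&body);
    ++passes;
  } while (simplified);
  assert(body.empty());
  assert(passes <= 5);  // terminates: every productive pass removes code
  return 0;
}
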
@@ -198,7 +199,7 @@
iset_->clear();
int32_t use_count = 0;
if (IsPhiInduction(phi) &&
- IsOnlyUsedAfterLoop(node->loop_info, phi, &use_count) &&
+ IsOnlyUsedAfterLoop(node->loop_info, phi, /*collect_loop_uses*/ false, &use_count) &&
// No uses, or no early-exit with proper replacement.
(use_count == 0 ||
(!IsEarlyExit(node->loop_info) && TryReplaceWithLastValue(phi, preheader)))) {
@@ -206,7 +207,6 @@
RemoveFromCycle(i);
}
simplified_ = true;
- induction_simplication_count_++;
}
}
}
@@ -216,24 +216,14 @@
for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
// Remove dead instructions from the loop-body.
- for (HBackwardInstructionIterator i(block->GetInstructions()); !i.Done(); i.Advance()) {
- HInstruction* instruction = i.Current();
- if (instruction->IsDeadAndRemovable()) {
- simplified_ = true;
- block->RemoveInstruction(instruction);
- }
- }
+ RemoveDeadInstructions(block->GetPhis());
+ RemoveDeadInstructions(block->GetInstructions());
// Remove trivial control flow blocks from the loop-body.
- HBasicBlock* succ = nullptr;
- if (IsGotoBlock(block, &succ) && succ->GetPredecessors().size() == 1) {
- // Trivial goto block can be removed.
- HBasicBlock* pred = block->GetSinglePredecessor();
+ if (block->GetPredecessors().size() == 1 &&
+ block->GetSuccessors().size() == 1 &&
+ block->GetSingleSuccessor()->GetPredecessors().size() == 1) {
simplified_ = true;
- pred->ReplaceSuccessor(block, succ);
- block->RemoveDominatedBlock(succ);
- block->DisconnectAndDelete();
- pred->AddDominatedBlock(succ);
- succ->SetDominator(pred);
+ block->MergeWith(block->GetSingleSuccessor());
} else if (block->GetSuccessors().size() == 2) {
// Trivial if block can be bypassed to either branch.
HBasicBlock* succ0 = block->GetSuccessors()[0];
@@ -258,55 +248,66 @@
}
}
-void HLoopOptimization::RemoveIfEmptyInnerLoop(LoopNode* node) {
+bool HLoopOptimization::SimplifyInnerLoop(LoopNode* node) {
HBasicBlock* header = node->loop_info->GetHeader();
HBasicBlock* preheader = node->loop_info->GetPreHeader();
// Ensure loop header logic is finite.
- if (!induction_range_.IsFinite(node->loop_info)) {
- return;
+ int64_t tc = 0;
+ if (!induction_range_.IsFinite(node->loop_info, &tc)) {
+ return false;
}
// Ensure there is only a single loop-body (besides the header).
HBasicBlock* body = nullptr;
for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
if (it.Current() != header) {
if (body != nullptr) {
- return;
+ return false;
}
body = it.Current();
}
}
// Ensure there is only a single exit point.
if (header->GetSuccessors().size() != 2) {
- return;
+ return false;
}
HBasicBlock* exit = (header->GetSuccessors()[0] == body)
? header->GetSuccessors()[1]
: header->GetSuccessors()[0];
// Ensure exit can only be reached by exiting loop.
if (exit->GetPredecessors().size() != 1) {
- return;
+ return false;
}
- // Detect an empty loop: no side effects other than plain iteration. Replace
- // subsequent index uses, if any, with the last value and remove the loop.
+ // Detect either an empty loop (no side effects other than plain iteration) or
+ // a trivial loop (just iterating once). Replace subsequent index uses, if any,
+ // with the last value and remove the loop, possibly after unrolling its body.
+ HInstruction* phi = header->GetFirstPhi();
iset_->clear();
int32_t use_count = 0;
- if (IsEmptyHeader(header) &&
- IsEmptyBody(body) &&
- IsOnlyUsedAfterLoop(node->loop_info, header->GetFirstPhi(), &use_count) &&
- // No uses, or proper replacement.
- (use_count == 0 || TryReplaceWithLastValue(header->GetFirstPhi(), preheader))) {
- body->DisconnectAndDelete();
- exit->RemovePredecessor(header);
- header->RemoveSuccessor(exit);
- header->RemoveDominatedBlock(exit);
- header->DisconnectAndDelete();
- preheader->AddSuccessor(exit);
- preheader->AddInstruction(new (graph_->GetArena()) HGoto()); // global allocator
- preheader->AddDominatedBlock(exit);
- exit->SetDominator(preheader);
- // Update hierarchy.
- RemoveLoop(node);
+ if (IsEmptyHeader(header)) {
+ bool is_empty = IsEmptyBody(body);
+ if ((is_empty || tc == 1) &&
+ IsOnlyUsedAfterLoop(node->loop_info, phi, /*collect_loop_uses*/ true, &use_count) &&
+ // No uses, or proper replacement.
+ (use_count == 0 || TryReplaceWithLastValue(phi, preheader))) {
+ if (!is_empty) {
+ // Unroll the loop body, which sees the initial value of the index.
+ phi->ReplaceWith(phi->InputAt(0));
+ preheader->MergeInstructionsWith(body);
+ }
+ body->DisconnectAndDelete();
+ exit->RemovePredecessor(header);
+ header->RemoveSuccessor(exit);
+ header->RemoveDominatedBlock(exit);
+ header->DisconnectAndDelete();
+ preheader->AddSuccessor(exit);
+ preheader->AddInstruction(new (graph_->GetArena()) HGoto()); // global allocator
+ preheader->AddDominatedBlock(exit);
+ exit->SetDominator(preheader);
+ RemoveLoop(node); // update hierarchy
+ return true;
+ }
}
+ return false;
}
bool HLoopOptimization::IsPhiInduction(HPhi* phi) {
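Aside (not part of the patch): a minimal standalone C++ sketch of what the trivial-loop case above (trip count of 1) amounts to at the source level; the function names below are invented for illustration only.

#include <cassert>

// Before: a loop whose trip count is statically 1.
static int SumFirstElement(const int* a) {
  int sum = 0;
  for (int i = 0; i < 1; ++i) {  // trivial loop, tc == 1
    sum += a[i];
  }
  return sum;
}

// After: the body is unrolled once into the preheader, the induction phi is
// replaced by its initial value (0), and the loop structure is removed.
static int SumFirstElementUnrolled(const int* a) {
  int sum = 0;
  sum += a[0];
  return sum;
}

int main() {
  const int data[] = {7, 8, 9};
  assert(SumFirstElement(data) == SumFirstElementUnrolled(data));
  return 0;
}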
@@ -374,12 +375,19 @@
bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
+ bool collect_loop_uses,
/*out*/ int32_t* use_count) {
for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
HInstruction* user = use.GetUser();
if (iset_->find(user) == iset_->end()) { // not excluded?
HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
if (other_loop_info != nullptr && other_loop_info->IsIn(*loop_info)) {
+ // If collect_loop_uses is set, simply keep adding those uses to the set.
+ // Otherwise, reject uses inside the loop that were not already in the set.
+ if (collect_loop_uses) {
+ iset_->insert(user);
+ continue;
+ }
return false;
}
++*use_count;
@@ -388,40 +396,48 @@
return true;
}
-void HLoopOptimization::ReplaceAllUses(HInstruction* instruction, HInstruction* replacement) {
- const HUseList<HInstruction*>& uses = instruction->GetUses();
- for (auto it = uses.begin(), end = uses.end(); it != end;) {
- HInstruction* user = it->GetUser();
- size_t index = it->GetIndex();
- ++it; // increment before replacing
- if (iset_->find(user) == iset_->end()) { // not excluded?
- user->ReplaceInput(replacement, index);
- induction_range_.Replace(user, instruction, replacement); // update induction
- }
- }
- const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
- for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
- HEnvironment* user = it->GetUser();
- size_t index = it->GetIndex();
- ++it; // increment before replacing
- if (iset_->find(user->GetHolder()) == iset_->end()) { // not excluded?
- user->RemoveAsUserOfInput(index);
- user->SetRawEnvAt(index, replacement);
- replacement->AddEnvUseAt(user, index);
- }
- }
-}
-
bool HLoopOptimization::TryReplaceWithLastValue(HInstruction* instruction, HBasicBlock* block) {
// Try to replace outside uses with the last value. Environment uses can consume this
// value too, since any first true use is outside the loop (although this may imply
// that de-opting may look "ahead" a bit on the phi value). If there are only environment
// uses, the value is dropped altogether, since the computations have no effect.
if (induction_range_.CanGenerateLastValue(instruction)) {
- ReplaceAllUses(instruction, induction_range_.GenerateLastValue(instruction, graph_, block));
+ HInstruction* replacement = induction_range_.GenerateLastValue(instruction, graph_, block);
+ const HUseList<HInstruction*>& uses = instruction->GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end;) {
+ HInstruction* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment before replacing
+ if (iset_->find(user) == iset_->end()) { // not excluded?
+ user->ReplaceInput(replacement, index);
+ induction_range_.Replace(user, instruction, replacement); // update induction
+ }
+ }
+ const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
+ for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
+ HEnvironment* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment before replacing
+ if (iset_->find(user->GetHolder()) == iset_->end()) { // not excluded?
+ user->RemoveAsUserOfInput(index);
+ user->SetRawEnvAt(index, replacement);
+ replacement->AddEnvUseAt(user, index);
+ }
+ }
+ induction_simplication_count_++;
return true;
}
return false;
}
+void HLoopOptimization::RemoveDeadInstructions(const HInstructionList& list) {
+ for (HBackwardInstructionIterator i(list); !i.Done(); i.Advance()) {
+ HInstruction* instruction = i.Current();
+ if (instruction->IsDeadAndRemovable()) {
+ simplified_ = true;
+ instruction->GetBlock()->RemoveInstructionOrPhi(instruction);
+ }
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 0f05b24..9ddab41 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -60,19 +60,21 @@
void TraverseLoopsInnerToOuter(LoopNode* node);
+ // Simplification.
void SimplifyInduction(LoopNode* node);
void SimplifyBlocks(LoopNode* node);
- void RemoveIfEmptyInnerLoop(LoopNode* node);
+ bool SimplifyInnerLoop(LoopNode* node);
+ // Helpers.
bool IsPhiInduction(HPhi* phi);
bool IsEmptyHeader(HBasicBlock* block);
bool IsEmptyBody(HBasicBlock* block);
-
bool IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
+ bool collect_loop_uses,
/*out*/ int32_t* use_count);
- void ReplaceAllUses(HInstruction* instruction, HInstruction* replacement);
bool TryReplaceWithLastValue(HInstruction* instruction, HBasicBlock* block);
+ void RemoveDeadInstructions(const HInstructionList& list);
// Range information based on prior induction variable analysis.
InductionVarRange induction_range_;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index d45fa11..d15145e 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1853,6 +1853,14 @@
SetGraph(nullptr);
}
+void HBasicBlock::MergeInstructionsWith(HBasicBlock* other) {
+ DCHECK(EndsWithControlFlowInstruction());
+ RemoveInstruction(GetLastInstruction());
+ instructions_.Add(other->GetInstructions());
+ other->instructions_.SetBlockOfInstructions(this);
+ other->instructions_.Clear();
+}
+
void HBasicBlock::MergeWith(HBasicBlock* other) {
DCHECK_EQ(GetGraph(), other->GetGraph());
DCHECK(ContainsElement(dominated_blocks_, other));
@@ -1861,11 +1869,7 @@
DCHECK(other->GetPhis().IsEmpty());
// Move instructions from `other` to `this`.
- DCHECK(EndsWithControlFlowInstruction());
- RemoveInstruction(GetLastInstruction());
- instructions_.Add(other->GetInstructions());
- other->instructions_.SetBlockOfInstructions(this);
- other->instructions_.Clear();
+ MergeInstructionsWith(other);
// Remove `other` from the loops it is included in.
for (HLoopInformationOutwardIterator it(*other); !it.Done(); it.Advance()) {
@@ -2387,6 +2391,14 @@
return !opt.GetDoesNotNeedEnvironment();
}
+const DexFile& HInvokeStaticOrDirect::GetDexFileForPcRelativeDexCache() const {
+ ArtMethod* caller = GetEnvironment()->GetMethod();
+ ScopedObjectAccess soa(Thread::Current());
+ // `caller` is null for a top-level graph representing a method whose declaring
+ // class was not resolved.
+ return caller == nullptr ? GetBlock()->GetGraph()->GetDexFile() : *caller->GetDexFile();
+}
+
bool HInvokeStaticOrDirect::NeedsDexCacheOfDeclaringClass() const {
if (GetMethodLoadKind() != MethodLoadKind::kDexCacheViaMethod) {
return false;
@@ -2430,17 +2442,6 @@
}
}
-// Helper for InstructionDataEquals to fetch the mirror Class out
-// from a kJitTableAddress LoadClass kind.
-// NO_THREAD_SAFETY_ANALYSIS because even though we're accessing
-// mirrors, they are stored in a variable size handle scope which is always
-// visited during a pause. Also, the only caller of this helper
-// only uses the mirror for pointer comparison.
-static inline mirror::Class* AsMirrorInternal(uint64_t address)
- NO_THREAD_SAFETY_ANALYSIS {
- return reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr();
-}
-
bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
const HLoadClass* other_load_class = other->AsLoadClass();
// TODO: To allow GVN for HLoadClass from different dex files, we should compare the type
@@ -2451,11 +2452,12 @@
}
switch (GetLoadKind()) {
case LoadKind::kBootImageAddress:
- return GetAddress() == other_load_class->GetAddress();
- case LoadKind::kJitTableAddress:
- return AsMirrorInternal(GetAddress()) == AsMirrorInternal(other_load_class->GetAddress());
+ case LoadKind::kJitTableAddress: {
+ ScopedObjectAccess soa(Thread::Current());
+ return GetClass().Get() == other_load_class->GetClass().Get();
+ }
default:
- DCHECK(HasTypeReference(GetLoadKind()) || HasDexCacheReference(GetLoadKind()));
+ DCHECK(HasTypeReference(GetLoadKind()));
return IsSameDexFile(GetDexFile(), other_load_class->GetDexFile());
}
}
@@ -2486,10 +2488,10 @@
return os << "BootImageLinkTimePcRelative";
case HLoadClass::LoadKind::kBootImageAddress:
return os << "BootImageAddress";
+ case HLoadClass::LoadKind::kBssEntry:
+ return os << "BssEntry";
case HLoadClass::LoadKind::kJitTableAddress:
return os << "JitTableAddress";
- case HLoadClass::LoadKind::kDexCachePcRelative:
- return os << "DexCachePcRelative";
case HLoadClass::LoadKind::kDexCacheViaMethod:
return os << "DexCacheViaMethod";
default:
@@ -2498,17 +2500,6 @@
}
}
-// Helper for InstructionDataEquals to fetch the mirror String out
-// from a kJitTableAddress LoadString kind.
-// NO_THREAD_SAFETY_ANALYSIS because even though we're accessing
-// mirrors, they are stored in a variable size handle scope which is always
-// visited during a pause. Also, the only caller of this helper
-// only uses the mirror for pointer comparison.
-static inline mirror::String* AsMirrorInternal(Handle<mirror::String> handle)
- NO_THREAD_SAFETY_ANALYSIS {
- return handle.Get();
-}
-
bool HLoadString::InstructionDataEquals(const HInstruction* other) const {
const HLoadString* other_load_string = other->AsLoadString();
// TODO: To allow GVN for HLoadString from different dex files, we should compare the strings
@@ -2519,8 +2510,10 @@
}
switch (GetLoadKind()) {
case LoadKind::kBootImageAddress:
- case LoadKind::kJitTableAddress:
- return AsMirrorInternal(GetString()) == AsMirrorInternal(other_load_string->GetString());
+ case LoadKind::kJitTableAddress: {
+ ScopedObjectAccess soa(Thread::Current());
+ return GetString().Get() == other_load_string->GetString().Get();
+ }
default:
return IsSameDexFile(GetDexFile(), other_load_string->GetDexFile());
}
@@ -2551,10 +2544,10 @@
return os << "BootImageAddress";
case HLoadString::LoadKind::kBssEntry:
return os << "BssEntry";
- case HLoadString::LoadKind::kDexCacheViaMethod:
- return os << "DexCacheViaMethod";
case HLoadString::LoadKind::kJitTableAddress:
return os << "JitTableAddress";
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ return os << "DexCacheViaMethod";
default:
LOG(FATAL) << "Unknown HLoadString::LoadKind: " << static_cast<int>(rhs);
UNREACHABLE();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7d6f616..53b0fdd 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1097,6 +1097,9 @@
// with a control flow instruction).
void ReplaceWith(HBasicBlock* other);
+ // Merges the instructions of `other` at the end of `this`.
+ void MergeInstructionsWith(HBasicBlock* other);
+
// Merge `other` at the end of `this`. This method updates loops, reverse post
// order, links to predecessors, successors, dominators and deletes the block
// from the graph. The two blocks must be successive, i.e. `this` the only
@@ -1291,6 +1294,7 @@
M(InvokeInterface, Invoke) \
M(InvokeStaticOrDirect, Invoke) \
M(InvokeVirtual, Invoke) \
+ M(InvokePolymorphic, Invoke) \
M(LessThan, Condition) \
M(LessThanOrEqual, Condition) \
M(LoadClass, Instruction) \
@@ -1720,28 +1724,22 @@
public:
HEnvironment(ArenaAllocator* arena,
size_t number_of_vregs,
- const DexFile& dex_file,
- uint32_t method_idx,
+ ArtMethod* method,
uint32_t dex_pc,
- InvokeType invoke_type,
HInstruction* holder)
: vregs_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentVRegs)),
locations_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentLocations)),
parent_(nullptr),
- dex_file_(dex_file),
- method_idx_(method_idx),
+ method_(method),
dex_pc_(dex_pc),
- invoke_type_(invoke_type),
holder_(holder) {
}
HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy, HInstruction* holder)
: HEnvironment(arena,
to_copy.Size(),
- to_copy.GetDexFile(),
- to_copy.GetMethodIdx(),
+ to_copy.GetMethod(),
to_copy.GetDexPc(),
- to_copy.GetInvokeType(),
holder) {}
void SetAndCopyParentChain(ArenaAllocator* allocator, HEnvironment* parent) {
@@ -1790,16 +1788,8 @@
return dex_pc_;
}
- uint32_t GetMethodIdx() const {
- return method_idx_;
- }
-
- InvokeType GetInvokeType() const {
- return invoke_type_;
- }
-
- const DexFile& GetDexFile() const {
- return dex_file_;
+ ArtMethod* GetMethod() const {
+ return method_;
}
HInstruction* GetHolder() const {
@@ -1815,10 +1805,8 @@
ArenaVector<HUserRecord<HEnvironment*>> vregs_;
ArenaVector<Location> locations_;
HEnvironment* parent_;
- const DexFile& dex_file_;
- const uint32_t method_idx_;
+ ArtMethod* method_;
const uint32_t dex_pc_;
- const InvokeType invoke_type_;
// The instruction that holds this environment.
HInstruction* const holder_;
@@ -3774,24 +3762,20 @@
DISALLOW_COPY_AND_ASSIGN(HCompare);
};
-class HNewInstance FINAL : public HExpression<2> {
+class HNewInstance FINAL : public HExpression<1> {
public:
HNewInstance(HInstruction* cls,
- HCurrentMethod* current_method,
uint32_t dex_pc,
dex::TypeIndex type_index,
const DexFile& dex_file,
- bool needs_access_check,
bool finalizable,
QuickEntrypointEnum entrypoint)
: HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC(), dex_pc),
type_index_(type_index),
dex_file_(dex_file),
entrypoint_(entrypoint) {
- SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
SetPackedFlag<kFlagFinalizable>(finalizable);
SetRawInputAt(0, cls);
- SetRawInputAt(1, current_method);
}
dex::TypeIndex GetTypeIndex() const { return type_index_; }
@@ -3803,8 +3787,9 @@
// Can throw errors when out-of-memory or if it's not instantiable/accessible.
bool CanThrow() const OVERRIDE { return true; }
- // Needs to call into runtime to make sure it's instantiable/accessible.
- bool NeedsAccessCheck() const { return GetPackedFlag<kFlagNeedsAccessCheck>(); }
+ bool NeedsChecks() const {
+ return entrypoint_ == kQuickAllocObjectWithChecks;
+ }
bool IsFinalizable() const { return GetPackedFlag<kFlagFinalizable>(); }
@@ -3821,8 +3806,7 @@
DECLARE_INSTRUCTION(NewInstance);
private:
- static constexpr size_t kFlagNeedsAccessCheck = kNumberOfExpressionPackedBits;
- static constexpr size_t kFlagFinalizable = kFlagNeedsAccessCheck + 1;
+ static constexpr size_t kFlagFinalizable = kNumberOfExpressionPackedBits;
static constexpr size_t kNumberOfNewInstancePackedBits = kFlagFinalizable + 1;
static_assert(kNumberOfNewInstancePackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
@@ -3868,7 +3852,6 @@
Primitive::Type GetType() const OVERRIDE { return GetPackedField<ReturnTypeField>(); }
uint32_t GetDexMethodIndex() const { return dex_method_index_; }
- const DexFile& GetDexFile() const { return GetEnvironment()->GetDexFile(); }
InvokeType GetInvokeType() const {
return GetPackedField<InvokeTypeField>();
@@ -3985,6 +3968,28 @@
DISALLOW_COPY_AND_ASSIGN(HInvokeUnresolved);
};
+class HInvokePolymorphic FINAL : public HInvoke {
+ public:
+ HInvokePolymorphic(ArenaAllocator* arena,
+ uint32_t number_of_arguments,
+ Primitive::Type return_type,
+ uint32_t dex_pc,
+ uint32_t dex_method_index)
+ : HInvoke(arena,
+ number_of_arguments,
+ 0u /* number_of_other_inputs */,
+ return_type,
+ dex_pc,
+ dex_method_index,
+ nullptr,
+ kVirtual) {}
+
+ DECLARE_INSTRUCTION(InvokePolymorphic);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HInvokePolymorphic);
+};
+
class HInvokeStaticOrDirect FINAL : public HInvoke {
public:
// Requirements of this method call regarding the class
@@ -4166,6 +4171,8 @@
return dispatch_info_.method_load_data;
}
+ const DexFile& GetDexFileForPcRelativeDexCache() const;
+
ClinitCheckRequirement GetClinitCheckRequirement() const {
return GetPackedField<ClinitCheckRequirementField>();
}
@@ -5425,10 +5432,10 @@
HBoundsCheck(HInstruction* index,
HInstruction* length,
uint32_t dex_pc,
- uint32_t string_char_at_method_index = DexFile::kDexNoIndex)
- : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc),
- string_char_at_method_index_(string_char_at_method_index) {
+ bool string_char_at = false)
+ : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
DCHECK_EQ(Primitive::kPrimInt, Primitive::PrimitiveKind(index->GetType()));
+ SetPackedFlag<kFlagIsStringCharAt>(string_char_at);
SetRawInputAt(0, index);
SetRawInputAt(1, length);
}
@@ -5442,22 +5449,14 @@
bool CanThrow() const OVERRIDE { return true; }
- bool IsStringCharAt() const { return GetStringCharAtMethodIndex() != DexFile::kDexNoIndex; }
- uint32_t GetStringCharAtMethodIndex() const { return string_char_at_method_index_; }
+ bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); }
HInstruction* GetIndex() const { return InputAt(0); }
DECLARE_INSTRUCTION(BoundsCheck);
private:
- // We treat a String as an array, creating the HBoundsCheck from String.charAt()
- // intrinsic in the instruction simplifier. We want to include the String.charAt()
- // in the stack trace if we actually throw the StringIndexOutOfBoundsException,
- // so we need to create an HEnvironment which will be translated to an InlineInfo
- // indicating the extra stack frame. Since we add this HEnvironment quite late,
- // in the PrepareForRegisterAllocation pass, we need to remember the method index
- // from the invoke as we don't want to look again at the dex bytecode.
- uint32_t string_char_at_method_index_; // DexFile::kDexNoIndex if regular array.
+ static constexpr size_t kFlagIsStringCharAt = kNumberOfExpressionPackedBits;
DISALLOW_COPY_AND_ASSIGN(HBoundsCheck);
};
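Aside (not part of the patch): a standalone sketch, not ART's actual packed-field helpers, of the space trade made above, where the 32-bit string_char_at_method_index_ field becomes a single packed boolean bit; the class and constant names below are invented for illustration.

#include <cassert>
#include <cstdint>

class BoundsCheckSketch {
 public:
  explicit BoundsCheckSketch(bool string_char_at) {
    SetFlag(kFlagIsStringCharAt, string_char_at);
  }
  bool IsStringCharAt() const { return GetFlag(kFlagIsStringCharAt); }

 private:
  static constexpr uint32_t kFlagIsStringCharAt = 0;  // bit position in packed_fields_

  void SetFlag(uint32_t bit, bool value) {
    packed_fields_ = value ? (packed_fields_ | (1u << bit))
                           : (packed_fields_ & ~(1u << bit));
  }
  bool GetFlag(uint32_t bit) const { return (packed_fields_ & (1u << bit)) != 0; }

  uint32_t packed_fields_ = 0u;  // shared storage for all packed flags
};

int main() {
  assert(BoundsCheckSketch(true).IsStringCharAt());
  assert(!BoundsCheckSketch(false).IsStringCharAt());
  return 0;
}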
@@ -5525,14 +5524,13 @@
// GetIncludePatchInformation().
kBootImageAddress,
+ // Load from an entry in the .bss section using a PC-relative load.
+ // Used for classes outside boot image when .bss is accessible with a PC-relative load.
+ kBssEntry,
+
// Load from the root table associated with the JIT compiled method.
kJitTableAddress,
- // Load from resolved types array in the dex cache using a PC-relative load.
- // Used for classes outside boot image when we know that we can access
- // the dex cache arrays using a PC-relative load.
- kDexCachePcRelative,
-
// Load from resolved types array accessed through the class loaded from
// the compiled method's own ArtMethod*. This is the default access type when
// all other types are unavailable.
@@ -5544,6 +5542,7 @@
HLoadClass(HCurrentMethod* current_method,
dex::TypeIndex type_index,
const DexFile& dex_file,
+ Handle<mirror::Class> klass,
bool is_referrers_class,
uint32_t dex_pc,
bool needs_access_check)
@@ -5551,6 +5550,7 @@
special_input_(HUserRecord<HInstruction*>(current_method)),
type_index_(type_index),
dex_file_(dex_file),
+ klass_(klass),
loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) {
// Referrers class should not need access check. We never inline unverified
// methods so we can't possibly end up in this situation.
@@ -5559,14 +5559,11 @@
SetPackedField<LoadKindField>(
is_referrers_class ? LoadKind::kReferrersClass : LoadKind::kDexCacheViaMethod);
SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
- SetPackedFlag<kFlagIsInDexCache>(false);
SetPackedFlag<kFlagIsInBootImage>(false);
SetPackedFlag<kFlagGenerateClInitCheck>(false);
}
- void SetLoadKindWithAddress(LoadKind load_kind, uint64_t address) {
- DCHECK(HasAddress(load_kind));
- load_data_.address = address;
+ void SetLoadKind(LoadKind load_kind) {
SetLoadKindInternal(load_kind);
}
@@ -5579,15 +5576,6 @@
SetLoadKindInternal(load_kind);
}
- void SetLoadKindWithDexCacheReference(LoadKind load_kind,
- const DexFile& dex_file,
- uint32_t element_index) {
- DCHECK(HasDexCacheReference(load_kind));
- DCHECK(IsSameDexFile(dex_file_, dex_file));
- load_data_.dex_cache_element_index = element_index;
- SetLoadKindInternal(load_kind);
- }
-
LoadKind GetLoadKind() const {
return GetPackedField<LoadKindField>();
}
@@ -5612,13 +5600,21 @@
}
bool CanCallRuntime() const {
- return MustGenerateClinitCheck() ||
- (!IsReferrersClass() && !IsInDexCache()) ||
- NeedsAccessCheck();
+ return NeedsAccessCheck() ||
+ MustGenerateClinitCheck() ||
+ GetLoadKind() == LoadKind::kDexCacheViaMethod ||
+ GetLoadKind() == LoadKind::kBssEntry;
}
bool CanThrow() const OVERRIDE {
- return CanCallRuntime();
+ return NeedsAccessCheck() ||
+ MustGenerateClinitCheck() ||
+ // If the class is in the boot image, the lookup in the runtime call cannot throw.
+ // This keeps CanThrow() consistent between non-PIC (using kBootImageAddress) and
+ // PIC and subsequently avoids a DCE behavior dependency on the PIC option.
+ ((GetLoadKind() == LoadKind::kDexCacheViaMethod ||
+ GetLoadKind() == LoadKind::kBssEntry) &&
+ !IsInBootImage());
}
ReferenceTypeInfo GetLoadedClassRTI() {
@@ -5634,15 +5630,8 @@
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
- uint32_t GetDexCacheElementOffset() const;
-
- uint64_t GetAddress() const {
- DCHECK(HasAddress(GetLoadKind()));
- return load_data_.address;
- }
-
bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
- return !IsReferrersClass();
+ return GetLoadKind() == LoadKind::kDexCacheViaMethod;
}
static SideEffects SideEffectsForArchRuntimeCalls() {
@@ -5651,17 +5640,9 @@
bool IsReferrersClass() const { return GetLoadKind() == LoadKind::kReferrersClass; }
bool NeedsAccessCheck() const { return GetPackedFlag<kFlagNeedsAccessCheck>(); }
- bool IsInDexCache() const { return GetPackedFlag<kFlagIsInDexCache>(); }
bool IsInBootImage() const { return GetPackedFlag<kFlagIsInBootImage>(); }
bool MustGenerateClinitCheck() const { return GetPackedFlag<kFlagGenerateClInitCheck>(); }
- void MarkInDexCache() {
- SetPackedFlag<kFlagIsInDexCache>(true);
- DCHECK(!NeedsEnvironment());
- RemoveEnvironment();
- SetSideEffects(SideEffects::None());
- }
-
void MarkInBootImage() {
SetPackedFlag<kFlagIsInBootImage>(true);
}
@@ -5678,12 +5659,15 @@
return Primitive::kPrimNot;
}
+ Handle<mirror::Class> GetClass() const {
+ return klass_;
+ }
+
DECLARE_INSTRUCTION(LoadClass);
private:
static constexpr size_t kFlagNeedsAccessCheck = kNumberOfGenericPackedBits;
- static constexpr size_t kFlagIsInDexCache = kFlagNeedsAccessCheck + 1;
- static constexpr size_t kFlagIsInBootImage = kFlagIsInDexCache + 1;
+ static constexpr size_t kFlagIsInBootImage = kFlagNeedsAccessCheck + 1;
// Whether this instruction must generate the initialization check.
// Used for code generation.
static constexpr size_t kFlagGenerateClInitCheck = kFlagIsInBootImage + 1;
@@ -5695,35 +5679,24 @@
using LoadKindField = BitField<LoadKind, kFieldLoadKind, kFieldLoadKindSize>;
static bool HasTypeReference(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageLinkTimeAddress ||
+ return load_kind == LoadKind::kReferrersClass ||
+ load_kind == LoadKind::kBootImageLinkTimeAddress ||
load_kind == LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == LoadKind::kDexCacheViaMethod ||
- load_kind == LoadKind::kReferrersClass;
- }
-
- static bool HasAddress(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageAddress ||
- load_kind == LoadKind::kJitTableAddress;
- }
-
- static bool HasDexCacheReference(LoadKind load_kind) {
- return load_kind == LoadKind::kDexCachePcRelative;
+ load_kind == LoadKind::kBssEntry ||
+ load_kind == LoadKind::kDexCacheViaMethod;
}
void SetLoadKindInternal(LoadKind load_kind);
// The special input is the HCurrentMethod for kDexCacheViaMethod or kReferrersClass.
// For other load kinds it's empty or possibly some architecture-specific instruction
- // for PC-relative loads, i.e. kDexCachePcRelative or kBootImageLinkTimePcRelative.
+ // for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
const dex::TypeIndex type_index_;
const DexFile& dex_file_;
- union {
- uint32_t dex_cache_element_index; // Only for dex cache reference.
- uint64_t address; // Up to 64-bit, needed for kJitTableAddress on 64-bit targets.
- } load_data_;
+ Handle<mirror::Class> klass_;
ReferenceTypeInfo loaded_class_rti_;
@@ -5732,19 +5705,13 @@
std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs);
// Note: defined outside class to see operator<<(., HLoadClass::LoadKind).
-inline uint32_t HLoadClass::GetDexCacheElementOffset() const {
- DCHECK(HasDexCacheReference(GetLoadKind())) << GetLoadKind();
- return load_data_.dex_cache_element_index;
-}
-
-// Note: defined outside class to see operator<<(., HLoadClass::LoadKind).
inline void HLoadClass::AddSpecialInput(HInstruction* special_input) {
// The special input is used for PC-relative loads on some architectures,
// including literal pool loads, which are PC-relative too.
DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
- GetLoadKind() == LoadKind::kDexCachePcRelative ||
GetLoadKind() == LoadKind::kBootImageLinkTimeAddress ||
- GetLoadKind() == LoadKind::kBootImageAddress) << GetLoadKind();
+ GetLoadKind() == LoadKind::kBootImageAddress ||
+ GetLoadKind() == LoadKind::kBssEntry) << GetLoadKind();
DCHECK(special_input_.GetInstruction() == nullptr);
special_input_ = HUserRecord<HInstruction*>(special_input);
special_input->AddUseAt(this, 0);
@@ -5772,15 +5739,15 @@
// Used for strings outside boot image when .bss is accessible with a PC-relative load.
kBssEntry,
+ // Load from the root table associated with the JIT compiled method.
+ kJitTableAddress,
+
// Load from resolved strings array accessed through the class loaded from
// the compiled method's own ArtMethod*. This is the default access type when
// all other types are unavailable.
kDexCacheViaMethod,
- // Load from the root table associated with the JIT compiled method.
- kJitTableAddress,
-
- kLast = kJitTableAddress,
+ kLast = kDexCacheViaMethod,
};
HLoadString(HCurrentMethod* current_method,
@@ -5872,7 +5839,7 @@
// The special input is the HCurrentMethod for kDexCacheViaMethod.
// For other load kinds it's empty or possibly some architecture-specific instruction
- // for PC-relative loads, i.e. kDexCachePcRelative or kBootImageLinkTimePcRelative.
+ // for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
dex::StringIndex string_index_;
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index 5d9a652..7686ba8 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -52,7 +52,7 @@
exit_block->AddInstruction(new (&allocator) HExit());
HEnvironment* environment = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic, null_check);
+ &allocator, 1, graph->GetArtMethod(), 0, null_check);
null_check->SetRawEnvironment(environment);
environment->SetRawEnvAt(0, parameter);
parameter->AddEnvUseAt(null_check->GetEnvironment(), 0);
@@ -137,7 +137,7 @@
ASSERT_TRUE(parameter1->GetUses().HasExactlyOneElement());
HEnvironment* environment = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic, with_environment);
+ &allocator, 1, graph->GetArtMethod(), 0, with_environment);
ArenaVector<HInstruction*> array(allocator.Adapter());
array.push_back(parameter1);
@@ -148,13 +148,13 @@
ASSERT_TRUE(parameter1->GetEnvUses().HasExactlyOneElement());
HEnvironment* parent1 = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic, nullptr);
+ &allocator, 1, graph->GetArtMethod(), 0, nullptr);
parent1->CopyFrom(array);
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 2u);
HEnvironment* parent2 = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic, nullptr);
+ &allocator, 1, graph->GetArtMethod(), 0, nullptr);
parent2->CopyFrom(array);
parent1->SetAndCopyParentChain(&allocator, parent2);
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index e321b9e..a0fdde1 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -62,8 +62,9 @@
HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
switch (load_kind) {
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadClass::LoadKind::kBootImageAddress:
+ case HLoadClass::LoadKind::kBssEntry:
// Add a base register for PC-relative literals on R2.
InitializePCRelativeBasePointer();
load_class->AddSpecialInput(base_);
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index b1fdb17..2befc8c 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -83,7 +83,7 @@
void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == HLoadClass::LoadKind::kDexCachePcRelative) {
+ load_kind == HLoadClass::LoadKind::kBssEntry) {
InitializePCRelativeBasePointer();
load_class->AddSpecialInput(base_);
}
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index f9ac3a0..efbaf6c 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -16,6 +16,9 @@
#include "prepare_for_register_allocation.h"
+#include "jni_internal.h"
+#include "well_known_classes.h"
+
namespace art {
void PrepareForRegisterAllocation::Run() {
@@ -42,16 +45,12 @@
if (check->IsStringCharAt()) {
// Add a fake environment for String.charAt() inline info as we want
// the exception to appear as being thrown from there.
- const DexFile& dex_file = check->GetEnvironment()->GetDexFile();
- DCHECK_STREQ(dex_file.PrettyMethod(check->GetStringCharAtMethodIndex()).c_str(),
- "char java.lang.String.charAt(int)");
+ ArtMethod* char_at_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
ArenaAllocator* arena = GetGraph()->GetArena();
HEnvironment* environment = new (arena) HEnvironment(arena,
/* number_of_vregs */ 0u,
- dex_file,
- check->GetStringCharAtMethodIndex(),
+ char_at_method,
/* dex_pc */ DexFile::kDexNoIndex,
- kVirtual,
check);
check->InsertRawEnvironment(environment);
}
@@ -134,39 +133,6 @@
}
}
-void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) {
- HLoadClass* load_class = instruction->InputAt(0)->AsLoadClass();
- const bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse();
- // Change the entrypoint to kQuickAllocObject if either:
- // - the class is finalizable (only kQuickAllocObject handles finalizable classes),
- // - the class needs access checks (we do not know if it's finalizable),
- // - or the load class has only one use.
- if (instruction->IsFinalizable() || has_only_one_use || load_class->NeedsAccessCheck()) {
- instruction->SetEntrypoint(kQuickAllocObject);
- instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex().index_), 0);
- if (has_only_one_use) {
- // We've just removed the only use of the HLoadClass. Since we don't run DCE after this pass,
- // do it manually if possible.
- if (!load_class->CanThrow()) {
- // If the load class can not throw, it has no side effects and can be removed if there is
- // only one use.
- load_class->GetBlock()->RemoveInstruction(load_class);
- } else if (!instruction->GetEnvironment()->IsFromInlinedInvoke() &&
- CanMoveClinitCheck(load_class, instruction)) {
- // The allocation entry point that deals with access checks does not work with inlined
- // methods, so we need to check whether this allocation comes from an inlined method.
- // We also need to make the same check as for moving clinit check, whether the HLoadClass
- // has the clinit check responsibility or not (HLoadClass can throw anyway).
- // If it needed access checks, we delegate the access check to the allocation.
- if (load_class->NeedsAccessCheck()) {
- instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck);
- }
- load_class->GetBlock()->RemoveInstruction(load_class);
- }
- }
- }
-}
-
bool PrepareForRegisterAllocation::CanEmitConditionAt(HCondition* condition,
HInstruction* user) const {
if (condition->GetNext() != user) {
@@ -232,8 +198,7 @@
return false;
}
if (user_environment->GetDexPc() != input_environment->GetDexPc() ||
- user_environment->GetMethodIdx() != input_environment->GetMethodIdx() ||
- !IsSameDexFile(user_environment->GetDexFile(), input_environment->GetDexFile())) {
+ user_environment->GetMethod() != input_environment->GetMethod()) {
return false;
}
user_environment = user_environment->GetParent();
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index a679148..c128227 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -44,7 +44,6 @@
void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
void VisitCondition(HCondition* condition) OVERRIDE;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
- void VisitNewInstance(HNewInstance* instruction) OVERRIDE;
bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index f8a4469..a4d59ab 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -295,13 +295,13 @@
}
if (check->IsIf()) {
- HBasicBlock* trueBlock = check->IsEqual()
+ HBasicBlock* trueBlock = compare->IsEqual()
? check->AsIf()->IfTrueSuccessor()
: check->AsIf()->IfFalseSuccessor();
BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti);
} else {
DCHECK(check->IsDeoptimize());
- if (check->IsEqual()) {
+ if (compare->IsEqual()) {
BoundTypeIn(receiver, check->GetBlock(), check, class_rti);
}
}
@@ -499,18 +499,19 @@
if (instr->IsInvokeStaticOrDirect() && instr->AsInvokeStaticOrDirect()->IsStringInit()) {
// Calls to String.<init> are replaced with a StringFactory.
if (kIsDebugBuild) {
- HInvoke* invoke = instr->AsInvoke();
+ HInvokeStaticOrDirect* invoke = instr->AsInvokeStaticOrDirect();
ClassLinker* cl = Runtime::Current()->GetClassLinker();
Thread* self = Thread::Current();
StackHandleScope<2> hs(self);
+ const DexFile& dex_file = *invoke->GetTargetMethod().dex_file;
Handle<mirror::DexCache> dex_cache(
- hs.NewHandle(FindDexCacheWithHint(self, invoke->GetDexFile(), hint_dex_cache_)));
+ hs.NewHandle(FindDexCacheWithHint(self, dex_file, hint_dex_cache_)));
// Use a null loader. We should probably use the compiling method's class loader,
// but then we would need to pass it to RTPVisitor just for this debug check. Since
// the method is from the String class, the null loader is good enough.
Handle<mirror::ClassLoader> loader;
ArtMethod* method = cl->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
- invoke->GetDexFile(), invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect);
+ dex_file, invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect);
DCHECK(method != nullptr);
mirror::Class* declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr);
@@ -619,14 +620,10 @@
void ReferenceTypePropagation::RTPVisitor::VisitLoadClass(HLoadClass* instr) {
ScopedObjectAccess soa(Thread::Current());
- // Get type from dex cache assuming it was populated by the verifier.
- mirror::Class* resolved_class = GetClassFromDexCache(soa.Self(),
- instr->GetDexFile(),
- instr->GetTypeIndex(),
- hint_dex_cache_);
- if (IsAdmissible(resolved_class)) {
+ Handle<mirror::Class> resolved_class = instr->GetClass();
+ if (IsAdmissible(resolved_class.Get())) {
instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(
- handle_cache_->NewHandle(resolved_class), /* is_exact */ true));
+ resolved_class, /* is_exact */ true));
}
instr->SetReferenceTypeInfo(
ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact */ true));
@@ -844,10 +841,8 @@
ScopedObjectAccess soa(Thread::Current());
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- mirror::DexCache* dex_cache =
- FindDexCacheWithHint(soa.Self(), instr->GetDexFile(), hint_dex_cache_);
PointerSize pointer_size = cl->GetImagePointerSize();
- ArtMethod* method = dex_cache->GetResolvedMethod(instr->GetDexMethodIndex(), pointer_size);
+ ArtMethod* method = instr->GetResolvedMethod();
mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(false, pointer_size);
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
}
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index dc8ee23..c529410 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -133,99 +133,18 @@
void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- const DexFile& dex_file = load_class->GetDexFile();
- dex::TypeIndex type_index = load_class->GetTypeIndex();
- Handle<mirror::DexCache> dex_cache = IsSameDexFile(dex_file, *compilation_unit_.GetDexFile())
- ? compilation_unit_.GetDexCache()
- : hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
- mirror::Class* cls = dex_cache->GetResolvedType(type_index);
- SharpenClass(load_class, cls, handles_, codegen_, compiler_driver_);
+ SharpenClass(load_class, codegen_, compiler_driver_);
}
void HSharpening::SharpenClass(HLoadClass* load_class,
- mirror::Class* klass,
- VariableSizedHandleScope* handles,
CodeGenerator* codegen,
CompilerDriver* compiler_driver) {
- ScopedAssertNoThreadSuspension sants("Sharpening class in compiler");
+ Handle<mirror::Class> klass = load_class->GetClass();
DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
<< load_class->GetLoadKind();
- DCHECK(!load_class->IsInDexCache()) << "HLoadClass should not be optimized before sharpening.";
DCHECK(!load_class->IsInBootImage()) << "HLoadClass should not be optimized before sharpening.";
- const DexFile& dex_file = load_class->GetDexFile();
- dex::TypeIndex type_index = load_class->GetTypeIndex();
-
- bool is_in_dex_cache = false;
- bool is_in_boot_image = false;
- HLoadClass::LoadKind desired_load_kind = static_cast<HLoadClass::LoadKind>(-1);
- uint64_t address = 0u; // Class or dex cache element address.
- Runtime* runtime = Runtime::Current();
- if (codegen->GetCompilerOptions().IsBootImage()) {
- // Compiling boot image. Check if the class is a boot image class.
- DCHECK(!runtime->UseJitCompilation());
- if (!compiler_driver->GetSupportBootImageFixup()) {
- // MIPS64 or compiler_driver_test. Do not sharpen.
- desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
- } else if ((klass != nullptr) && compiler_driver->IsImageClass(
- dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
- is_in_boot_image = true;
- is_in_dex_cache = true;
- desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
- ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
- : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
- } else {
- // Not a boot image class. We must go through the dex cache.
- DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
- desired_load_kind = HLoadClass::LoadKind::kDexCachePcRelative;
- }
- } else {
- is_in_boot_image = (klass != nullptr) && runtime->GetHeap()->ObjectIsInBootImageSpace(klass);
- if (runtime->UseJitCompilation()) {
- // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
- // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- is_in_dex_cache = (klass != nullptr);
- if (is_in_boot_image) {
- // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
- desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(klass);
- } else if (is_in_dex_cache) {
- desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
- // We store in the address field the location of the stack reference maintained
- // by the handle. We do this now so that the code generation does not need to figure
- // out which class loader to use.
- address = reinterpret_cast<uint64_t>(handles->NewHandle(klass).GetReference());
- } else {
- // Class not loaded yet. This happens when the dex code requesting
- // this `HLoadClass` hasn't been executed in the interpreter.
- // Fallback to the dex cache.
- // TODO(ngeoffray): Generate HDeoptimize instead.
- desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
- }
- } else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
- // AOT app compilation. Check if the class is in the boot image.
- desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(klass);
- } else {
- // Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
- // Use PC-relative load from the dex cache if the dex file belongs
- // to the oat file that we're currently compiling.
- desired_load_kind =
- ContainsElement(compiler_driver->GetDexFilesForOatFile(), &load_class->GetDexFile())
- ? HLoadClass::LoadKind::kDexCachePcRelative
- : HLoadClass::LoadKind::kDexCacheViaMethod;
- }
- }
- DCHECK_NE(desired_load_kind, static_cast<HLoadClass::LoadKind>(-1));
-
- if (is_in_boot_image) {
- load_class->MarkInBootImage();
- }
-
if (load_class->NeedsAccessCheck()) {
// We need to call the runtime anyway, so we simply get the class as that call's return value.
return;
@@ -239,29 +158,73 @@
return;
}
- if (is_in_dex_cache) {
- load_class->MarkInDexCache();
+ const DexFile& dex_file = load_class->GetDexFile();
+ dex::TypeIndex type_index = load_class->GetTypeIndex();
+
+ bool is_in_boot_image = false;
+ HLoadClass::LoadKind desired_load_kind = static_cast<HLoadClass::LoadKind>(-1);
+ Runtime* runtime = Runtime::Current();
+ if (codegen->GetCompilerOptions().IsBootImage()) {
+ // Compiling boot image. Check if the class is a boot image class.
+ DCHECK(!runtime->UseJitCompilation());
+ if (!compiler_driver->GetSupportBootImageFixup()) {
+ // compiler_driver_test. Do not sharpen.
+ desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ } else if ((klass.Get() != nullptr) && compiler_driver->IsImageClass(
+ dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
+ is_in_boot_image = true;
+ desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
+ ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
+ : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
+ } else {
+ // Not a boot image class.
+ DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
+ }
+ } else {
+ is_in_boot_image = (klass.Get() != nullptr) &&
+ runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
+ if (runtime->UseJitCompilation()) {
+ // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
+ // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
+ if (is_in_boot_image) {
+ // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
+ desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
+ } else if (klass.Get() != nullptr) {
+ desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
+ } else {
+ // Class not loaded yet. This happens when the dex code requesting
+ // this `HLoadClass` hasn't been executed in the interpreter.
+ // Fall back to the dex cache.
+ // TODO(ngeoffray): Generate HDeoptimize instead.
+ desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ }
+ } else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
+ // AOT app compilation. Check if the class is in the boot image.
+ desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
+ } else {
+ // Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
+ }
+ }
+ DCHECK_NE(desired_load_kind, static_cast<HLoadClass::LoadKind>(-1));
+
+ if (is_in_boot_image) {
+ load_class->MarkInBootImage();
}
HLoadClass::LoadKind load_kind = codegen->GetSupportedLoadClassKind(desired_load_kind);
switch (load_kind) {
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
case HLoadClass::LoadKind::kDexCacheViaMethod:
load_class->SetLoadKindWithTypeReference(load_kind, dex_file, type_index);
break;
case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
- DCHECK_NE(address, 0u);
- load_class->SetLoadKindWithAddress(load_kind, address);
+ load_class->SetLoadKind(load_kind);
break;
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- PointerSize pointer_size = InstructionSetPointerSize(codegen->GetInstructionSet());
- DexCacheArraysLayout layout(pointer_size, &dex_file);
- size_t element_index = layout.TypeOffset(type_index);
- load_class->SetLoadKindWithDexCacheReference(load_kind, dex_file, element_index);
- break;
- }
default:
LOG(FATAL) << "Unexpected load kind: " << load_kind;
UNREACHABLE();
@@ -274,7 +237,7 @@
const DexFile& dex_file = load_string->GetDexFile();
dex::StringIndex string_index = load_string->GetStringIndex();
- HLoadString::LoadKind desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
+ HLoadString::LoadKind desired_load_kind = static_cast<HLoadString::LoadKind>(-1);
{
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
@@ -297,8 +260,8 @@
? HLoadString::LoadKind::kBootImageLinkTimePcRelative
: HLoadString::LoadKind::kBootImageLinkTimeAddress;
} else {
- // MIPS64 or compiler_driver_test. Do not sharpen.
- DCHECK_EQ(desired_load_kind, HLoadString::LoadKind::kDexCacheViaMethod);
+ // compiler_driver_test. Do not sharpen.
+ desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
}
} else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
@@ -310,6 +273,8 @@
} else {
desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
}
+ } else {
+ desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
}
} else {
// AOT app compilation. Try to lookup the string without allocating if not found.
@@ -326,6 +291,7 @@
load_string->SetString(handles_->NewHandle(string));
}
}
+ DCHECK_NE(desired_load_kind, static_cast<HLoadString::LoadKind>(-1));
HLoadString::LoadKind load_kind = codegen_->GetSupportedLoadStringKind(desired_load_kind);
load_string->SetLoadKind(load_kind);
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index ae5ccb3..ae3d83e 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -49,8 +49,6 @@
// Used internally but also by the inliner.
static void SharpenClass(HLoadClass* load_class,
- mirror::Class* klass,
- VariableSizedHandleScope* handles,
CodeGenerator* codegen,
CompilerDriver* compiler_driver)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index fc8af64..6087e36 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -13,8 +13,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
#include "stack_map_stream.h"
+#include "art_method.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+
namespace art {
void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
@@ -98,15 +103,27 @@
current_dex_register_++;
}
-void StackMapStream::BeginInlineInfoEntry(uint32_t method_index,
+static bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
+ // Note: the runtime is null only for unit testing.
+ return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
+}
+
+void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
uint32_t dex_pc,
- InvokeType invoke_type,
- uint32_t num_dex_registers) {
+ uint32_t num_dex_registers,
+ const DexFile* outer_dex_file) {
DCHECK(!in_inline_frame_);
in_inline_frame_ = true;
- current_inline_info_.method_index = method_index;
+ if (EncodeArtMethodInInlineInfo(method)) {
+ current_inline_info_.method = method;
+ } else {
+ if (dex_pc != static_cast<uint32_t>(-1) && kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(IsSameDexFile(*outer_dex_file, *method->GetDexFile()));
+ }
+ current_inline_info_.method_index = method->GetDexMethodIndexUnchecked();
+ }
current_inline_info_.dex_pc = dex_pc;
- current_inline_info_.invoke_type = invoke_type;
current_inline_info_.num_dex_registers = num_dex_registers;
current_inline_info_.dex_register_locations_start_index = dex_register_locations_.size();
if (num_dex_registers != 0) {
@@ -229,25 +246,32 @@
void StackMapStream::ComputeInlineInfoEncoding() {
uint32_t method_index_max = 0;
uint32_t dex_pc_max = DexFile::kDexNoIndex;
- uint32_t invoke_type_max = 0;
+ uint32_t extra_data_max = 0;
uint32_t inline_info_index = 0;
for (const StackMapEntry& entry : stack_maps_) {
for (size_t j = 0; j < entry.inlining_depth; ++j) {
InlineInfoEntry inline_entry = inline_infos_[inline_info_index++];
- method_index_max = std::max(method_index_max, inline_entry.method_index);
+ if (inline_entry.method == nullptr) {
+ method_index_max = std::max(method_index_max, inline_entry.method_index);
+ extra_data_max = std::max(extra_data_max, 1u);
+ } else {
+ method_index_max = std::max(
+ method_index_max, High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
+ extra_data_max = std::max(
+ extra_data_max, Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
+ }
if (inline_entry.dex_pc != DexFile::kDexNoIndex &&
(dex_pc_max == DexFile::kDexNoIndex || dex_pc_max < inline_entry.dex_pc)) {
dex_pc_max = inline_entry.dex_pc;
}
- invoke_type_max = std::max(invoke_type_max, static_cast<uint32_t>(inline_entry.invoke_type));
}
}
DCHECK_EQ(inline_info_index, inline_infos_.size());
inline_info_encoding_.SetFromSizes(method_index_max,
dex_pc_max,
- invoke_type_max,
+ extra_data_max,
dex_register_maps_size_);
}
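Aside (not part of the patch): a standalone sketch of how a method pointer is split across the method_index and extra_data fields with High32Bits/Low32Bits and later reassembled; the helper definitions here are simplified stand-ins for illustration, not the runtime's.

#include <cassert>
#include <cstdint>

static uint32_t High32Bits(uintptr_t value) {
  return static_cast<uint32_t>(static_cast<uint64_t>(value) >> 32);
}

static uint32_t Low32Bits(uintptr_t value) {
  return static_cast<uint32_t>(value);
}

int main() {
  int dummy = 0;
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&dummy);
  uint32_t hi = High32Bits(ptr);  // stored in the method_index field
  uint32_t lo = Low32Bits(ptr);   // stored in the extra_data field
  uintptr_t restored =
      static_cast<uintptr_t>((static_cast<uint64_t>(hi) << 32) | lo);
  assert(restored == ptr);  // the original pointer round-trips losslessly
  return 0;
}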
@@ -354,9 +378,20 @@
DCHECK_LE(entry.inline_infos_start_index + entry.inlining_depth, inline_infos_.size());
for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index];
- inline_info.SetMethodIndexAtDepth(inline_info_encoding_, depth, inline_entry.method_index);
+ if (inline_entry.method != nullptr) {
+ inline_info.SetMethodIndexAtDepth(
+ inline_info_encoding_,
+ depth,
+ High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
+ inline_info.SetExtraDataAtDepth(
+ inline_info_encoding_,
+ depth,
+ Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
+ } else {
+ inline_info.SetMethodIndexAtDepth(inline_info_encoding_, depth, inline_entry.method_index);
+ inline_info.SetExtraDataAtDepth(inline_info_encoding_, depth, 1);
+ }
inline_info.SetDexPcAtDepth(inline_info_encoding_, depth, inline_entry.dex_pc);
- inline_info.SetInvokeTypeAtDepth(inline_info_encoding_, depth, inline_entry.invoke_type);
if (inline_entry.num_dex_registers == 0) {
// No dex map available.
inline_info.SetDexRegisterMapOffsetAtDepth(inline_info_encoding_,
@@ -544,10 +579,13 @@
InlineInfoEntry inline_entry = inline_infos_[inline_info_index];
DCHECK_EQ(inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, d),
inline_entry.dex_pc);
- DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, d),
- inline_entry.method_index);
- DCHECK_EQ(inline_info.GetInvokeTypeAtDepth(encoding.inline_info_encoding, d),
- inline_entry.invoke_type);
+ if (inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, d)) {
+ DCHECK_EQ(inline_info.GetArtMethodAtDepth(encoding.inline_info_encoding, d),
+ inline_entry.method);
+ } else {
+ DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, d),
+ inline_entry.method_index);
+ }
CheckDexRegisterMap(code_info,
code_info.GetDexRegisterMapAtDepth(
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 53a9795..d6f42b3 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -109,8 +109,8 @@
struct InlineInfoEntry {
uint32_t dex_pc; // DexFile::kDexNoIndex for intrinsified native methods.
+ ArtMethod* method;
uint32_t method_index;
- InvokeType invoke_type;
uint32_t num_dex_registers;
BitVector* live_dex_registers_mask;
size_t dex_register_locations_start_index;
@@ -126,10 +126,10 @@
void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value);
- void BeginInlineInfoEntry(uint32_t method_index,
+ void BeginInlineInfoEntry(ArtMethod* method,
uint32_t dex_pc,
- InvokeType invoke_type,
- uint32_t num_dex_registers);
+ uint32_t num_dex_registers,
+ const DexFile* outer_dex_file = nullptr);
void EndInlineInfoEntry();
size_t GetNumberOfStackMaps() const {
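The updated tests below call EncodesArtMethodAtDepth() to tell the two encodings apart. A plausible discriminator, and presumably why the method-index case writes the literal 1 above, is pointer alignment: the low half of an aligned ArtMethod* is even, so an odd extra-data value can only mean a dex method index. This is an assumption about the reader side (the real check belongs to the InlineInfo reader in stack_map.h), sketched here:

#include <cassert>
#include <cstdint>

// Sketch only: assumes the convention from stack_map_stream.cc above, where the extra-data
// slot is either Low32Bits(ArtMethod*), even because the pointer is aligned, or the literal 1
// when a dex method index is stored instead.
static bool EncodesArtMethod(uint32_t extra_data_slot) {
  return (extra_data_slot & 1u) == 0u;
}

int main() {
  assert(!EncodesArtMethod(1u));          // method-index encoding
  assert(EncodesArtMethod(0xDEADBEE0u));  // low half of an aligned pointer
  return 0;
}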
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 967fd96..22810ea 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -16,6 +16,7 @@
#include "stack_map.h"
+#include "art_method.h"
#include "base/arena_bit_vector.h"
#include "stack_map_stream.h"
@@ -128,6 +129,7 @@
ArenaPool pool;
ArenaAllocator arena(&pool);
StackMapStream stream(&arena);
+ ArtMethod art_method;
ArenaBitVector sp_mask1(&arena, 0, true);
sp_mask1.SetBit(2);
@@ -137,9 +139,9 @@
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
- stream.BeginInlineInfoEntry(82, 3, kDirect, number_of_dex_registers_in_inline_info);
+ stream.BeginInlineInfoEntry(&art_method, 3, number_of_dex_registers_in_inline_info);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(42, 2, kStatic, number_of_dex_registers_in_inline_info);
+ stream.BeginInlineInfoEntry(&art_method, 2, number_of_dex_registers_in_inline_info);
stream.EndInlineInfoEntry();
stream.EndStackMapEntry();
@@ -238,12 +240,10 @@
ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
ASSERT_EQ(2u, inline_info.GetDepth(encoding.inline_info_encoding));
- ASSERT_EQ(82u, inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(42u, inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, 1));
ASSERT_EQ(3u, inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
ASSERT_EQ(2u, inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(kDirect, inline_info.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(kStatic, inline_info.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
}
// Second stack map.
@@ -662,6 +662,7 @@
ArenaPool pool;
ArenaAllocator arena(&pool);
StackMapStream stream(&arena);
+ ArtMethod art_method;
ArenaBitVector sp_mask1(&arena, 0, true);
sp_mask1.SetBit(2);
@@ -672,10 +673,10 @@
stream.AddDexRegisterEntry(Kind::kInStack, 0);
stream.AddDexRegisterEntry(Kind::kConstant, 4);
- stream.BeginInlineInfoEntry(42, 2, kStatic, 1);
+ stream.BeginInlineInfoEntry(&art_method, 2, 1);
stream.AddDexRegisterEntry(Kind::kInStack, 8);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(82, 3, kStatic, 3);
+ stream.BeginInlineInfoEntry(&art_method, 3, 3);
stream.AddDexRegisterEntry(Kind::kInStack, 16);
stream.AddDexRegisterEntry(Kind::kConstant, 20);
stream.AddDexRegisterEntry(Kind::kInRegister, 15);
@@ -688,15 +689,15 @@
stream.AddDexRegisterEntry(Kind::kInStack, 56);
stream.AddDexRegisterEntry(Kind::kConstant, 0);
- stream.BeginInlineInfoEntry(42, 2, kDirect, 1);
+ stream.BeginInlineInfoEntry(&art_method, 2, 1);
stream.AddDexRegisterEntry(Kind::kInStack, 12);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(82, 3, kStatic, 3);
+ stream.BeginInlineInfoEntry(&art_method, 3, 3);
stream.AddDexRegisterEntry(Kind::kInStack, 80);
stream.AddDexRegisterEntry(Kind::kConstant, 10);
stream.AddDexRegisterEntry(Kind::kInRegister, 5);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(52, 5, kVirtual, 0);
+ stream.BeginInlineInfoEntry(&art_method, 5, 0);
stream.EndInlineInfoEntry();
stream.EndStackMapEntry();
@@ -712,12 +713,12 @@
stream.AddDexRegisterEntry(Kind::kInStack, 56);
stream.AddDexRegisterEntry(Kind::kConstant, 0);
- stream.BeginInlineInfoEntry(42, 2, kVirtual, 0);
+ stream.BeginInlineInfoEntry(&art_method, 2, 0);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(52, 5, kInterface, 1);
+ stream.BeginInlineInfoEntry(&art_method, 5, 1);
stream.AddDexRegisterEntry(Kind::kInRegister, 2);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(52, 10, kStatic, 2);
+ stream.BeginInlineInfoEntry(&art_method, 10, 2);
stream.AddDexRegisterEntry(Kind::kNone, 0);
stream.AddDexRegisterEntry(Kind::kInRegister, 3);
stream.EndInlineInfoEntry();
@@ -743,11 +744,9 @@
InlineInfo if0 = ci.GetInlineInfoOf(sm0, encoding);
ASSERT_EQ(2u, if0.GetDepth(encoding.inline_info_encoding));
ASSERT_EQ(2u, if0.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(42u, if0.GetMethodIndexAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(kStatic, if0.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
ASSERT_EQ(3u, if0.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(82u, if0.GetMethodIndexAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(kStatic, if0.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, encoding, 1);
ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
@@ -769,14 +768,11 @@
InlineInfo if1 = ci.GetInlineInfoOf(sm1, encoding);
ASSERT_EQ(3u, if1.GetDepth(encoding.inline_info_encoding));
ASSERT_EQ(2u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(42u, if1.GetMethodIndexAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(kDirect, if1.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
ASSERT_EQ(3u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(82u, if1.GetMethodIndexAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(kStatic, if1.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
ASSERT_EQ(5u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_EQ(52u, if1.GetMethodIndexAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_EQ(kVirtual, if1.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 2));
+ ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 2));
DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, encoding, 1);
ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
@@ -810,14 +806,11 @@
InlineInfo if2 = ci.GetInlineInfoOf(sm3, encoding);
ASSERT_EQ(3u, if2.GetDepth(encoding.inline_info_encoding));
ASSERT_EQ(2u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(42u, if2.GetMethodIndexAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(kVirtual, if2.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
ASSERT_EQ(5u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(52u, if2.GetMethodIndexAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(kInterface, if2.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
ASSERT_EQ(10u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_EQ(52u, if2.GetMethodIndexAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_EQ(kStatic, if2.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 2));
+ ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 2));
ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(encoding.inline_info_encoding, 0));
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index ab4f9e9..5e55210 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -5610,7 +5610,7 @@
" 214: ecbd 8a10 vpop {s16-s31}\n",
" 218: e8bd 8de0 ldmia.w sp!, {r5, r6, r7, r8, sl, fp, pc}\n",
" 21c: 4660 mov r0, ip\n",
- " 21e: f8d9 c2b0 ldr.w ip, [r9, #688] ; 0x2b0\n",
+ " 21e: f8d9 c2c0 ldr.w ip, [r9, #704] ; 0x2c0\n",
" 222: 47e0 blx ip\n",
nullptr
};
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index a71ab4b..102c313 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1124,28 +1124,23 @@
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_rosalloc
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_resolved_rosalloc
// Fast path rosalloc allocation.
- // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
- // r2, r3, r12: free.
- ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
- // Load the class (r2)
- ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- cbz r2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
-
+ // r0: type/return value, r9: Thread::Current
+ // r1, r2, r3, r12: free.
ldr r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
// TODO: consider using ldrd.
ldr r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp r3, r12
- bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
- ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3)
+ ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3)
cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation. Also does the
// initialized and finalizable checks.
- bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size. Since the size is
// already aligned we can combine the
@@ -1159,7 +1154,7 @@
// Load the free list head (r3). This
// will be the return val.
ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
- cbz r3, .Lart_quick_alloc_object_rosalloc_slow_path
+ cbz r3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
ldr r1, [r3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
// and update the list head with the
@@ -1172,8 +1167,8 @@
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
- POISON_HEAP_REF r2
- str r2, [r3, #MIRROR_OBJECT_CLASS_OFFSET]
+ POISON_HEAP_REF r0
+ str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET]
// Fence. This is "ish" not "ishst" so
// that it also ensures ordering of
// the class status load with respect
@@ -1204,20 +1199,20 @@
mov r0, r3 // Set the return value and return.
bx lr
-.Lart_quick_alloc_object_rosalloc_slow_path:
+.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r2, r9 @ pass Thread::Current
- bl artAllocObjectFromCodeRosAlloc @ (uint32_t type_idx, Method* method, Thread*)
+ mov r1, r9 @ pass Thread::Current
+ bl artAllocObjectFromCodeResolvedRosAlloc @ (mirror::Class* cls, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_rosalloc
+END art_quick_alloc_object_resolved_rosalloc
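With the class already resolved in r0, the fast path above reduces to three checks and a free-list pop: room on the thread-local allocation stack, an object size small enough for a thread-local bracket, and a non-empty run free list, after which the class header is stored and the code fences. A hedged C++ sketch of that control flow; every struct, field and constant here is a stand-in, not ART's real layout:

#include <cstddef>
#include <cstdint>

// Stand-in types; the field names mirror the offsets used in the assembly, not ART's headers.
struct FreeSlot { FreeSlot* next; };
struct RosAllocRun { FreeSlot* free_list_head; };
struct FakeClass { uint32_t object_size_alloc_fast_path; };
struct FakeThread {
  void** alloc_stack_top;
  void** alloc_stack_end;
  RosAllocRun* rosalloc_runs[16];
};

static constexpr uint32_t kMaxThreadLocalBracketSize = 128;  // placeholder value
static constexpr uint32_t kBracketQuantumSizeShift = 4;      // placeholder value

// Returns nullptr when the slow path (artAllocObjectFromCodeResolvedRosAlloc) must run.
void* AllocObjectResolvedRosAllocFastPath(FakeClass* klass, FakeThread* self) {
  if (self->alloc_stack_top >= self->alloc_stack_end) return nullptr;  // no room to push the object
  uint32_t size = klass->object_size_alloc_fast_path;
  if (size >= kMaxThreadLocalBracketSize) return nullptr;   // too big for a thread-local bracket
  size_t bracket = (size - 1) >> kBracketQuantumSizeShift;  // size is already quantum-aligned
  FreeSlot* slot = self->rosalloc_runs[bracket]->free_list_head;
  if (slot == nullptr) return nullptr;                      // run exhausted
  self->rosalloc_runs[bracket]->free_list_head = slot->next;  // pop the list head
  // The real code then stores the (poisoned) class pointer over the next pointer, pushes the
  // object on the thread-local allocation stack, decrements the free-list size, and fences.
  return slot;
}

int main() {
  FreeSlot slots[2] = {{&slots[1]}, {nullptr}};
  RosAllocRun run = {&slots[0]};
  void* stack[4] = {};
  FakeThread self = {};
  self.alloc_stack_top = &stack[0];
  self.alloc_stack_end = &stack[4];
  for (RosAllocRun*& r : self.rosalloc_runs) r = &run;
  FakeClass klass = {16};  // 16-byte objects land in bracket (16 - 1) >> 4 == 0
  return AllocObjectResolvedRosAllocFastPath(&klass, &self) == &slots[0] ? 0 : 1;
}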
-// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
+// The common fast path code for art_quick_alloc_object_resolved_tlab
+// and art_quick_alloc_object_resolved_region_tlab.
//
-// r0: type_idx/return value, r1: ArtMethod*, r2: class, r9: Thread::Current, r3, r12: free.
-// Need to preserve r0 and r1 to the slow path.
-.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
- cbz r2, \slowPathLabel // Check null class
+// r0: type, r9: Thread::Current, r1, r2, r3, r12: free.
+// Need to preserve r0 to the slow path.
+.macro ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH slowPathLabel
// Load thread_local_pos (r12) and
// thread_local_end (r3) with ldrd.
// Check constraints for ldrd.
@@ -1226,20 +1221,20 @@
#endif
ldrd r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET]
sub r12, r3, r12 // Compute the remaining buf size.
- ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3).
+ ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3).
cmp r3, r12 // Check if it fits.
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
// Reload old thread_local_pos (r0)
// for the return value.
- ldr r0, [r9, #THREAD_LOCAL_POS_OFFSET]
- add r1, r0, r3
+ ldr r2, [r9, #THREAD_LOCAL_POS_OFFSET]
+ add r1, r2, r3
str r1, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
ldr r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add r1, r1, #1
str r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
- POISON_HEAP_REF r2
- str r2, [r0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ POISON_HEAP_REF r0
+ str r0, [r2, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
// Fence. This is "ish" not "ishst" so
// that the code after this allocation
// site will see the right values in
@@ -1247,71 +1242,46 @@
// Alternatively we could use "ishst"
// if we use load-acquire for the
// object size load.)
+ mov r0, r2
dmb ish
bx lr
.endm
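This macro is the shared bump-pointer core of the resolved TLAB and region-TLAB entrypoints: compare the object size against the space left between thread_local_pos and thread_local_end, advance the cursor, bump the object count, and write the class header before the dmb. A self-contained sketch of the same logic with placeholder types:

#include <cstddef>
#include <cstdint>
#include <cstring>

struct FakeClass { uint32_t object_size_alloc_fast_path; };
struct FakeThread {
  uint8_t* thread_local_pos;
  uint8_t* thread_local_end;
  size_t thread_local_objects;
};

// Returns nullptr when the allocation does not fit and the slow path
// (artAllocObjectFromCodeResolved{TLAB,RegionTLAB}) must be taken.
void* AllocObjectResolvedTlabFastPath(FakeClass* klass, FakeThread* self) {
  size_t remaining = static_cast<size_t>(self->thread_local_end - self->thread_local_pos);
  uint32_t size = klass->object_size_alloc_fast_path;
  if (size > remaining) return nullptr;
  uint8_t* obj = self->thread_local_pos;    // the old cursor is the new object
  self->thread_local_pos = obj + size;      // bump the allocation cursor
  self->thread_local_objects++;             // bookkeeping read by the GC
  std::memcpy(obj, &klass, sizeof(klass));  // store the class header (poisoning/compression omitted)
  // A release fence (dmb ish) follows in the real code so the header is visible before the object.
  return obj;
}

int main() {
  uint8_t buffer[64] = {};
  FakeThread self = {buffer, buffer + sizeof(buffer), 0};
  FakeClass klass = {16};
  void* a = AllocObjectResolvedTlabFastPath(&klass, &self);
  void* b = AllocObjectResolvedTlabFastPath(&klass, &self);
  return (a == buffer && b == buffer + 16 && self.thread_local_objects == 2) ? 0 : 1;
}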
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
-ENTRY art_quick_alloc_object_tlab
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB).
+ENTRY art_quick_alloc_object_resolved_tlab
// Fast path tlab allocation.
- // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
- // r2, r3, r12: free.
+ // r0: type, r9: Thread::Current
+ // r1, r2, r3, r12: free.
#if defined(USE_READ_BARRIER)
mvn r0, #0 // Read barrier not supported here.
bx lr // Return -1.
#endif
- ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
- // Load the class (r2)
- ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
-.Lart_quick_alloc_object_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
+.Lart_quick_alloc_object_resolved_tlab_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
- mov r2, r9 // Pass Thread::Current.
- bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
+ mov r1, r9 // Pass Thread::Current.
+ bl artAllocObjectFromCodeResolvedTLAB // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_tlab
+END art_quick_alloc_object_resolved_tlab
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-ENTRY art_quick_alloc_object_region_tlab
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+ENTRY art_quick_alloc_object_resolved_region_tlab
// Fast path tlab allocation.
- // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current, r2, r3, r12: free.
+ // r0: type, r9: Thread::Current, r1, r2, r3, r12: free.
#if !defined(USE_READ_BARRIER)
eor r0, r0, r0 // Read barrier must be enabled here.
sub r0, r0, #1 // Return -1.
bx lr
#endif
- ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
- // Load the class (r2)
- ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- // Read barrier for class load.
- ldr r3, [r9, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz r3, .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_marking
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_marking:
- cbz r2, .Lart_quick_alloc_object_region_tlab_slow_path // Null check for loading lock word.
- // Check lock word for mark bit, if marked do the allocation.
- ldr r3, [r2, MIRROR_OBJECT_LOCK_WORD_OFFSET]
- ands r3, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
- bne .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark
- // the class.
- push {r0, r1, r3, lr} // Save registers. r3 is pushed only
- // to align sp by 16 bytes.
- mov r0, r2 // Pass the class as the first param.
- bl artReadBarrierMark
- mov r2, r0 // Get the (marked) class back.
- pop {r0, r1, r3, lr}
- b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
+.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
- mov r2, r9 // Pass Thread::Current.
- bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*)
+ mov r1, r9 // Pass Thread::Current.
+ bl artAllocObjectFromCodeResolvedRegionTLAB // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_region_tlab
+END art_quick_alloc_object_resolved_region_tlab
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
@@ -2040,3 +2010,83 @@
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, r9
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, r10
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, r11
+
+.extern artInvokePolymorphic
+ENTRY art_quick_invoke_polymorphic
+ SETUP_SAVE_REFS_AND_ARGS_FRAME r2
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ mov r0, #0 @ initialize 64-bit JValue as zero.
+ str r0, [sp, #-4]!
+ .cfi_adjust_cfa_offset 4
+ str r0, [sp, #-4]!
+ .cfi_adjust_cfa_offset 4
+ mov r0, sp @ pass JValue for return result as first argument.
+ bl artInvokePolymorphic @ artInvokePolymorphic(JValue, receiver, Thread*, SP)
+ sub r0, 'A' @ return value is descriptor of handle's return type.
+ cmp r0, 'Z' - 'A' @ check if value is in bounds of handler table
+ bgt .Lcleanup_and_return @ and clean-up if not.
+ adr r1, .Lhandler_table
+ tbb [r0, r1] @ branch to handler for return value based on return type.
+
+.Lstart_of_handlers:
+.Lstore_boolean_result:
+ ldrb r0, [sp] @ Copy boolean value to return value of this function.
+ b .Lcleanup_and_return
+.Lstore_char_result:
+ ldrh r0, [sp] @ Copy char value to return value of this function.
+ b .Lcleanup_and_return
+.Lstore_float_result:
+ vldr s0, [sp] @ Copy float value from JValue result to the context restored by
+ vstr s0, [sp, #16] @ RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ b .Lcleanup_and_return
+.Lstore_double_result:
+ vldr d0, [sp] @ Copy double value from JValue result to the context restored by
+ vstr d0, [sp, #16] @ RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ b .Lcleanup_and_return
+.Lstore_long_result:
+ ldr r1, [sp, #4] @ Copy the upper bits from JValue result to the context restored by
+ str r1, [sp, #80] @ RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ // Fall-through for lower bits.
+.Lstore_int_result:
+ ldr r0, [sp] @ Copy int value to return value of this function.
+ // Fall-through to clean up and return.
+.Lcleanup_and_return:
+ add sp, #8
+ .cfi_adjust_cfa_offset -8
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ RETURN_OR_DELIVER_PENDING_EXCEPTION_REG r2
+
+.macro HANDLER_TABLE_OFFSET handler_label
+ .byte (\handler_label - .Lstart_of_handlers) / 2
+.endm
+
+.Lhandler_table:
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // A
+ HANDLER_TABLE_OFFSET(.Lstore_int_result) // B (byte)
+ HANDLER_TABLE_OFFSET(.Lstore_char_result) // C (char)
+ HANDLER_TABLE_OFFSET(.Lstore_double_result) // D (double)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // E
+ HANDLER_TABLE_OFFSET(.Lstore_float_result) // F (float)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // G
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // H
+ HANDLER_TABLE_OFFSET(.Lstore_int_result) // I (int)
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // J (long)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // K
+ HANDLER_TABLE_OFFSET(.Lstore_int_result) // L (object)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // M
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // N
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // O
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // P
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Q
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // R
+ HANDLER_TABLE_OFFSET(.Lstore_int_result) // S (short)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // T
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // U
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // V (void)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // W
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // X
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Y
+ HANDLER_TABLE_OFFSET(.Lstore_boolean_result) // Z (boolean)
+.purgem HANDLER_TABLE_OFFSET
+END art_quick_invoke_polymorphic
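artInvokePolymorphic returns the shorty character of the method handle's return type; the stub subtracts 'A', bounds-checks against 'Z' - 'A', and dispatches through TBB over the byte table above (offsets stored divided by 2 because Thumb-2 scales table entries by halfwords). The intended character-to-handler mapping, written out as plain C++ for reference:

#include <cassert>

// Which result-copy handler each return-type descriptor selects; mirrors the A..Z table above.
enum class Handler { kNone, kBoolean, kChar, kInt, kLong, kFloat, kDouble };

static Handler HandlerForDescriptor(char c) {
  if (c < 'A' || c > 'Z') return Handler::kNone;  // matches the 'Z' - 'A' bounds check
  switch (c) {
    case 'Z': return Handler::kBoolean;
    case 'C': return Handler::kChar;
    case 'B': case 'S': case 'I': case 'L': return Handler::kInt;  // L: object reference (32-bit)
    case 'J': return Handler::kLong;
    case 'F': return Handler::kFloat;
    case 'D': return Handler::kDouble;
    default:  return Handler::kNone;  // V and unused letters fall through to cleanup
  }
}

int main() {
  assert(HandlerForDescriptor('J') == Handler::kLong);
  assert(HandlerForDescriptor('Z') == Handler::kBoolean);
  assert(HandlerForDescriptor('V') == Handler::kNone);  // void: nothing to copy
  return 0;
}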
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index b88515f..3b3783c 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1669,7 +1669,6 @@
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
// Comment out allocators that have arm64 specific asm.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) implemented in asm
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
@@ -1682,27 +1681,23 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_rosalloc
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_resolved_rosalloc
// Fast path rosalloc allocation.
- // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
- // x2-x7: free.
- ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
- // Load the class (x2)
- ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- cbz x2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
+ // x0: type, xSELF(x19): Thread::Current
+ // x1-x7: free.
ldr x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
// ldp won't work due to large offset.
ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp x3, x4
- bhs .Lart_quick_alloc_object_rosalloc_slow_path
- ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3)
+ bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ ldr w3, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3)
cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation. Also does the
// finalizable and initialization
// checks.
- bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size. Since the size is
// already aligned we can combine the
@@ -1715,7 +1710,7 @@
// Load the free list head (x3). This
// will be the return val.
ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
- cbz x3, .Lart_quick_alloc_object_rosalloc_slow_path
+ cbz x3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
ldr x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
// and update the list head with the
@@ -1728,8 +1723,8 @@
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
- POISON_HEAP_REF w2
- str w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
+ POISON_HEAP_REF w0
+ str w0, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
// Fence. This is "ish" not "ishst" so
// that it also ensures ordering of
// the object size load with respect
@@ -1759,13 +1754,13 @@
mov x0, x3 // Set the return value and return.
ret
-.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
- mov x2, xSELF // pass Thread::Current
- bl artAllocObjectFromCodeRosAlloc // (uint32_t type_idx, Method* method, Thread*)
+.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
+ mov x1, xSELF // pass Thread::Current
+ bl artAllocObjectFromCodeResolvedRosAlloc // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_rosalloc
+END art_quick_alloc_object_resolved_rosalloc
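Both the ARM and the A64 rosalloc fast paths derive the run index from the object size with one subtract-and-shift, relying on the size already being aligned to the bracket quantum; the deleted MIPS path spells the same computation out. A tiny worked example, with an assumed 16-byte quantum:

#include <cassert>
#include <cstdint>

// Assumed quantum of 16 bytes (shift of 4); the real value comes from the generated asm constants.
static constexpr uint32_t kQuantumShift = 4;

static uint32_t BracketIndex(uint32_t aligned_size) {
  // Equivalent to aligned_size / quantum - 1 when aligned_size is a multiple of the quantum.
  return (aligned_size - 1) >> kQuantumShift;
}

int main() {
  assert(BracketIndex(16) == 0);
  assert(BracketIndex(32) == 1);
  assert(BracketIndex(128) == 7);
  return 0;
}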
// The common fast path code for art_quick_alloc_array_region_tlab.
@@ -1834,16 +1829,6 @@
ret
.endm
-// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
-//
-// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current
-// x3-x7: free.
-// Need to preserve x0 and x1 to the slow path.
-.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
- cbz x2, \slowPathLabel // Check null class
- ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED \slowPathLabel
-.endm
-
// TODO: delete ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since it is the same as
// ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED.
.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
@@ -1853,20 +1838,18 @@
.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel
ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
- ldr w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7).
+ ldr w7, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7).
add x6, x4, x7 // Add object size to tlab pos.
cmp x6, x5 // Check if it fits, overflow works
// since the tlab pos and end are 32
// bit values.
bhi \slowPathLabel
- // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
- mov x0, x4
str x6, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add x5, x5, #1
str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
- POISON_HEAP_REF w2
- str w2, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ POISON_HEAP_REF w0
+ str w0, [x4, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
// Fence. This is "ish" not "ishst" so
// that the code after this allocation
// site will see the right values in
@@ -1874,91 +1857,52 @@
// Alternatively we could use "ishst"
// if we use load-acquire for the
// object size load.)
+ mov x0, x4
dmb ish
ret
.endm
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
-ENTRY art_quick_alloc_object_tlab
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB).
+ENTRY art_quick_alloc_object_resolved_tlab
// Fast path tlab allocation.
- // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
- // x2-x7: free.
+ // x0: type, xSELF(x19): Thread::Current
+ // x1-x7: free.
#if defined(USE_READ_BARRIER)
mvn x0, xzr // Read barrier not supported here.
ret // Return -1.
#endif
- ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
- // Load the class (x2)
- ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
-.Lart_quick_alloc_object_tlab_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
- mov x2, xSELF // Pass Thread::Current.
- bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
+ ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_object_resolved_tlab_slow_path
+.Lart_quick_alloc_object_resolved_tlab_slow_path:
+ SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
+ mov x1, xSELF // Pass Thread::Current.
+ bl artAllocObjectFromCodeResolvedTLAB // (mirror::Class*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_tlab
+END art_quick_alloc_object_resolved_tlab
// The common code for art_quick_alloc_object_*region_tlab
-.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved, read_barrier
+.macro GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB name, entrypoint, fast_path
ENTRY \name
// Fast path region tlab allocation.
- // x0: type_idx/resolved class/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
- // If is_resolved is 1 then x0 is the resolved type, otherwise it is the index.
- // x2-x7: free.
+ // x0: type, xSELF(x19): Thread::Current
+ // x1-x7: free.
#if !defined(USE_READ_BARRIER)
mvn x0, xzr // Read barrier must be enabled here.
ret // Return -1.
#endif
-.if \is_resolved
- mov x2, x0 // class is actually stored in x0 already
-.else
- ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
- // Load the class (x2)
- ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- // If the class is null, go slow path. The check is required to read the lock word.
- cbz w2, .Lslow_path\name
-.endif
-.if \read_barrier
- // Most common case: GC is not marking.
- ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz x3, .Lmarking\name
-.endif
.Ldo_allocation\name:
\fast_path .Lslow_path\name
-.Lmarking\name:
-.if \read_barrier
- // GC is marking, check the lock word of the class for the mark bit.
- // Class is not null, check mark bit in lock word.
- ldr w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
- // If the bit is not zero, do the allocation.
- tbnz w3, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name
- // The read barrier slow path. Mark
- // the class.
- SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32 // Save registers (x0, x1, lr).
- SAVE_REG xLR, 24 // Align sp by 16 bytes.
- mov x0, x2 // Pass the class as the first param.
- bl artReadBarrierMark
- mov x2, x0 // Get the (marked) class back.
- RESTORE_REG xLR, 24
- RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32 // Restore registers.
- b .Ldo_allocation\name
-.endif
.Lslow_path\name:
SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
- mov x2, xSELF // Pass Thread::Current.
- bl \entrypoint // (uint32_t type_idx, Method* method, Thread*)
+ mov x1, xSELF // Pass Thread::Current.
+ bl \entrypoint // (mirror::Class*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END \name
.endm
-// Use ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since the null check is already done in GENERATE_ALLOC_OBJECT_TLAB.
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 0, 1
-// No read barrier for the resolved or initialized cases since the caller is responsible for the
-// read barrier due to the to-space invariant.
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1, 0
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1, 0
+GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED
+GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED
// TODO: We could use this macro for the normal tlab allocator too.
@@ -2623,3 +2567,82 @@
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg27, w27, x27
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg28, w28, x28
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, w29, x29
+
+.extern artInvokePolymorphic
+ENTRY art_quick_invoke_polymorphic
+ SETUP_SAVE_REFS_AND_ARGS_FRAME // Save callee saves in case allocation triggers GC.
+ mov x2, xSELF
+ mov x3, sp
+ INCREASE_FRAME 16 // Reserve space for JValue result.
+ str xzr, [sp, #0] // Initialize result to zero.
+ mov x0, sp // Set r0 to point to result.
+ bl artInvokePolymorphic // artInvokePolymorphic(result, receiver, thread, save_area)
+ uxtb w0, w0 // Result is the return type descriptor as a char.
+ sub w0, w0, 'A' // Convert to zero based index.
+ cmp w0, 'Z' - 'A'
+ bhi .Lcleanup_and_return // Clean-up if out-of-bounds.
+ adrp x1, .Lhandler_table // Compute address of handler table.
+ add x1, x1, :lo12:.Lhandler_table
+ ldrb w0, [x1, w0, uxtw] // Lookup handler offset in handler table.
+ adr x1, .Lstart_of_handlers
+ add x0, x1, w0, sxtb #2 // Convert relative offset to absolute address.
+ br x0 // Branch to handler.
+
+.Lstart_of_handlers:
+.Lstore_boolean_result:
+ ldrb w0, [sp]
+ b .Lcleanup_and_return
+.Lstore_char_result:
+ ldrh w0, [sp]
+ b .Lcleanup_and_return
+.Lstore_float_result:
+ ldr s0, [sp]
+ str s0, [sp, #32]
+ b .Lcleanup_and_return
+.Lstore_double_result:
+ ldr d0, [sp]
+ str d0, [sp, #32]
+ b .Lcleanup_and_return
+.Lstore_long_result:
+ ldr x0, [sp]
+ // Fall-through
+.Lcleanup_and_return:
+ DECREASE_FRAME 16
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+
+ .section .rodata // Place handler table in read-only section away from text.
+ .align 2
+.macro HANDLER_TABLE_OFFSET handler_label
+ .byte (\handler_label - .Lstart_of_handlers) / 4
+.endm
+.Lhandler_table:
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // A
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // B (byte)
+ HANDLER_TABLE_OFFSET(.Lstore_char_result) // C (char)
+ HANDLER_TABLE_OFFSET(.Lstore_double_result) // D (double)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // E
+ HANDLER_TABLE_OFFSET(.Lstore_float_result) // F (float)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // G
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // H
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // I (int)
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // J (long)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // K
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // L (object - references are compressed and only 32-bits)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // M
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // N
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // O
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // P
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Q
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // R
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // S (short)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // T
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // U
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // V (void)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // W
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // X
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Y
+ HANDLER_TABLE_OFFSET(.Lstore_boolean_result) // Z (boolean)
+ .text
+
+END art_quick_invoke_polymorphic
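On A64 the byte table lives in .rodata and each entry stores (label - .Lstart_of_handlers) / 4; the dispatch reloads the byte, sign-extends it, and scales it back by four (the "sxtb #2" operand) before branching. The decode step as plain arithmetic, with made-up offsets:

#include <cassert>
#include <cstdint>

// table[i] holds (handler_offset_in_bytes / 4) as written by HANDLER_TABLE_OFFSET; decoding
// multiplies by 4 again, matching "add x0, x1, w0, sxtb #2" above.
static uintptr_t DecodeHandler(uintptr_t handlers_start, const int8_t* table, unsigned index) {
  return handlers_start + static_cast<uintptr_t>(static_cast<intptr_t>(table[index]) * 4);
}

int main() {
  const int8_t table[3] = {0, 3, 12};  // offsets 0, 12 and 48 bytes from the start label
  assert(DecodeHandler(0x1000, table, 1) == 0x100c);
  assert(DecodeHandler(0x1000, table, 2) == 0x1030);
  return 0;
}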
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 3e8cdc9..3acc0a9 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1831,116 +1831,10 @@
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_rosalloc
- # Fast path rosalloc allocation
- # a0: type_idx
- # a1: ArtMethod*
- # s1: Thread::Current
- # -----------------------------
- # t0: class
- # t1: object size
- # t2: rosalloc run
- # t3: thread stack top offset
- # t4: thread stack bottom offset
- # v0: free list head
- #
- # t5, t6 : temps
-
- lw $t0, ART_METHOD_DEX_CACHE_TYPES_OFFSET_32($a1) # Load dex cache resolved types
- # array.
-
- sll $t5, $a0, COMPRESSED_REFERENCE_SIZE_SHIFT # Shift the value.
- addu $t5, $t0, $t5 # Compute the index.
- lw $t0, 0($t5) # Load class (t0).
- beqz $t0, .Lart_quick_alloc_object_rosalloc_slow_path
-
- li $t6, MIRROR_CLASS_STATUS_INITIALIZED
- lw $t5, MIRROR_CLASS_STATUS_OFFSET($t0) # Check class status.
- bne $t5, $t6, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Add a fake dependence from the following access flag and size loads to the status load. This
- # is to prevent those loads from being reordered above the status load and reading wrong values.
- xor $t5, $t5, $t5
- addu $t0, $t0, $t5
-
- lw $t5, MIRROR_CLASS_ACCESS_FLAGS_OFFSET($t0) # Check if access flags has
- li $t6, ACCESS_FLAGS_CLASS_IS_FINALIZABLE # kAccClassIsFinalizable.
- and $t6, $t5, $t6
- bnez $t6, .Lart_quick_alloc_object_rosalloc_slow_path
-
- lw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation
- lw $t4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # stack has any room left.
- bgeu $t3, $t4, .Lart_quick_alloc_object_rosalloc_slow_path
-
- lw $t1, MIRROR_CLASS_OBJECT_SIZE_OFFSET($t0) # Load object size (t1).
- li $t5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
- # allocation.
- bgtu $t1, $t5, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Compute the rosalloc bracket index from the size. Allign up the size by the rosalloc bracket
- # quantum size and divide by the quantum size and subtract by 1.
-
- addiu $t1, $t1, -1 # Decrease obj size and shift right
- srl $t1, $t1, ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT # by quantum.
-
- sll $t2, $t1, POINTER_SIZE_SHIFT
- addu $t2, $t2, $s1
- lw $t2, THREAD_ROSALLOC_RUNS_OFFSET($t2) # Load rosalloc run (t2).
-
- # Load the free list head (v0).
- # NOTE: this will be the return val.
-
- lw $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
- beqz $v0, .Lart_quick_alloc_object_rosalloc_slow_path
- nop
-
- # Load the next pointer of the head and update the list head with the next pointer.
-
- lw $t5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
- sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
-
- # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
- # asserted to match.
-
-#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
-#error "Class pointer needs to overwrite next pointer."
-#endif
-
- POISON_HEAP_REF $t0
- sw $t0, MIRROR_OBJECT_CLASS_OFFSET($v0)
-
- # Push the new object onto the thread local allocation stack and increment the thread local
- # allocation stack top.
-
- sw $v0, 0($t3)
- addiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
- sw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
-
- # Decrement the size of the free list.
-
- lw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
- addiu $t5, $t5, -1
- sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
-
- sync # Fence.
-
- jalr $zero, $ra
- nop
-
- .Lart_quick_alloc_object_rosalloc_slow_path:
-
- SETUP_SAVE_REFS_ONLY_FRAME
- la $t9, artAllocObjectFromCodeRosAlloc
- jalr $t9
- move $a2, $s1 # Pass self as argument.
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-
-END art_quick_alloc_object_rosalloc
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
/*
* Entry from managed code to resolve a string, this stub will allocate a String and deliver an
@@ -2336,7 +2230,7 @@
li $v0, -1 # return -1;
sll $v0, $a2, 1 # $a0 += $a2 * 2
- addu $a0, $a0, $v0 # " " " " "
+ addu $a0, $a0, $v0 # " ditto "
move $v0, $a2 # Set i to fromIndex.
1:
@@ -2386,3 +2280,65 @@
j $ra
nop
END art_quick_string_compareto
+
+.extern artInvokePolymorphic
+ENTRY art_quick_invoke_polymorphic
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
+ move $a2, rSELF # Make $a2 an alias for the current Thread.
+ move $a3, $sp # Make $a3 a pointer to the saved frame context.
+ addiu $sp, $sp, -24 # Reserve space for JValue result and 4 words for callee.
+ .cfi_adjust_cfa_offset 24
+ sw $zero, 20($sp) # Initialize JValue result.
+ sw $zero, 16($sp)
+ addiu $a0, $sp, 16 # Make $a0 a pointer to the JValue result
+ la $t9, artInvokePolymorphic
+ jalr $t9 # (result, receiver, Thread*, context)
+ nop
+.macro MATCH_RETURN_TYPE c, handler
+ li $t0, \c
+ beq $v0, $t0, \handler
+.endm
+ MATCH_RETURN_TYPE 'V', .Lcleanup_and_return
+ MATCH_RETURN_TYPE 'L', .Lstore_int_result
+ MATCH_RETURN_TYPE 'I', .Lstore_int_result
+ MATCH_RETURN_TYPE 'J', .Lstore_long_result
+ MATCH_RETURN_TYPE 'B', .Lstore_int_result
+ MATCH_RETURN_TYPE 'C', .Lstore_char_result
+ MATCH_RETURN_TYPE 'D', .Lstore_double_result
+ MATCH_RETURN_TYPE 'F', .Lstore_float_result
+ MATCH_RETURN_TYPE 'S', .Lstore_int_result
+.purgem MATCH_RETURN_TYPE
+ nop
+ b .Lcleanup_and_return
+ nop
+.Lstore_boolean_result:
+ lbu $v0, 16($sp) # Move byte from JValue result to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_char_result:
+ lhu $v0, 16($sp) # Move char from JValue result to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_double_result:
+.Lstore_float_result:
+ LDu $f0, $f1, 16, $sp, $t0 # Move double/float from JValue result to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_long_result:
+ lw $v1, 20($sp) # Move upper bits from JValue result to return value register.
+ // Fall-through for lower bits.
+.Lstore_int_result:
+ lw $v0, 16($sp) # Move lower bits from JValue result to return value register.
+ // Fall-through to clean up and return.
+.Lcleanup_and_return:
+ addiu $sp, $sp, 24 # Remove space for JValue result and the 4 words for the callee.
+ .cfi_adjust_cfa_offset -24
+ lw $t7, THREAD_EXCEPTION_OFFSET(rSELF) # Load Thread::Current()->exception_
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ bnez $t7, 1f # Branch to deliver the pending exception, if any.
+ nop
+ jalr $zero, $ra
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+END art_quick_invoke_polymorphic
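All four ports follow the same result-marshalling pattern: reserve a zero-initialized 64-bit JValue on the stack, pass its address to artInvokePolymorphic, then copy the descriptor-selected slice into the integer or FP return register (exact widths differ slightly per port). A hedged sketch with a local stand-in for JValue:

#include <cassert>
#include <cstdint>

// Local stand-in for ART's JValue: a 64-bit union of the possible return types.
union FakeJValue {
  uint8_t z; uint16_t c; int32_t i; int64_t j; float f; double d; uint64_t bits;
};

// Copies the descriptor-selected slice of the JValue into a 64-bit "return register".
static uint64_t MarshalResult(char descriptor, const FakeJValue& result) {
  switch (descriptor) {
    case 'Z': return result.z;  // lbu / ldrb
    case 'C': return result.c;  // lhu / ldrh
    case 'B': case 'S': case 'I': case 'L': return static_cast<uint32_t>(result.i);
    case 'J': return static_cast<uint64_t>(result.j);
    // 'F' and 'D' go to the floating-point return register in the real code; omitted here.
    default:  return 0;         // 'V' and anything unexpected
  }
}

int main() {
  FakeJValue v;
  v.j = -1;
  assert(MarshalResult('J', v) == UINT64_MAX);
  v.i = -1;
  assert(MarshalResult('I', v) == UINT32_MAX);
  v.c = 0x41;
  assert(MarshalResult('C', v) == 0x41);
  return 0;
}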
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
index 5606c1d..5757906 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64.cc
@@ -70,7 +70,7 @@
}
std::string Mips64InstructionSetFeatures::GetFeatureString() const {
- return "";
+ return "default";
}
std::unique_ptr<const InstructionSetFeatures>
diff --git a/runtime/arch/mips64/instruction_set_features_mips64_test.cc b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
index 1d03794..380c4e5 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64_test.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
@@ -27,7 +27,7 @@
ASSERT_TRUE(mips64_features.get() != nullptr) << error_msg;
EXPECT_EQ(mips64_features->GetInstructionSet(), kMips64);
EXPECT_TRUE(mips64_features->Equals(mips64_features.get()));
- EXPECT_STREQ("", mips64_features->GetFeatureString().c_str());
+ EXPECT_STREQ("default", mips64_features->GetFeatureString().c_str());
EXPECT_EQ(mips64_features->AsBitmap(), 0U);
}
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 0861d2d..ae786fe 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1775,107 +1775,9 @@
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_rosalloc
-
- # Fast path rosalloc allocation
- # a0: type_idx
- # a1: ArtMethod*
- # s1: Thread::Current
- # -----------------------------
- # t0: class
- # t1: object size
- # t2: rosalloc run
- # t3: thread stack top offset
- # a4: thread stack bottom offset
- # v0: free list head
- #
- # a5, a6 : temps
-
- ld $t0, ART_METHOD_DEX_CACHE_TYPES_OFFSET_64($a1) # Load dex cache resolved types array.
-
- dsll $a5, $a0, COMPRESSED_REFERENCE_SIZE_SHIFT # Shift the value.
- daddu $a5, $t0, $a5 # Compute the index.
- lwu $t0, 0($a5) # Load class (t0).
- beqzc $t0, .Lart_quick_alloc_object_rosalloc_slow_path
-
- li $a6, MIRROR_CLASS_STATUS_INITIALIZED
- lwu $a5, MIRROR_CLASS_STATUS_OFFSET($t0) # Check class status.
- bnec $a5, $a6, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Add a fake dependence from the following access flag and size loads to the status load. This
- # is to prevent those loads from being reordered above the status load and reading wrong values.
- xor $a5, $a5, $a5
- daddu $t0, $t0, $a5
-
- lwu $a5, MIRROR_CLASS_ACCESS_FLAGS_OFFSET($t0) # Check if access flags has
- li $a6, ACCESS_FLAGS_CLASS_IS_FINALIZABLE # kAccClassIsFinalizable.
- and $a6, $a5, $a6
- bnezc $a6, .Lart_quick_alloc_object_rosalloc_slow_path
-
- ld $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation stack
- ld $a4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # has any room left.
- bgeuc $t3, $a4, .Lart_quick_alloc_object_rosalloc_slow_path
-
- lwu $t1, MIRROR_CLASS_OBJECT_SIZE_OFFSET($t0) # Load object size (t1).
- li $a5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
- # allocation.
- bltuc $a5, $t1, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Compute the rosalloc bracket index from the size. Allign up the size by the rosalloc bracket
- # quantum size and divide by the quantum size and subtract by 1.
- daddiu $t1, $t1, -1 # Decrease obj size and shift right by
- dsrl $t1, $t1, ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT # quantum.
-
- dsll $t2, $t1, POINTER_SIZE_SHIFT
- daddu $t2, $t2, $s1
- ld $t2, THREAD_ROSALLOC_RUNS_OFFSET($t2) # Load rosalloc run (t2).
-
- # Load the free list head (v0).
- # NOTE: this will be the return val.
- ld $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
- beqzc $v0, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Load the next pointer of the head and update the list head with the next pointer.
- ld $a5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
- sd $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
-
- # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
- # asserted to match.
-
-#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
-#error "Class pointer needs to overwrite next pointer."
-#endif
-
- POISON_HEAP_REF $t0
- sw $t0, MIRROR_OBJECT_CLASS_OFFSET($v0)
-
- # Push the new object onto the thread local allocation stack and increment the thread local
- # allocation stack top.
- sd $v0, 0($t3)
- daddiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
- sd $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
-
- # Decrement the size of the free list.
- lw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
- addiu $a5, $a5, -1
- sw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
-
- sync # Fence.
-
- jalr $zero, $ra
- .cpreturn # Restore gp from t8 in branch delay slot.
-
-.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME
- jal artAllocObjectFromCodeRosAlloc
- move $a2 ,$s1 # Pass self as argument.
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-
-END art_quick_alloc_object_rosalloc
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
/*
* Entry from managed code to resolve a string, this stub will allocate a String and deliver an
@@ -2203,7 +2105,7 @@
li $v0,-1 # return -1;
sll $v0,$a2,1 # $a0 += $a2 * 2
- daddu $a0,$a0,$v0 # " " " " "
+ daddu $a0,$a0,$v0 # " ditto "
move $v0,$a2 # Set i to fromIndex.
1:
@@ -2222,4 +2124,65 @@
nop
END art_quick_indexof
+.extern artInvokePolymorphic
+ENTRY art_quick_invoke_polymorphic
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
+ move $a2, rSELF # Make $a2 an alias for the current Thread.
+ move $a3, $sp # Make $a3 a pointer to the saved frame context.
+ daddiu $sp, $sp, -8 # Reserve space for JValue result.
+ .cfi_adjust_cfa_offset 8
+ sd $zero, 0($sp) # Initialize JValue result.
+ move $a0, $sp # Make $a0 a pointer to the JValue result
+ jal artInvokePolymorphic # (result, receiver, Thread*, context)
+ nop
+.macro MATCH_RETURN_TYPE c, handler
+ li $t0, \c
+ beq $v0, $t0, \handler
+.endm
+ MATCH_RETURN_TYPE 'V', .Lcleanup_and_return
+ MATCH_RETURN_TYPE 'L', .Lstore_ref_result
+ MATCH_RETURN_TYPE 'I', .Lstore_long_result
+ MATCH_RETURN_TYPE 'J', .Lstore_long_result
+ MATCH_RETURN_TYPE 'B', .Lstore_long_result
+ MATCH_RETURN_TYPE 'C', .Lstore_char_result
+ MATCH_RETURN_TYPE 'D', .Lstore_double_result
+ MATCH_RETURN_TYPE 'F', .Lstore_float_result
+ MATCH_RETURN_TYPE 'S', .Lstore_long_result
+.purgem MATCH_RETURN_TYPE
+ nop
+ b .Lcleanup_and_return
+ nop
+.Lstore_boolean_result:
+ lbu $v0, 0($sp) # Move byte from JValue result to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_char_result:
+ lhu $v0, 0($sp) # Move char from JValue result to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_double_result:
+.Lstore_float_result:
+ l.d $f0, 0($sp) # Move double/float from JValue result to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_ref_result:
+ lwu $v0, 0($sp) # Move zero extended lower 32-bits to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_long_result:
+ ld $v0, 0($sp) # Move long from JValue result to return value register.
+ // Fall-through to clean up and return.
+.Lcleanup_and_return:
+ daddiu $sp, $sp, 8 # Remove space for JValue result.
+ .cfi_adjust_cfa_offset -8
+ ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # Load Thread::Current()->exception_
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ bnez $t0, 1f # Branch to deliver the pending exception, if any.
+ nop
+ jalr $zero, $ra
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+END art_quick_invoke_polymorphic
+
.set pop
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index db2fdca..abd9046 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -15,15 +15,13 @@
*/
.macro GENERATE_ALLOC_ENTRYPOINTS c_suffix, cxx_suffix
-// Called by managed code to allocate an object.
-TWO_ARG_DOWNCALL art_quick_alloc_object\c_suffix, artAllocObjectFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an object of a resolved class.
-TWO_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an object of an initialized class.
-TWO_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an object when the caller doesn't know whether it has access
// to the created type.
-TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check\c_suffix, artAllocObjectFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks\c_suffix, artAllocObjectFromCodeWithChecks\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an array.
THREE_ARG_DOWNCALL art_quick_alloc_array\c_suffix, artAllocArrayFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an array of a resolve class.
@@ -61,14 +59,12 @@
// Generate the allocation entrypoints for each allocator. This is used as an alternative to
// GNERATE_ALL_ALLOC_ENTRYPOINTS for selectively implementing allocation fast paths in
// hand-written assembly.
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object ## c_suffix, artAllocObjectFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check ## c_suffix, artAllocObjectFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks ## c_suffix, artAllocObjectFromCodeWithChecks ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(c_suffix, cxx_suffix) \
THREE_ARG_DOWNCALL art_quick_alloc_array ## c_suffix, artAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \
@@ -93,8 +89,7 @@
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
@@ -109,8 +104,7 @@
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_TLAB_ALLOCATOR
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
@@ -129,7 +123,6 @@
.endm
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
@@ -142,7 +135,6 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
@@ -156,8 +148,7 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented)
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc, RosAlloc)
@@ -169,7 +160,6 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
@@ -182,7 +172,6 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
@@ -195,7 +184,6 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
@@ -208,7 +196,6 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
@@ -221,7 +208,6 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region, Region)
@@ -234,7 +220,6 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
@@ -247,7 +232,6 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 9e385f8..ee65fa8 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1062,12 +1062,8 @@
EXPECT_FALSE(self->IsExceptionPending());
{
- // Use an arbitrary method from c to use as referrer
- size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex().index_), // type_idx
- // arbitrary
- reinterpret_cast<size_t>(c->GetVirtualMethod(0, kRuntimePointerSize)),
- 0U,
- StubTest::GetEntrypoint(self, kQuickAllocObject),
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
+ StubTest::GetEntrypoint(self, kQuickAllocObjectWithChecks),
self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1078,8 +1074,6 @@
}
{
- // We can use null in the second argument as we do not need a method here (not used in
- // resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
self);
@@ -1092,8 +1086,6 @@
}
{
- // We can use null in the second argument as we do not need a method here (not used in
- // resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
self);
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index c6f4c03..1d979d8 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -468,7 +468,7 @@
* The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
* of the target Method* in r0 and method->code_ in r1.
*
- * If unsuccessful, the helper will return null/null will bea pending exception in the
+ * If unsuccessful, the helper will return null/null and there will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -956,52 +956,42 @@
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-DEFINE_FUNCTION art_quick_alloc_object_rosalloc
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
+DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
// Fast path rosalloc allocation.
- // eax: uint32_t type_idx/return value, ecx: ArtMethod*
- // ebx, edx: free
- PUSH edi
- movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
- // Load the class (edx)
- movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
- testl %edx, %edx // Check null class
- jz .Lart_quick_alloc_object_rosalloc_slow_path
-
+ // eax: type/return value
+ // ecx, ebx, edx: free
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
// Check if the thread local allocation
// stack has room
- movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %edi
- cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %edi
- jae .Lart_quick_alloc_object_rosalloc_slow_path
+ movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %ecx
+ cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %ecx
+ jae .Lart_quick_alloc_object_resolved_rosalloc_slow_path
- movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%edx), %edi // Load the object size (edi)
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%eax), %ecx // Load the object size (ecx)
// Check if the size is for a thread
// local allocation. Also does the
// finalizable and initialization check.
- cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %edi
- ja .Lart_quick_alloc_object_rosalloc_slow_path
- shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %edi // Calculate the rosalloc bracket index
+ cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %ecx
+ ja .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %ecx // Calculate the rosalloc bracket index
// from object size.
// Load thread local rosalloc run (ebx)
// Subtract __SIZEOF_POINTER__ to subtract
// one from edi as there is no 0 byte run
// and the size is already aligned.
- movl (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)(%ebx, %edi, __SIZEOF_POINTER__), %ebx
+ movl (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)(%ebx, %ecx, __SIZEOF_POINTER__), %ebx
// Load free_list head (edi),
// this will be the return value.
- movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %edi
- test %edi, %edi
- jz .Lart_quick_alloc_object_rosalloc_slow_path
+ movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %ecx
+ jecxz .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Point of no slow path. Won't go to
- // the slow path from here on. Ok to
- // clobber eax and ecx.
- movl %edi, %eax
+ // the slow path from here on.
// Load the next pointer of the head
// and update head of free list with
// next pointer
- movl ROSALLOC_SLOT_NEXT_OFFSET(%eax), %edi
- movl %edi, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx)
+ movl ROSALLOC_SLOT_NEXT_OFFSET(%ecx), %edx
+ movl %edx, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx)
// Decrement size of free list by 1
decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%ebx)
// Store the class pointer in the
@@ -1011,141 +1001,104 @@
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
- POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%eax)
+ POISON_HEAP_REF eax
+ movl %eax, MIRROR_OBJECT_CLASS_OFFSET(%ecx)
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
// Push the new object onto the thread
// local allocation stack and
// increment the thread local
// allocation stack top.
- movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %edi
- movl %eax, (%edi)
- addl LITERAL(COMPRESSED_REFERENCE_SIZE), %edi
- movl %edi, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx)
+ movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %eax
+ movl %ecx, (%eax)
+ addl LITERAL(COMPRESSED_REFERENCE_SIZE), %eax
+ movl %eax, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx)
// No fence needed for x86.
- POP edi
+ movl %ecx, %eax // Move object to return register
ret
-.Lart_quick_alloc_object_rosalloc_slow_path:
- POP edi
+.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH eax // alignment padding
+ subl LITERAL(8), %esp // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx
PUSH eax
- call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*)
+ call SYMBOL(artAllocObjectFromCodeResolvedRosAlloc) // cxx_name(arg0, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
-END_FUNCTION art_quick_alloc_object_rosalloc
+END_FUNCTION art_quick_alloc_object_resolved_rosalloc
-// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
+// The common fast path code for art_quick_alloc_object_resolved_tlab
+// and art_quick_alloc_object_resolved_region_tlab.
//
-// EAX: type_idx/return_value, ECX: ArtMethod*, EDX: the class.
-MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel)
- testl %edx, %edx // Check null class
- jz VAR(slowPathLabel)
+// EAX: type/return_value
+MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel)
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
movl THREAD_LOCAL_END_OFFSET(%ebx), %edi // Load thread_local_end.
subl THREAD_LOCAL_POS_OFFSET(%ebx), %edi // Compute the remaining buffer size.
- movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%edx), %esi // Load the object size.
- cmpl %edi, %esi // Check if it fits.
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%eax), %ecx // Load the object size.
+ cmpl %edi, %ecx // Check if it fits.
ja VAR(slowPathLabel)
- movl THREAD_LOCAL_POS_OFFSET(%ebx), %eax // Load thread_local_pos
+ movl THREAD_LOCAL_POS_OFFSET(%ebx), %edx // Load thread_local_pos
// as allocated object.
- addl %eax, %esi // Add the object size.
- movl %esi, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos.
+ addl %edx, %ecx // Add the object size.
+ movl %ecx, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos.
incl THREAD_LOCAL_OBJECTS_OFFSET(%ebx) // Increase thread_local_objects.
// Store the class pointer in the header.
// No fence needed for x86.
- POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%eax)
+ POISON_HEAP_REF eax
+ movl %eax, MIRROR_OBJECT_CLASS_OFFSET(%edx)
+ movl %edx, %eax
POP edi
- POP esi
ret // Fast path succeeded.
END_MACRO
-// The common slow path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
-MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name)
+// The common slow path code for art_quick_alloc_object_resolved_tlab
+// and art_quick_alloc_object_resolved_region_tlab.
+MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH, cxx_name)
POP edi
- POP esi
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH eax // alignment padding
+ subl LITERAL(8), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx
PUSH eax
- call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
addl LITERAL(16), %esp
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). May be called
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be called
// for CC if the GC is not marking.
-DEFINE_FUNCTION art_quick_alloc_object_tlab
+DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab
// Fast path tlab allocation.
- // EAX: uint32_t type_idx/return value, ECX: ArtMethod*.
- // EBX, EDX: free.
- PUSH esi
+ // EAX: type
+ // EBX, ECX, EDX: free.
PUSH edi
- movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
- // Might need to break down into multiple instructions to get the base address in a register.
- // Load the class
- movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
-.Lart_quick_alloc_object_tlab_slow_path:
- ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeTLAB
-END_FUNCTION art_quick_alloc_object_tlab
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
+.Lart_quick_alloc_object_resolved_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedTLAB
+END_FUNCTION art_quick_alloc_object_resolved_tlab
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB).
-DEFINE_FUNCTION art_quick_alloc_object_region_tlab
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB).
+DEFINE_FUNCTION art_quick_alloc_object_resolved_region_tlab
// Fast path region tlab allocation.
- // EAX: uint32_t type_idx/return value, ECX: ArtMethod*.
- // EBX, EDX: free.
+ // EAX: type/return value
+ // EBX, ECX, EDX: free.
#if !defined(USE_READ_BARRIER)
int3
int3
#endif
- PUSH esi
PUSH edi
- movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
- // Might need to break down into multiple instructions to get the base address in a register.
- // Load the class
- movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
- // Read barrier for class load.
- cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
- jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
- // Null check so that we can load the lock word.
- testl %edx, %edx
- jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
- // Check the mark bit, if it is 1 return.
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
- jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark the class.
- PUSH eax
- PUSH ecx
- // Outgoing argument set up
- subl MACRO_LITERAL(8), %esp // Alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- PUSH edx // Pass the class as the first param.
- call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
- movl %eax, %edx
- addl MACRO_LITERAL(12), %esp
- CFI_ADJUST_CFA_OFFSET(-12)
- POP ecx
- POP eax
- jmp .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_slow_path:
- ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeRegionTLAB
-END_FUNCTION art_quick_alloc_object_region_tlab
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
+.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB
+END_FUNCTION art_quick_alloc_object_resolved_region_tlab
+
DEFINE_FUNCTION art_quick_resolve_string
SETUP_SAVE_EVERYTHING_FRAME ebx, ebx
@@ -2270,5 +2223,99 @@
jmp *%ebx
END_FUNCTION art_quick_osr_stub
+DEFINE_FUNCTION art_quick_invoke_polymorphic
+ SETUP_SAVE_REFS_AND_ARGS_FRAME ebx, ebx // Save frame.
+ mov %esp, %edx // Remember SP.
+ subl LITERAL(16), %esp // Make space for JValue result.
+ CFI_ADJUST_CFA_OFFSET(16)
+ movl LITERAL(0), (%esp) // Initialize result to zero.
+ movl LITERAL(0), 4(%esp)
+ mov %esp, %eax // Store pointer to JValue result in eax.
+ PUSH edx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH ecx // pass receiver (method handle)
+ PUSH eax // pass JResult
+ call SYMBOL(artInvokePolymorphic) // (result, receiver, Thread*, SP)
+ subl LITERAL('A'), %eax // Eliminate out of bounds options
+ cmpb LITERAL('Z' - 'A'), %al
+ ja .Lcleanup_and_return
+ movzbl %al, %eax
+ call .Lput_eip_in_ecx
+.Lbranch_start:
+ movl %ecx, %edx
+ add $(.Lhandler_table - .Lbranch_start), %edx // Make EDX point to handler_table.
+ leal (%edx, %eax, 2), %eax // Calculate address of entry in table.
+ movzwl (%eax), %eax // Lookup relative branch in table.
+ addl %ecx, %eax // Add EIP relative offset.
+ jmp *%eax // Branch to handler.
+
+ // Handlers for different return types.
+.Lstore_boolean_result:
+ movzbl 16(%esp), %eax // Copy boolean result to the accumulator.
+ jmp .Lcleanup_and_return
+.Lstore_char_result:
+ movzwl 16(%esp), %eax // Copy char result to the accumulator.
+ jmp .Lcleanup_and_return
+.Lstore_float_result:
+ movd 16(%esp), %xmm0 // Copy float result to the context restored by
+ movd %xmm0, 36(%esp) // RESTORE_SAVE_REFS_ONLY_FRAME.
+ jmp .Lcleanup_and_return
+.Lstore_double_result:
+ movsd 16(%esp), %xmm0 // Copy double result to the context restored by
+ movsd %xmm0, 36(%esp) // RESTORE_SAVE_REFS_ONLY_FRAME.
+ jmp .Lcleanup_and_return
+.Lstore_long_result:
+ movl 20(%esp), %edx // Copy upper-word of result to the context restored by
+ movl %edx, 72(%esp) // RESTORE_SAVE_REFS_ONLY_FRAME.
+ // Fall-through for lower bits.
+.Lstore_int_result:
+ movl 16(%esp), %eax // Copy int result to the accumulator.
+ // Fall-through to clean up and return.
+.Lcleanup_and_return:
+ addl LITERAL(32), %esp // Pop arguments and stack allocated JValue result.
+ CFI_ADJUST_CFA_OFFSET(-32)
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ RETURN_OR_DELIVER_PENDING_EXCEPTION
+
+.Lput_eip_in_ecx: // Internal function that puts address of
+ movl 0(%esp), %ecx // next instruction into ECX when CALL
+ ret
+
+ // Handler table to handlers for given type.
+.Lhandler_table:
+MACRO1(HANDLER_TABLE_ENTRY, handler_label)
+ // NB some tools require 16-bits for relocations. Shouldn't need adjusting.
+ .word RAW_VAR(handler_label) - .Lbranch_start
+END_MACRO
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // A
+ HANDLER_TABLE_ENTRY(.Lstore_int_result) // B (byte)
+ HANDLER_TABLE_ENTRY(.Lstore_char_result) // C (char)
+ HANDLER_TABLE_ENTRY(.Lstore_double_result) // D (double)
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // E
+ HANDLER_TABLE_ENTRY(.Lstore_float_result) // F (float)
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // G
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // H
+ HANDLER_TABLE_ENTRY(.Lstore_int_result) // I (int)
+ HANDLER_TABLE_ENTRY(.Lstore_long_result) // J (long)
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // K
+ HANDLER_TABLE_ENTRY(.Lstore_int_result) // L (object)
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // M
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // N
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // O
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // P
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // Q
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // R
+ HANDLER_TABLE_ENTRY(.Lstore_int_result) // S (short)
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // T
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // U
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // V (void)
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // W
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // X
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // Y
+ HANDLER_TABLE_ENTRY(.Lstore_boolean_result) // Z (boolean)
+
+END_FUNCTION art_quick_invoke_polymorphic
+
// TODO: implement these!
UNIMPLEMENTED art_quick_memcmp16
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 4c46b08..28034c9 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -983,7 +983,6 @@
// Comment out allocators that have x86_64 specific asm.
// Region TLAB:
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
@@ -996,11 +995,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
// Normal TLAB:
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
@@ -1009,29 +1006,25 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-DEFINE_FUNCTION art_quick_alloc_object_rosalloc
+
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
+DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
// Fast path rosalloc allocation.
- // RDI: type_idx, RSI: ArtMethod*, RAX: return value
- // RDX, RCX, R8, R9: free.
- movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
- // Load the class (edx)
- movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
- testl %edx, %edx // Check null class
- jz .Lart_quick_alloc_object_rosalloc_slow_path
+ // RDI: mirror::Class*, RAX: return value
+ // RSI, RDX, RCX, R8, R9: free.
// Check if the thread local
// allocation stack has room.
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
movq THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%r8), %rcx // rcx = alloc stack top.
cmpq THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%r8), %rcx
- jae .Lart_quick_alloc_object_rosalloc_slow_path
+ jae .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Load the object size
- movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdx), %eax
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdi), %eax
// Check if the size is for a thread
// local allocation. Also does the
// initialized and finalizable checks.
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %eax
- ja .Lart_quick_alloc_object_rosalloc_slow_path
+ ja .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size.
shrq LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %rax
@@ -1045,7 +1038,7 @@
// will be the return val.
movq (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9), %rax
testq %rax, %rax
- jz .Lart_quick_alloc_object_rosalloc_slow_path
+ jz .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber rdi and rsi.
// Push the new object onto the thread
// local allocation stack and
@@ -1066,17 +1059,17 @@
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
- POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
+ POISON_HEAP_REF edi
+ movl %edi, MIRROR_OBJECT_CLASS_OFFSET(%rax)
// Decrement the size of the free list
decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%r9)
// No fence necessary for x86.
ret
-.Lart_quick_alloc_object_rosalloc_slow_path:
+.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*)
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call SYMBOL(artAllocObjectFromCodeResolvedRosAlloc) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
-END_FUNCTION art_quick_alloc_object_rosalloc
+END_FUNCTION art_quick_alloc_object_resolved_rosalloc
@@ -1095,19 +1088,19 @@
// TODO: delete ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH since it is the same as
// ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH.
//
-// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
-// RCX: scratch, r8: Thread::Current().
+// RDI: the class, RAX: return value.
+// RCX, RSI, RDX: scratch, r8: Thread::Current().
MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel)
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH(RAW_VAR(slowPathLabel))
END_MACRO
// The fast path code for art_quick_alloc_object_initialized_region_tlab.
//
-// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
-// RCX: scratch, r8: Thread::Current().
+// RDI: the class, RSI: ArtMethod*, RAX: return value.
+// RCX, RSI, RDX: scratch, r8: Thread::Current().
MACRO1(ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH, slowPathLabel)
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
- movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdx), %ecx // Load the object size.
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdi), %ecx // Load the object size.
movq THREAD_LOCAL_POS_OFFSET(%r8), %rax
addq %rax, %rcx // Add size to pos, note that these
// are both 32 bit ints, overflow
@@ -1120,8 +1113,8 @@
// Store the class pointer in the
// header.
// No fence needed for x86.
- POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
+ POISON_HEAP_REF edi
+ movl %edi, MIRROR_OBJECT_CLASS_OFFSET(%rax)
ret // Fast path succeeded.
END_MACRO
@@ -1164,12 +1157,14 @@
ret // Fast path succeeded.
END_MACRO
-// The common slow path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
+
+// The common slow path code for art_quick_alloc_object_{resolved, initialized}_tlab
+// and art_quick_alloc_object_{resolved, initialized}_region_tlab.
MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name)
SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
@@ -1184,26 +1179,11 @@
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). May be
-// called with CC if the GC is not active.
-DEFINE_FUNCTION art_quick_alloc_object_tlab
- // RDI: uint32_t type_idx, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
- movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
- // Might need to break down into multiple instructions to get the base address in a register.
- // Load the class
- movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
-.Lart_quick_alloc_object_tlab_slow_path:
- ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeTLAB
-END_FUNCTION art_quick_alloc_object_tlab
-
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be
// called with CC if the GC is not active.
DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab
- // RDI: mirror::Class* klass, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
- movq %rdi, %rdx
+ // RDI: mirror::Class* klass
+ // RDX, RSI, RCX, R8, R9: free. RAX: return val.
ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
.Lart_quick_alloc_object_resolved_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedTLAB
@@ -1212,9 +1192,8 @@
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB).
// May be called with CC if the GC is not active.
DEFINE_FUNCTION art_quick_alloc_object_initialized_tlab
- // RDI: mirror::Class* klass, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
- movq %rdi, %rdx
+ // RDI: mirror::Class* klass
+ // RDX, RSI, RCX, R8, R9: free. RAX: return val.
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH .Lart_quick_alloc_object_initialized_tlab_slow_path
.Lart_quick_alloc_object_initialized_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeInitializedTLAB
@@ -1292,49 +1271,12 @@
ALLOC_ARRAY_TLAB_SLOW_PATH artAllocArrayFromCodeResolvedRegionTLAB
END_FUNCTION art_quick_alloc_array_resolved_region_tlab
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB).
-DEFINE_FUNCTION art_quick_alloc_object_region_tlab
- // Fast path region tlab allocation.
- // RDI: uint32_t type_idx, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
- ASSERT_USE_READ_BARRIER
- movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
- movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx // Load the class
- // Null check so that we can load the lock word.
- testl %edx, %edx
- jz .Lart_quick_alloc_object_region_tlab_slow_path
- // Since we have allocation entrypoint switching, we know the GC is marking.
- // Check the mark bit, if it is 0, do the read barrier mark.
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
- jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
- // Use resolved one since we already did the null check.
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark the class.
- PUSH rdi
- PUSH rsi
- subq LITERAL(8), %rsp // 16 byte alignment
- // Outgoing argument set up
- movq %rdx, %rdi // Pass the class as the first param.
- call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
- movq %rax, %rdx
- addq LITERAL(8), %rsp
- POP rsi
- POP rdi
- jmp .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_slow_path:
- ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeRegionTLAB
-END_FUNCTION art_quick_alloc_object_region_tlab
-
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB).
DEFINE_FUNCTION art_quick_alloc_object_resolved_region_tlab
// Fast path region tlab allocation.
- // RDI: mirror::Class* klass, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
+ // RDI: mirror::Class* klass
+ // RDX, RSI, RCX, R8, R9: free. RAX: return val.
ASSERT_USE_READ_BARRIER
- // No read barrier since the caller is responsible for that.
- movq %rdi, %rdx
ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB
@@ -1343,10 +1285,9 @@
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB).
DEFINE_FUNCTION art_quick_alloc_object_initialized_region_tlab
// Fast path region tlab allocation.
- // RDI: mirror::Class* klass, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
+ // RDI: mirror::Class* klass
+ // RDX, RSI, RCX, R8, R9: free. RAX: return val.
ASSERT_USE_READ_BARRIER
- movq %rdi, %rdx
// No read barrier since the caller is responsible for that.
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH .Lart_quick_alloc_object_initialized_region_tlab_slow_path
.Lart_quick_alloc_object_initialized_region_tlab_slow_path:
@@ -2453,3 +2394,79 @@
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
jmp *%rdx
END_FUNCTION art_quick_osr_stub
+
+DEFINE_FUNCTION art_quick_invoke_polymorphic
+ SETUP_SAVE_REFS_AND_ARGS_FRAME // save callee saves
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread
+ movq %rsp, %rcx // pass SP
+ subq LITERAL(16), %rsp // make space for JValue result
+ CFI_ADJUST_CFA_OFFSET(16)
+ movq LITERAL(0), (%rsp) // initialize result
+ movq %rsp, %rdi // store pointer to JValue result
+ call SYMBOL(artInvokePolymorphic) // artInvokePolymorphic(result, receiver, Thread*, SP)
+ // save the code pointer
+ subq LITERAL('A'), %rax // Convert type descriptor character value to a zero based index.
+ cmpb LITERAL('Z' - 'A'), %al // Eliminate out of bounds options
+ ja .Lcleanup_and_return
+ movzbq %al, %rax
+ leaq .Lhandler_table(%rip), %rcx // Get the address of the handler table
+ movslq (%rcx, %rax, 4), %rax // Lookup handler offset relative to table
+ addq %rcx, %rax // Add table address to yield handler address.
+ jmpq *%rax // Jump to handler.
+
+.align 4
+.Lhandler_table: // Table of type descriptor to handlers.
+MACRO1(HANDLER_TABLE_OFFSET, handle_label)
+ // NB some tools require 32-bits for relocations. Shouldn't need adjusting.
+ .long RAW_VAR(handle_label) - .Lhandler_table
+END_MACRO
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // A
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // B (byte)
+ HANDLER_TABLE_OFFSET(.Lstore_char_result) // C (char)
+ HANDLER_TABLE_OFFSET(.Lstore_double_result) // D (double)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // E
+ HANDLER_TABLE_OFFSET(.Lstore_float_result) // F (float)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // G
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // H
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // I (int)
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // J (long)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // K
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // L (object - references are compressed and only 32-bits)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // M
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // N
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // O
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // P
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Q
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // R
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // S (short)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // T
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // U
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // V (void)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // W
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // X
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Y
+ HANDLER_TABLE_OFFSET(.Lstore_boolean_result) // Z (boolean)
+
+.Lstore_boolean_result:
+ movzbq (%rsp), %rax // Copy boolean result to the accumulator
+ jmp .Lcleanup_and_return
+.Lstore_char_result:
+ movzwq (%rsp), %rax // Copy char result to the accumulator
+ jmp .Lcleanup_and_return
+.Lstore_float_result:
+ movd (%rsp), %xmm0 // Copy float result to the context restored by
+ movd %xmm0, 32(%rsp) // RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ jmp .Lcleanup_and_return
+.Lstore_double_result:
+ movsd (%rsp), %xmm0 // Copy double result to the context restored by
+ movsd %xmm0, 32(%rsp) // RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ jmp .Lcleanup_and_return
+.Lstore_long_result:
+ movq (%rsp), %rax // Copy long result to the accumulator.
+ // Fall-through
+.Lcleanup_and_return:
+ addq LITERAL(16), %rsp // Pop space for JValue result.
+ CFI_ADJUST_CFA_OFFSET(16)
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ RETURN_OR_DELIVER_PENDING_EXCEPTION
+END_FUNCTION art_quick_invoke_polymorphic
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 96976d9..9c20740 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -121,7 +121,7 @@
inline uint32_t ArtMethod::GetDexMethodIndex() {
DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
GetDeclaringClass()->IsErroneous());
- return dex_method_index_;
+ return GetDexMethodIndexUnchecked();
}
inline ArtMethod** ArtMethod::GetDexCacheResolvedMethods(PointerSize pointer_size) {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 11dcc35..2c31f6c 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -322,6 +322,9 @@
// Number of 32bit registers that would be required to hold all the arguments
static size_t NumArgRegisters(const StringPiece& shorty);
+ ALWAYS_INLINE uint32_t GetDexMethodIndexUnchecked() {
+ return dex_method_index_;
+ }
ALWAYS_INLINE uint32_t GetDexMethodIndex() REQUIRES_SHARED(Locks::mutator_lock_);
void SetDexMethodIndex(uint32_t new_idx) {
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index e4972da..4b15a22 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -90,7 +90,7 @@
art::Thread::SelfOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_pos.
-#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 198 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 34 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
art::Thread::ThreadLocalPosOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_end.
@@ -98,11 +98,13 @@
ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
art::Thread::ThreadLocalEndOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_objects.
-#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_END_OFFSET + 2 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_END_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
art::Thread::ThreadLocalObjectsOffset<POINTER_SIZE>().Int32Value())
+
// Offset of field Thread::tlsPtr_.mterp_current_ibase.
-#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__)
+#define THREAD_CURRENT_IBASE_OFFSET \
+ (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 164) * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
art::Thread::MterpCurrentIBaseOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_default_ibase.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c9b2cc8..448b460 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -351,7 +351,7 @@
array_iftable_(nullptr),
find_array_class_cache_next_victim_(0),
init_done_(false),
- log_new_class_table_roots_(false),
+ log_new_roots_(false),
intern_table_(intern_table),
quick_resolution_trampoline_(nullptr),
quick_imt_conflict_trampoline_(nullptr),
@@ -1865,12 +1865,10 @@
<< reinterpret_cast<const void*>(section_end);
}
}
- if (!oat_file->GetBssGcRoots().empty()) {
- // Insert oat file to class table for visiting .bss GC roots.
- class_table->InsertOatFile(oat_file);
- }
- } else {
- DCHECK(oat_file->GetBssGcRoots().empty());
+ }
+ if (!oat_file->GetBssGcRoots().empty()) {
+ // Insert oat file to class table for visiting .bss GC roots.
+ class_table->InsertOatFile(oat_file);
}
if (added_class_table) {
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -1934,14 +1932,27 @@
// Concurrent moving GC marked new roots through the to-space invariant.
CHECK_EQ(new_ref, old_ref);
}
+ for (const OatFile* oat_file : new_bss_roots_boot_oat_files_) {
+ for (GcRoot<mirror::Object>& root : oat_file->GetBssGcRoots()) {
+ ObjPtr<mirror::Object> old_ref = root.Read<kWithoutReadBarrier>();
+ if (old_ref != nullptr) {
+ DCHECK(old_ref->IsClass());
+ root.VisitRoot(visitor, RootInfo(kRootStickyClass));
+ ObjPtr<mirror::Object> new_ref = root.Read<kWithoutReadBarrier>();
+ // Concurrent moving GC marked new roots through the to-space invariant.
+ CHECK_EQ(new_ref, old_ref);
+ }
+ }
+ }
}
if ((flags & kVisitRootFlagClearRootLog) != 0) {
new_class_roots_.clear();
+ new_bss_roots_boot_oat_files_.clear();
}
if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
- log_new_class_table_roots_ = true;
+ log_new_roots_ = true;
} else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
- log_new_class_table_roots_ = false;
+ log_new_roots_ = false;
}
// We deliberately ignore the class roots in the image since we
// handle image roots by using the MS/CMS rescanning of dirty cards.
@@ -2700,10 +2711,6 @@
CHECK(h_new_class.Get() != nullptr) << descriptor;
CHECK(h_new_class->IsResolved()) << descriptor;
- // Update the dex cache of where the class is defined. Inlining depends on having
- // this filled.
- h_new_class->GetDexCache()->SetResolvedType(h_new_class->GetDexTypeIndex(), h_new_class.Get());
-
// Instrumentation may have updated entrypoints for all methods of all
// classes. However it could not update methods of this class while we
// were loading it. Now the class is resolved, we can update entrypoints
@@ -3307,6 +3314,7 @@
ReaderMutexLock mu(self, *Locks::dex_lock_);
ObjPtr<mirror::DexCache> dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
+ // TODO: Check if the dex file was registered with the same class loader. Bug: 34193123
return dex_cache.Ptr();
}
}
@@ -3651,7 +3659,7 @@
// This is necessary because we need to have the card dirtied for remembered sets.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
}
- if (log_new_class_table_roots_) {
+ if (log_new_roots_) {
new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
}
}
@@ -3664,6 +3672,14 @@
return nullptr;
}
+void ClassLinker::WriteBarrierForBootOatFileBssRoots(const OatFile* oat_file) {
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ DCHECK(!oat_file->GetBssGcRoots().empty()) << oat_file->GetLocation();
+ if (log_new_roots_ && !ContainsElement(new_bss_roots_boot_oat_files_, oat_file)) {
+ new_bss_roots_boot_oat_files_.push_back(oat_file);
+ }
+}
+
// TODO This should really be in mirror::Class.
void ClassLinker::UpdateClassMethods(ObjPtr<mirror::Class> klass,
LengthPrefixedArray<ArtMethod>* new_methods) {
@@ -5161,7 +5177,7 @@
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
}
CHECK_EQ(existing, klass.Get());
- if (log_new_class_table_roots_) {
+ if (log_new_roots_) {
new_class_roots_.push_back(GcRoot<mirror::Class>(h_new_class.Get()));
}
}
@@ -6933,7 +6949,7 @@
method_alignment_);
const size_t old_methods_ptr_size = (old_methods != nullptr) ? old_size : 0;
auto* methods = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
- class_linker_->GetAllocatorForClassLoader(klass_->GetClassLoader())->Realloc(
+ Runtime::Current()->GetLinearAlloc()->Realloc(
self_, old_methods, old_methods_ptr_size, new_size));
CHECK(methods != nullptr); // Native allocation failure aborts.
@@ -6953,13 +6969,19 @@
StrideIterator<ArtMethod> out(methods->begin(method_size_, method_alignment_) + old_method_count);
// Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and
// we want the roots of the miranda methods to get visited.
- for (ArtMethod* mir_method : miranda_methods_) {
+ for (size_t i = 0; i < miranda_methods_.size(); ++i) {
+ ArtMethod* mir_method = miranda_methods_[i];
ArtMethod& new_method = *out;
new_method.CopyFrom(mir_method, pointer_size);
new_method.SetAccessFlags(new_method.GetAccessFlags() | kAccMiranda | kAccCopied);
DCHECK_NE(new_method.GetAccessFlags() & kAccAbstract, 0u)
<< "Miranda method should be abstract!";
move_table_.emplace(mir_method, &new_method);
+ // Update the entry in the method array, as the array will be used for future lookups,
+ // where thread suspension is allowed.
+ // As such, the array should not contain locally allocated ArtMethod, otherwise the GC
+ // would not see them.
+ miranda_methods_[i] = &new_method;
++out;
}
// We need to copy the default methods into our own method table since the runtime requires that
@@ -6968,9 +6990,10 @@
// interface but will have different ArtMethod*s for them. This also means we cannot compare a
// default method found on a class with one found on the declaring interface directly and must
// look at the declaring class to determine if they are the same.
- for (const ScopedArenaVector<ArtMethod*>& methods_vec : {default_methods_,
- overriding_default_methods_}) {
- for (ArtMethod* def_method : methods_vec) {
+ for (ScopedArenaVector<ArtMethod*>* methods_vec : {&default_methods_,
+ &overriding_default_methods_}) {
+ for (size_t i = 0; i < methods_vec->size(); ++i) {
+ ArtMethod* def_method = (*methods_vec)[i];
ArtMethod& new_method = *out;
new_method.CopyFrom(def_method, pointer_size);
// Clear the kAccSkipAccessChecks flag if it is present. Since this class hasn't been
@@ -6981,12 +7004,18 @@
constexpr uint32_t kMaskFlags = ~kAccSkipAccessChecks;
new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
move_table_.emplace(def_method, &new_method);
+ // Update the entry in the method array, as the array will be used for future lookups,
+ // where thread suspension is allowed.
+ // As such, the array should not contain locally allocated ArtMethod, otherwise the GC
+ // would not see them.
+ (*methods_vec)[i] = &new_method;
++out;
}
}
- for (const ScopedArenaVector<ArtMethod*>& methods_vec : {default_conflict_methods_,
- overriding_default_conflict_methods_}) {
- for (ArtMethod* conf_method : methods_vec) {
+ for (ScopedArenaVector<ArtMethod*>* methods_vec : {&default_conflict_methods_,
+ &overriding_default_conflict_methods_}) {
+ for (size_t i = 0; i < methods_vec->size(); ++i) {
+ ArtMethod* conf_method = (*methods_vec)[i];
ArtMethod& new_method = *out;
new_method.CopyFrom(conf_method, pointer_size);
// This is a type of default method (there are default method impls, just a conflict) so
@@ -7002,6 +7031,11 @@
// that the compiler will not invoke the implementation of whatever method we copied from.
EnsureThrowsInvocationError(class_linker_, &new_method);
move_table_.emplace(conf_method, &new_method);
+ // Update the entry in the method array, as the array will be used for future lookups,
+ // where thread suspension is allowed.
+ // As such, the array should not contain locally allocated ArtMethod, otherwise the GC
+ // would not see them.
+ (*methods_vec)[i] = &new_method;
++out;
}
}
@@ -7034,12 +7068,7 @@
default_conflict_methods_,
miranda_methods_}) {
// These are the functions that are not already in the vtable!
- for (ArtMethod* new_method : methods_vec) {
- auto translated_method_it = move_table_.find(new_method);
- CHECK(translated_method_it != move_table_.end())
- << "We must have a translation for methods added to the classes methods_ array! We "
- << "could not find the ArtMethod added for " << ArtMethod::PrettyMethod(new_method);
- ArtMethod* new_vtable_method = translated_method_it->second;
+ for (ArtMethod* new_vtable_method : methods_vec) {
// Leave the declaring class alone the method's dex_code_item_offset_ and dex_method_index_
// fields are references into the dex file the method was defined in. Since the ArtMethod
// does not store that information it uses declaring_class_->dex_cache_.
@@ -7056,7 +7085,6 @@
ArtMethod* translated_method = vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size);
// Try and find what we need to change this method to.
auto translation_it = default_translations.find(i);
- bool found_translation = false;
if (translation_it != default_translations.end()) {
if (translation_it->second.IsInConflict()) {
// Find which conflict method we are to use for this method.
@@ -7080,30 +7108,28 @@
// Normal default method (changed from an older default or abstract interface method).
DCHECK(translation_it->second.IsTranslation());
translated_method = translation_it->second.GetTranslation();
+ auto it = move_table_.find(translated_method);
+ DCHECK(it != move_table_.end());
+ translated_method = it->second;
}
- found_translation = true;
+ } else {
+ auto it = move_table_.find(translated_method);
+ translated_method = (it != move_table_.end()) ? it->second : nullptr;
}
- DCHECK(translated_method != nullptr);
- auto it = move_table_.find(translated_method);
- if (it != move_table_.end()) {
- auto* new_method = it->second;
- DCHECK(new_method != nullptr);
+
+ if (translated_method != nullptr) {
// Make sure the new_methods index is set.
- if (new_method->GetMethodIndexDuringLinking() != i) {
+ if (translated_method->GetMethodIndexDuringLinking() != i) {
if (kIsDebugBuild) {
auto* methods = klass_->GetMethodsPtr();
CHECK_LE(reinterpret_cast<uintptr_t>(&*methods->begin(method_size_, method_alignment_)),
- reinterpret_cast<uintptr_t>(new_method));
- CHECK_LT(reinterpret_cast<uintptr_t>(new_method),
+ reinterpret_cast<uintptr_t>(translated_method));
+ CHECK_LT(reinterpret_cast<uintptr_t>(translated_method),
reinterpret_cast<uintptr_t>(&*methods->end(method_size_, method_alignment_)));
}
- new_method->SetMethodIndex(0xFFFF & i);
+ translated_method->SetMethodIndex(0xFFFF & i);
}
- vtable->SetElementPtrSize(i, new_method, pointer_size);
- } else {
- // If it was not going to be updated we wouldn't have put it into the default_translations
- // map.
- CHECK(!found_translation) << "We were asked to update this vtable entry. Must not fail.";
+ vtable->SetElementPtrSize(i, translated_method, pointer_size);
}
}
klass_->SetVTable(vtable.Ptr());
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 77322ed..cad674e 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -64,6 +64,7 @@
template<typename T> class LengthPrefixedArray;
template<class T> class MutableHandle;
class InternTable;
+class OatFile;
template<class T> class ObjectLock;
class Runtime;
class ScopedObjectAccessAlreadyRunnable;
@@ -535,6 +536,12 @@
REQUIRES(!Locks::classlinker_classes_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Add an oat file with .bss GC roots to be visited again at the end of GC
+ // for collector types that need it.
+ void WriteBarrierForBootOatFileBssRoots(const OatFile* oat_file)
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
mirror::ObjectArray<mirror::Class>* GetClassRoots() REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
DCHECK(class_roots != nullptr);
@@ -638,6 +645,10 @@
mirror::Class* GetHoldingClassOfCopiedMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns null if not found.
+ ClassTable* ClassTableForClassLoader(ObjPtr<mirror::ClassLoader> class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
struct DexCacheData {
// Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
// not work properly.
@@ -1032,10 +1043,6 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::classlinker_classes_lock_);
- // Returns null if not found.
- ClassTable* ClassTableForClassLoader(ObjPtr<mirror::ClassLoader> class_loader)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Insert a new class table if not found.
ClassTable* InsertClassTableForClassLoader(ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -1138,6 +1145,10 @@
// New class roots, only used by CMS since the GC needs to mark these in the pause.
std::vector<GcRoot<mirror::Class>> new_class_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
+ // Boot image oat files with new .bss GC roots to be visited in the pause by CMS.
+ std::vector<const OatFile*> new_bss_roots_boot_oat_files_
+ GUARDED_BY(Locks::classlinker_classes_lock_);
+
// Number of times we've searched dex caches for a class. After a certain number of misses we move
// the classes into the class_table_ to avoid dex cache based searches.
Atomic<uint32_t> failed_dex_cache_class_lookups_;
@@ -1155,7 +1166,7 @@
size_t find_array_class_cache_next_victim_;
bool init_done_;
- bool log_new_class_table_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
+ bool log_new_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
InternTable* intern_table_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 42108d8..bbe7280 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -460,7 +460,6 @@
protected:
virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
CommonRuntimeTest::SetUpRuntimeOptions(options);
- options->push_back(std::make_pair("-Xexperimental:method-handles", nullptr));
}
};
@@ -757,6 +756,7 @@
struct EmulatedStackFrameOffsets : public CheckOffsets<mirror::EmulatedStackFrame> {
EmulatedStackFrameOffsets() : CheckOffsets<mirror::EmulatedStackFrame>(
false, "Ldalvik/system/EmulatedStackFrame;") {
+ addOffset(OFFSETOF_MEMBER(mirror::EmulatedStackFrame, callsite_type_), "callsiteType");
addOffset(OFFSETOF_MEMBER(mirror::EmulatedStackFrame, references_), "references");
addOffset(OFFSETOF_MEMBER(mirror::EmulatedStackFrame, stack_frame_), "stackFrame");
addOffset(OFFSETOF_MEMBER(mirror::EmulatedStackFrame, type_), "type");
@@ -899,7 +899,6 @@
dex::TypeIndex type_idx = klass->GetClassDef()->class_idx_;
ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache();
const DexFile& dex_file = klass->GetDexFile();
- EXPECT_OBJ_PTR_EQ(dex_cache->GetResolvedType(type_idx), klass);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader.Get()),
klass);
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index c30272e..a44f79e 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -428,6 +428,8 @@
case Instruction::INVOKE_VIRTUAL_RANGE:
case Instruction::INVOKE_INTERFACE:
case Instruction::INVOKE_INTERFACE_RANGE:
+ case Instruction::INVOKE_POLYMORPHIC:
+ case Instruction::INVOKE_POLYMORPHIC_RANGE:
case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
// Without inlining, we could just check that the offset is the class offset.
@@ -551,6 +553,12 @@
case Instruction::INVOKE_INTERFACE_RANGE:
ThrowNullPointerExceptionForMethodAccess(instr->VRegB_3rc(), kInterface);
break;
+ case Instruction::INVOKE_POLYMORPHIC:
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_45cc(), kVirtual);
+ break;
+ case Instruction::INVOKE_POLYMORPHIC_RANGE:
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_4rcc(), kVirtual);
+ break;
case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
// Since we replaced the method index, we ask the verifier to tell us which
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 7b8974f..37f3ac9 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -358,7 +358,7 @@
}
break;
case k35c: {
- uint32_t arg[5];
+ uint32_t arg[kMaxVarArgRegs];
GetVarArgs(arg);
switch (Opcode()) {
case FILLED_NEW_ARRAY:
@@ -443,8 +443,50 @@
}
break;
}
+ case k45cc: {
+ uint32_t arg[kMaxVarArgRegs];
+ GetVarArgs(arg);
+ uint32_t method_idx = VRegB_45cc();
+ uint32_t proto_idx = VRegH_45cc();
+ os << opcode << " {";
+ for (int i = 0; i < VRegA_45cc(); ++i) {
+ if (i != 0) {
+ os << ", ";
+ }
+ os << "v" << arg[i];
+ }
+ os << "}";
+ if (file != nullptr) {
+ os << ", " << file->PrettyMethod(method_idx) << ", " << file->GetShorty(proto_idx)
+ << " // ";
+ } else {
+ os << ", ";
+ }
+ os << "method@" << method_idx << ", proto@" << proto_idx;
+ break;
+ }
+ case k4rcc:
+ switch (Opcode()) {
+ case INVOKE_POLYMORPHIC_RANGE: {
+ if (file != nullptr) {
+ uint32_t method_idx = VRegB_4rcc();
+ uint32_t proto_idx = VRegH_4rcc();
+ os << opcode << ", {v" << VRegC_4rcc() << " .. v" << (VRegC_4rcc() + VRegA_4rcc())
+ << "}, " << file->PrettyMethod(method_idx) << ", " << file->GetShorty(proto_idx)
+ << " // method@" << method_idx << ", proto@" << proto_idx;
+ break;
+ }
+ }
+ FALLTHROUGH_INTENDED;
+ default: {
+ uint32_t method_idx = VRegB_4rcc();
+ uint32_t proto_idx = VRegH_4rcc();
+ os << opcode << ", {v" << VRegC_4rcc() << " .. v" << (VRegC_4rcc() + VRegA_4rcc())
+ << "}, method@" << method_idx << ", proto@" << proto_idx;
+ }
+ }
+ break;
case k51l: os << StringPrintf("%s v%d, #%+" PRId64, opcode, VRegA_51l(), VRegB_51l()); break;
- default: os << " unknown format (" << DumpHex(5) << ")"; break;
}
return os.str();
}
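For illustration, here is a standalone sketch (not ART code; the operand values, opcode name, and index numbers are invented) of the dump string the new k45cc branch builds for an invoke-polymorphic instruction when no dex file is available:

#include <cstdint>
#include <iostream>
#include <sstream>

int main() {
  // Invented operands: three argument registers, method@42, proto@7.
  const uint32_t arg[5] = {1, 2, 3, 0, 0};
  const uint32_t vreg_count = 3;   // Would come from VRegA_45cc().
  const uint32_t method_idx = 42;  // Would come from VRegB_45cc().
  const uint32_t proto_idx = 7;    // Would come from VRegH_45cc().
  std::ostringstream os;
  os << "invoke-polymorphic" << " {";
  for (uint32_t i = 0; i < vreg_count; ++i) {
    if (i != 0) {
      os << ", ";
    }
    os << "v" << arg[i];
  }
  os << "}, method@" << method_idx << ", proto@" << proto_idx;
  // Prints: invoke-polymorphic {v1, v2, v3}, method@42, proto@7
  std::cout << os.str() << std::endl;
  return 0;
}

With a dex file available, the real branch additionally interleaves PrettyMethod() and the proto shorty before the raw indices.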
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 14c9c21..7d6f866 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -52,21 +52,19 @@
// suspended while executing it.
ScopedAssertNoThreadSuspension sants(__FUNCTION__);
+ if (inline_info.EncodesArtMethodAtDepth(encoding, inlining_depth)) {
+ return inline_info.GetArtMethodAtDepth(encoding, inlining_depth);
+ }
+
uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, inlining_depth);
- InvokeType invoke_type = static_cast<InvokeType>(
- inline_info.GetInvokeTypeAtDepth(encoding, inlining_depth));
- ArtMethod* inlined_method = outer_method->GetDexCacheResolvedMethod(method_index,
- kRuntimePointerSize);
- if (!inlined_method->IsRuntimeMethod()) {
+ if (inline_info.GetDexPcAtDepth(encoding, inlining_depth) == static_cast<uint32_t>(-1)) {
+ // "charAt" special case. It is the only non-leaf method we inline across dex files.
+ ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
+ DCHECK_EQ(inlined_method->GetDexMethodIndex(), method_index);
return inlined_method;
}
- // The method in the dex cache is the runtime method responsible for invoking
- // the stub that will then update the dex cache. Therefore, we need to do the
- // resolution ourselves.
-
- // We first find the dex cache of our caller. If it is the outer method, we can directly
- // use its dex cache. Otherwise, we also need to resolve our caller.
+ // Find which method did the call in the inlining hierarchy.
ArtMethod* caller = outer_method;
if (inlining_depth != 0) {
caller = GetResolvedMethod(outer_method,
@@ -74,96 +72,56 @@
encoding,
inlining_depth - 1);
}
- DCHECK_EQ(caller->GetDexCache(), outer_method->GetDexCache())
- << "Compiler only supports inlining calls within the same dex cache";
- const DexFile* dex_file = outer_method->GetDexFile();
- const DexFile::MethodId& method_id = dex_file->GetMethodId(method_index);
- if (inline_info.GetDexPcAtDepth(encoding, inlining_depth) == static_cast<uint32_t>(-1)) {
- // "charAt" special case. It is the only non-leaf method we inline across dex files.
- if (kIsDebugBuild) {
- const char* name = dex_file->StringDataByIdx(method_id.name_idx_);
- DCHECK_EQ(std::string(name), "charAt");
- DCHECK_EQ(std::string(dex_file->GetMethodShorty(method_id)), "CI")
- << std::string(dex_file->GetMethodShorty(method_id));
- DCHECK_EQ(std::string(dex_file->StringByTypeIdx(method_id.class_idx_)), "Ljava/lang/String;")
- << std::string(dex_file->StringByTypeIdx(method_id.class_idx_));
- }
- mirror::Class* cls =
- Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangString);
- // Update the dex cache for future lookups.
- caller->GetDexCache()->SetResolvedType(method_id.class_idx_, cls);
- inlined_method = cls->FindVirtualMethod("charAt", "(I)C", kRuntimePointerSize);
- } else {
- mirror::Class* klass = caller->GetDexCache()->GetResolvedType(method_id.class_idx_);
- DCHECK_EQ(klass->GetDexCache(), caller->GetDexCache())
- << "Compiler only supports inlining calls within the same dex cache";
- switch (invoke_type) {
- case kDirect:
- case kStatic:
- inlined_method =
- klass->FindDirectMethod(klass->GetDexCache(), method_index, kRuntimePointerSize);
- break;
- case kSuper:
- case kVirtual:
- inlined_method =
- klass->FindVirtualMethod(klass->GetDexCache(), method_index, kRuntimePointerSize);
- break;
- default:
- LOG(FATAL) << "Unimplemented inlined invocation type: " << invoke_type;
- UNREACHABLE();
+ // Lookup the declaring class of the inlined method.
+ const DexFile* dex_file = caller->GetDexFile();
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(method_index);
+ const char* descriptor = dex_file->StringByTypeIdx(method_id.class_idx_);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Thread* self = Thread::Current();
+ mirror::ClassLoader* class_loader = caller->GetDeclaringClass()->GetClassLoader();
+ mirror::Class* klass = class_linker->LookupClass(self, descriptor, class_loader);
+ if (klass == nullptr) {
+ LOG(FATAL) << "Could not find an inlined method from an .oat file: "
+ << "the class " << descriptor << " was not found in the class loader of "
+ << caller->PrettyMethod() << ". "
+ << "This must be due to playing wrongly with class loaders";
+ }
+
+ // Lookup the method.
+ const char* method_name = dex_file->GetMethodName(method_id);
+ const Signature signature = dex_file->GetMethodSignature(method_id);
+
+ ArtMethod* inlined_method =
+ klass->FindDeclaredDirectMethod(method_name, signature, kRuntimePointerSize);
+ if (inlined_method == nullptr) {
+ inlined_method = klass->FindDeclaredVirtualMethod(method_name, signature, kRuntimePointerSize);
+ if (inlined_method == nullptr) {
+ LOG(FATAL) << "Could not find an inlined method from an .oat file: "
+ << "the class " << descriptor << " does not have "
+ << method_name << signature << " declared. "
+ << "This must be due to duplicate classes or playing wrongly with class loaders";
}
}
- // Update the dex cache for future lookups. Note that for static methods, this is safe
- // when the class is being initialized, as the entrypoint for the ArtMethod is at
- // this point still the resolution trampoline.
- outer_method->SetDexCacheResolvedMethod(method_index, inlined_method, kRuntimePointerSize);
return inlined_method;
}
-inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type) {
- return GetCalleeSaveMethodCaller(
- self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */);
-}
-
-template <const bool kAccessCheck>
-ALWAYS_INLINE
-inline mirror::Class* CheckObjectAlloc(dex::TypeIndex type_idx,
- ArtMethod* method,
- Thread* self,
- bool* slow_path) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- PointerSize pointer_size = class_linker->GetImagePointerSize();
- mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, pointer_size);
- if (UNLIKELY(klass == nullptr)) {
- klass = class_linker->ResolveType(type_idx, method);
+ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(mirror::Class* klass,
+ Thread* self,
+ bool* slow_path)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_) {
+ if (UNLIKELY(!klass->IsInstantiable())) {
+ self->ThrowNewException("Ljava/lang/InstantiationError;", klass->PrettyDescriptor().c_str());
*slow_path = true;
- if (klass == nullptr) {
- DCHECK(self->IsExceptionPending());
- return nullptr; // Failure
- } else {
- DCHECK(!self->IsExceptionPending());
- }
+ return nullptr; // Failure
}
- if (kAccessCheck) {
- if (UNLIKELY(!klass->IsInstantiable())) {
- self->ThrowNewException("Ljava/lang/InstantiationError;", klass->PrettyDescriptor().c_str());
- *slow_path = true;
- return nullptr; // Failure
- }
- if (UNLIKELY(klass->IsClassClass())) {
- ThrowIllegalAccessError(nullptr, "Class %s is inaccessible",
- klass->PrettyDescriptor().c_str());
- *slow_path = true;
- return nullptr; // Failure
- }
- mirror::Class* referrer = method->GetDeclaringClass();
- if (UNLIKELY(!referrer->CanAccess(klass))) {
- ThrowIllegalAccessErrorClass(referrer, klass);
- *slow_path = true;
- return nullptr; // Failure
- }
+ if (UNLIKELY(klass->IsClassClass())) {
+ ThrowIllegalAccessError(nullptr, "Class %s is inaccessible",
+ klass->PrettyDescriptor().c_str());
+ *slow_path = true;
+ return nullptr; // Failure
}
if (UNLIKELY(!klass->IsInitialized())) {
StackHandleScope<1> hs(self);
@@ -191,7 +149,9 @@
ALWAYS_INLINE
inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self,
- bool* slow_path) {
+ bool* slow_path)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_) {
if (UNLIKELY(!klass->IsInitialized())) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> h_class(hs.NewHandle(klass));
@@ -213,18 +173,15 @@
return klass;
}
-// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
-// cannot be resolved, throw an error. If it can, use it to create an instance.
-// When verification/compiler hasn't been able to verify access, optionally perform an access
-// check.
-template <bool kAccessCheck, bool kInstrumented>
+// Allocate an instance of klass. Throws InstantiationError if klass is not instantiable,
+// or IllegalAccessError if klass is j.l.Class. Performs a clinit check too.
+template <bool kInstrumented>
ALWAYS_INLINE
-inline mirror::Object* AllocObjectFromCode(dex::TypeIndex type_idx,
- ArtMethod* method,
+inline mirror::Object* AllocObjectFromCode(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type) {
bool slow_path = false;
- mirror::Class* klass = CheckObjectAlloc<kAccessCheck>(type_idx, method, self, &slow_path);
+ klass = CheckObjectAlloc(klass, self, &slow_path);
if (UNLIKELY(slow_path)) {
if (klass == nullptr) {
return nullptr;
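The rewritten GetResolvedMethod() above resolves the inlined callee by class descriptor and then searches the class's declared direct methods before its declared virtual methods, treating a miss as fatal. A minimal, self-contained model of that fallback order (plain structs stand in for ART's mirror types; the class contents in main() are invented):

#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

struct Method { std::string name; std::string signature; };

struct Klass {
  std::vector<Method> direct_methods;   // Constructors, private and static methods.
  std::vector<Method> virtual_methods;  // Overridable instance methods.
};

const Method* FindDeclared(const std::vector<Method>& methods,
                           const std::string& name,
                           const std::string& signature) {
  for (const Method& m : methods) {
    if (m.name == name && m.signature == signature) {
      return &m;
    }
  }
  return nullptr;
}

const Method* FindInlinedMethod(const Klass& klass,
                                const std::string& name,
                                const std::string& signature) {
  const Method* found = FindDeclared(klass.direct_methods, name, signature);
  if (found == nullptr) {
    found = FindDeclared(klass.virtual_methods, name, signature);
    if (found == nullptr) {
      // The real code logs FATAL here: the oat file and the loaded class disagree.
      std::cerr << "missing " << name << signature << std::endl;
      std::abort();
    }
  }
  return found;
}

int main() {
  Klass string_class{{{"<init>", "()V"}}, {{"charAt", "(I)C"}}};
  FindInlinedMethod(string_class, "charAt", "(I)C");  // Found among virtual methods.
  return 0;
}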
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 5390165..b17e1a8 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -261,11 +261,8 @@
return true;
}
-ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
- Runtime::CalleeSaveType type,
- bool do_caller_check)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+static inline std::pair<ArtMethod*, uintptr_t> DoGetCalleeSaveMethodOuterCallerAndPc(
+ ArtMethod** sp, Runtime::CalleeSaveType type) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
@@ -275,6 +272,13 @@
uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
(reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
ArtMethod* outer_method = *caller_sp;
+ return std::make_pair(outer_method, caller_pc);
+}
+
+static inline ArtMethod* DoGetCalleeSaveMethodCaller(ArtMethod* outer_method,
+ uintptr_t caller_pc,
+ bool do_caller_check)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* caller = outer_method;
if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
if (outer_method != nullptr) {
@@ -308,8 +312,33 @@
visitor.WalkStack();
caller = visitor.caller;
}
-
return caller;
}
+ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
+ Runtime::CalleeSaveType type,
+ bool do_caller_check)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+ auto outer_caller_and_pc = DoGetCalleeSaveMethodOuterCallerAndPc(sp, type);
+ ArtMethod* outer_method = outer_caller_and_pc.first;
+ uintptr_t caller_pc = outer_caller_and_pc.second;
+ ArtMethod* caller = DoGetCalleeSaveMethodCaller(outer_method, caller_pc, do_caller_check);
+ return caller;
+}
+
+CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self,
+ Runtime::CalleeSaveType type) {
+ CallerAndOuterMethod result;
+ ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ auto outer_caller_and_pc = DoGetCalleeSaveMethodOuterCallerAndPc(sp, type);
+ result.outer_method = outer_caller_and_pc.first;
+ uintptr_t caller_pc = outer_caller_and_pc.second;
+ result.caller =
+ DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check */ true);
+ return result;
+}
+
+
} // namespace art
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 7cc136e..d4cf83c 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -45,27 +45,10 @@
class ScopedObjectAccessAlreadyRunnable;
class Thread;
-template <const bool kAccessCheck>
-ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(dex::TypeIndex type_idx,
- ArtMethod* method,
- Thread* self,
- bool* slow_path)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Roles::uninterruptible_);
-
-ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
- Thread* self,
- bool* slow_path)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Roles::uninterruptible_);
-
// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
// cannot be resolved, throw an error. If it can, use it to create an instance.
-// When verification/compiler hasn't been able to verify access, optionally perform an access
-// check.
-template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(dex::TypeIndex type_idx,
- ArtMethod* method,
+template <bool kInstrumented>
+ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -218,7 +201,13 @@
bool do_caller_check = false)
REQUIRES_SHARED(Locks::mutator_lock_);
-ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type)
+struct CallerAndOuterMethod {
+ ArtMethod* caller;
+ ArtMethod* outer_method;
+};
+
+CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self,
+ Runtime::CalleeSaveType type)
REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 82bb8e5..2d06508 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -29,87 +29,58 @@
static constexpr bool kUseTlabFastPath = true;
+template <bool kInitialized,
+ bool kFinalize,
+ bool kInstrumented,
+ gc::AllocatorType allocator_type>
+static ALWAYS_INLINE inline mirror::Object* artAllocObjectFromCode(
+ mirror::Class* klass,
+ Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
+ DCHECK(klass != nullptr);
+ if (kUseTlabFastPath && !kInstrumented && allocator_type == gc::kAllocatorTypeTLAB) {
+ if (kInitialized || klass->IsInitialized()) {
+ if (!kFinalize || !klass->IsFinalizable()) {
+ size_t byte_count = klass->GetObjectSize();
+ byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment);
+ mirror::Object* obj;
+ if (LIKELY(byte_count < self->TlabSize())) {
+ obj = self->AllocTlab(byte_count);
+ DCHECK(obj != nullptr) << "AllocTlab can't fail";
+ obj->SetClass(klass);
+ if (kUseBakerReadBarrier) {
+ obj->AssertReadBarrierState();
+ }
+ QuasiAtomic::ThreadFenceForConstructor();
+ return obj;
+ }
+ }
+ }
+ }
+ if (kInitialized) {
+ return AllocObjectFromCodeInitialized<kInstrumented>(klass, self, allocator_type);
+ } else if (!kFinalize) {
+ return AllocObjectFromCodeResolved<kInstrumented>(klass, self, allocator_type);
+ } else {
+ return AllocObjectFromCode<kInstrumented>(klass, self, allocator_type);
+ }
+}
+
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
-extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
- uint32_t type_idx, ArtMethod* method, Thread* self) \
+extern "C" mirror::Object* artAllocObjectFromCodeWithChecks##suffix##suffix2( \
+ mirror::Class* klass, Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
- ScopedQuickEntrypointChecks sqec(self); \
- if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
- mirror::Class* klass = method->GetDexCacheResolvedType<false>(dex::TypeIndex(type_idx), \
- kRuntimePointerSize); \
- if (LIKELY(klass != nullptr && klass->IsInitialized() && !klass->IsFinalizable())) { \
- size_t byte_count = klass->GetObjectSize(); \
- byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
- mirror::Object* obj; \
- if (LIKELY(byte_count < self->TlabSize())) { \
- obj = self->AllocTlab(byte_count); \
- DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
- obj->SetClass(klass); \
- if (kUseBakerReadBarrier) { \
- obj->AssertReadBarrierState(); \
- } \
- QuasiAtomic::ThreadFenceForConstructor(); \
- return obj; \
- } \
- } \
- } \
- return AllocObjectFromCode<false, instrumented_bool>(dex::TypeIndex(type_idx), \
- method, \
- self, \
- allocator_type); \
+ return artAllocObjectFromCode<false, true, instrumented_bool, allocator_type>(klass, self); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
+ mirror::Class* klass, Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
- ScopedQuickEntrypointChecks sqec(self); \
- if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
- if (LIKELY(klass->IsInitialized())) { \
- size_t byte_count = klass->GetObjectSize(); \
- byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
- mirror::Object* obj; \
- if (LIKELY(byte_count < self->TlabSize())) { \
- obj = self->AllocTlab(byte_count); \
- DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
- obj->SetClass(klass); \
- if (kUseBakerReadBarrier) { \
- obj->AssertReadBarrierState(); \
- } \
- QuasiAtomic::ThreadFenceForConstructor(); \
- return obj; \
- } \
- } \
- } \
- return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \
+ return artAllocObjectFromCode<false, false, instrumented_bool, allocator_type>(klass, self); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
+ mirror::Class* klass, Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
- ScopedQuickEntrypointChecks sqec(self); \
- if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
- size_t byte_count = klass->GetObjectSize(); \
- byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
- mirror::Object* obj; \
- if (LIKELY(byte_count < self->TlabSize())) { \
- obj = self->AllocTlab(byte_count); \
- DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
- obj->SetClass(klass); \
- if (kUseBakerReadBarrier) { \
- obj->AssertReadBarrierState(); \
- } \
- QuasiAtomic::ThreadFenceForConstructor(); \
- return obj; \
- } \
- } \
- return AllocObjectFromCodeInitialized<instrumented_bool>(klass, self, allocator_type); \
-} \
-extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
- uint32_t type_idx, ArtMethod* method, Thread* self) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- ScopedQuickEntrypointChecks sqec(self); \
- return AllocObjectFromCode<true, instrumented_bool>(dex::TypeIndex(type_idx), \
- method, \
- self, \
- allocator_type); \
+ return artAllocObjectFromCode<true, false, instrumented_bool, allocator_type>(klass, self); \
} \
extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
@@ -220,10 +191,9 @@
extern "C" void* art_quick_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_array_resolved##suffix(mirror::Class* klass, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass); \
+extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass); \
+extern "C" void* art_quick_alloc_object_with_checks##suffix(mirror::Class* klass); \
extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_string_from_bytes##suffix(void*, int32_t, int32_t, int32_t); \
@@ -233,9 +203,9 @@
extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(mirror::Class* klass, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass); \
+extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass); \
+extern "C" void* art_quick_alloc_object_with_checks##suffix##_instrumented(mirror::Class* klass); \
extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_string_from_bytes##suffix##_instrumented(void*, int32_t, int32_t, int32_t); \
@@ -246,10 +216,9 @@
qpoints->pAllocArray = art_quick_alloc_array##suffix##_instrumented; \
qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix##_instrumented; \
qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix##_instrumented; \
- qpoints->pAllocObject = art_quick_alloc_object##suffix##_instrumented; \
qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix##_instrumented; \
qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix##_instrumented; \
- qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix##_instrumented; \
+ qpoints->pAllocObjectWithChecks = art_quick_alloc_object_with_checks##suffix##_instrumented; \
qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix##_instrumented; \
qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented; \
qpoints->pAllocStringFromBytes = art_quick_alloc_string_from_bytes##suffix##_instrumented; \
@@ -259,10 +228,9 @@
qpoints->pAllocArray = art_quick_alloc_array##suffix; \
qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix; \
qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix; \
- qpoints->pAllocObject = art_quick_alloc_object##suffix; \
qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix; \
qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix; \
- qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix; \
+ qpoints->pAllocObjectWithChecks = art_quick_alloc_object_with_checks##suffix; \
qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix; \
qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix; \
qpoints->pAllocStringFromBytes = art_quick_alloc_string_from_bytes##suffix; \
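All three object-allocation entrypoints above now funnel into one template whose fast path is a bump-pointer allocation out of the thread-local buffer. A self-contained model of just that size check (invented buffer size; a plain byte array stands in for the TLAB) is:

#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr size_t kAlignment = 8;  // Must be a power of two for the RoundUp below.

constexpr size_t RoundUp(size_t x, size_t n) {
  return (x + n - 1) & ~(n - 1);
}

struct Tlab {
  uint8_t* pos;
  uint8_t* end;

  size_t Remaining() const { return static_cast<size_t>(end - pos); }

  void* Alloc(size_t byte_count) {
    byte_count = RoundUp(byte_count, kAlignment);
    if (byte_count >= Remaining()) {
      return nullptr;  // The real code falls through to the slow-path helpers here.
    }
    void* obj = pos;
    pos += byte_count;
    return obj;
  }
};

int main() {
  alignas(kAlignment) uint8_t buffer[256];
  Tlab tlab{buffer, buffer + sizeof(buffer)};
  void* obj = tlab.Alloc(24);
  std::cout << (obj != nullptr ? "fast path hit" : "slow path") << std::endl;
  return 0;
}

The kInitialized/kFinalize template flags only select which slow-path helper runs once this check fails or the class still needs initialization.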
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index 64030f3..2d0932a 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -109,8 +109,13 @@
extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
+
extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+// Invoke polymorphic entrypoint. Return type is dynamic and may be void, a primitive value,
+// or a reference.
+extern "C" void art_quick_invoke_polymorphic(uint32_t, void*);
+
// Thread entrypoints.
extern "C" void art_quick_test_suspend();
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 78dad94..8ce61c1 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -106,6 +106,7 @@
art_quick_invoke_super_trampoline_with_access_check;
qpoints->pInvokeVirtualTrampolineWithAccessCheck =
art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokePolymorphic = art_quick_invoke_polymorphic;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 5dad43e..5b1b287 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -31,22 +31,56 @@
namespace art {
+static inline void BssWriteBarrier(ArtMethod* outer_method) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // For AOT code, we need a write barrier for the class loader that holds
+ // the GC roots in the .bss.
+ const DexFile* dex_file = outer_method->GetDexFile();
+ if (dex_file != nullptr &&
+ dex_file->GetOatDexFile() != nullptr &&
+ !dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
+ mirror::ClassLoader* class_loader = outer_method->GetClassLoader();
+ if (class_loader != nullptr) {
+ DCHECK(!class_loader->GetClassTable()->InsertOatFile(dex_file->GetOatDexFile()->GetOatFile()))
+ << "Oat file with .bss GC roots was not registered in class table: "
+ << dex_file->GetOatDexFile()->GetOatFile()->GetLocation();
+ // Note that we emit the barrier before the compiled code stores the String or Class
+ // as a GC root. This is OK as there is no suspend point in between.
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+ } else {
+ Runtime::Current()->GetClassLinker()->WriteBarrierForBootOatFileBssRoots(
+ dex_file->GetOatDexFile()->GetOatFile());
+ }
+ }
+}
+
extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
- return ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, true, false);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
+ ArtMethod* caller = caller_and_outer.caller;
+ mirror::Class* result =
+ ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, true, false);
+ if (LIKELY(result != nullptr)) {
+ BssWriteBarrier(caller_and_outer.outer_method);
+ }
+ return result;
}
extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
- return ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, false);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
+ ArtMethod* caller = caller_and_outer.caller;
+ mirror::Class* result =
+ ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, false);
+ if (LIKELY(result != nullptr)) {
+ BssWriteBarrier(caller_and_outer.outer_method);
+ }
+ return result;
}
extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
@@ -54,36 +88,28 @@
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
- return ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, true);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
+ ArtMethod* caller = caller_and_outer.caller;
+ mirror::Class* result =
+ ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, true);
+ if (LIKELY(result != nullptr)) {
+ BssWriteBarrier(caller_and_outer.outer_method);
+ }
+ return result;
}
extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(
self,
// TODO: Change art_quick_resolve_string on MIPS and MIPS64 to kSaveEverything.
(kRuntimeISA == kMips || kRuntimeISA == kMips64) ? Runtime::kSaveRefsOnly
: Runtime::kSaveEverything);
+ ArtMethod* caller = caller_and_outer.caller;
mirror::String* result = ResolveStringFromCode(caller, dex::StringIndex(string_idx));
if (LIKELY(result != nullptr)) {
- // For AOT code, we need a write barrier for the class loader that holds
- // the GC roots in the .bss.
- const DexFile* dex_file = caller->GetDexFile();
- if (dex_file != nullptr &&
- dex_file->GetOatDexFile() != nullptr &&
- !dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
- mirror::ClassLoader* class_loader = caller->GetDeclaringClass()->GetClassLoader();
- DCHECK(class_loader != nullptr); // We do not use .bss GC roots for boot image.
- DCHECK(
- !class_loader->GetClassTable()->InsertOatFile(dex_file->GetOatDexFile()->GetOatFile()))
- << "Oat file with .bss GC roots was not registered in class table: "
- << dex_file->GetOatDexFile()->GetOatFile()->GetLocation();
- // Note that we emit the barrier before the compiled code stores the string as GC root.
- // This is OK as there is no suspend point point in between.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
- }
+ BssWriteBarrier(caller_and_outer.outer_method);
}
return result;
}
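BssWriteBarrier() above has two outcomes once an oat file with .bss GC roots is involved: dirty the app class loader so CMS rescans it, or record the boot oat file for root visiting in the pause. A toy model of that decision (placeholder types, not ART's OatFile or ClassLoader; the objects in main() are made up):

#include <iostream>
#include <string>
#include <vector>

struct OatFile { std::string location; bool has_bss_gc_roots; };
struct ClassLoader { std::string name; };

std::vector<const OatFile*> new_bss_roots_boot_oat_files;  // Visited by CMS in the pause.

void CardMark(const ClassLoader& loader) {
  std::cout << "write barrier on loader " << loader.name << "\n";
}

void BssWriteBarrier(const OatFile& oat, const ClassLoader* loader) {
  if (!oat.has_bss_gc_roots) {
    return;  // Nothing in .bss to keep alive.
  }
  if (loader != nullptr) {
    CardMark(*loader);  // App oat: dirty the loader so its .bss roots get rescanned.
  } else {
    new_bss_roots_boot_oat_files.push_back(&oat);  // Boot oat: remember the file itself.
  }
}

int main() {
  ClassLoader app_loader{"PathClassLoader"};
  OatFile app{"app.odex", true};
  OatFile boot{"boot.oat", true};
  BssWriteBarrier(app, &app_loader);
  BssWriteBarrier(boot, nullptr);
  return 0;
}

The real helper derives the oat file from the outer method's dex file rather than taking it as a parameter.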
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index a1c5082..4d5d6de 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -23,10 +23,9 @@
V(AllocArray, void*, uint32_t, int32_t, ArtMethod*) \
V(AllocArrayResolved, void*, mirror::Class*, int32_t, ArtMethod*) \
V(AllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*) \
- V(AllocObject, void*, uint32_t, ArtMethod*) \
- V(AllocObjectResolved, void*, mirror::Class*, ArtMethod*) \
- V(AllocObjectInitialized, void*, mirror::Class*, ArtMethod*) \
- V(AllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*) \
+ V(AllocObjectResolved, void*, mirror::Class*) \
+ V(AllocObjectInitialized, void*, mirror::Class*) \
+ V(AllocObjectWithChecks, void*, mirror::Class*) \
V(CheckAndAllocArray, void*, uint32_t, int32_t, ArtMethod*) \
V(CheckAndAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*) \
V(AllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t) \
@@ -134,6 +133,7 @@
V(InvokeStaticTrampolineWithAccessCheck, void, uint32_t, void*) \
V(InvokeSuperTrampolineWithAccessCheck, void, uint32_t, void*) \
V(InvokeVirtualTrampolineWithAccessCheck, void, uint32_t, void*) \
+ V(InvokePolymorphic, void, uint32_t, void*) \
\
V(TestSuspend, void, void) \
\
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index a3e5b55..eb76fb6 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -27,10 +27,12 @@
#include "imtable-inl.h"
#include "interpreter/interpreter.h"
#include "linear_alloc.h"
+#include "method_handles.h"
#include "method_reference.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/method.h"
+#include "mirror/method_handle_impl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "oat_quick_method_header.h"
@@ -39,6 +41,7 @@
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "debugger.h"
+#include "well_known_classes.h"
namespace art {
@@ -2391,4 +2394,121 @@
reinterpret_cast<uintptr_t>(method));
}
+// Returns shorty type so the caller can determine how to put |result|
+// into expected registers. The shorty type is static so the compiler
+// could call different flavors of this code path depending on the
+// shorty type though this would require different entry points for
+// each type.
+extern "C" uintptr_t artInvokePolymorphic(
+ JValue* result,
+ mirror::Object* raw_method_handle,
+ Thread* self,
+ ArtMethod** sp)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
+ DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
+
+ // Start new JNI local reference state
+ JNIEnvExt* env = self->GetJniEnv();
+ ScopedObjectAccessUnchecked soa(env);
+ ScopedJniEnvLocalRefState env_state(env);
+ const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
+
+ // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
+ ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
+ uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
+ const DexFile::CodeItem* code = caller_method->GetCodeItem();
+ const Instruction* inst = Instruction::At(&code->insns_[dex_pc]);
+ DCHECK(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC ||
+ inst->Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
+ const DexFile* dex_file = caller_method->GetDexFile();
+ const uint32_t proto_idx = inst->VRegH();
+ const char* shorty = dex_file->GetShorty(proto_idx);
+ const size_t shorty_length = strlen(shorty);
+ static const bool kMethodIsStatic = false; // invoke() and invokeExact() are not static.
+ RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa);
+ gc_visitor.VisitArguments();
+
+ // Wrap raw_method_handle in a Handle for safety.
+ StackHandleScope<5> hs(self);
+ Handle<mirror::MethodHandleImpl> method_handle(
+ hs.NewHandle(ObjPtr<mirror::MethodHandleImpl>::DownCast(MakeObjPtr(raw_method_handle))));
+ raw_method_handle = nullptr;
+ self->EndAssertNoThreadSuspension(old_cause);
+
+ // Resolve method - it's either MethodHandle.invoke() or MethodHandle.invokeExact().
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::kForceICCECheck>(self,
+ inst->VRegB(),
+ caller_method,
+ kVirtual);
+ DCHECK((resolved_method ==
+ jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact)) ||
+ (resolved_method ==
+ jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invoke)));
+ if (UNLIKELY(method_handle.IsNull())) {
+ ThrowNullPointerExceptionForMethodAccess(resolved_method, InvokeType::kVirtual);
+ return static_cast<uintptr_t>('V');
+ }
+
+ Handle<mirror::Class> caller_class(hs.NewHandle(caller_method->GetDeclaringClass()));
+ Handle<mirror::MethodType> method_type(hs.NewHandle(linker->ResolveMethodType(
+ *dex_file, proto_idx,
+ hs.NewHandle<mirror::DexCache>(caller_class->GetDexCache()),
+ hs.NewHandle<mirror::ClassLoader>(caller_class->GetClassLoader()))));
+ // This implies we couldn't resolve one or more types in this method handle.
+ if (UNLIKELY(method_type.IsNull())) {
+ CHECK(self->IsExceptionPending());
+ return static_cast<uintptr_t>('V');
+ }
+
+ DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst->VRegA());
+ DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic);
+
+ // Fix references before constructing the shadow frame.
+ gc_visitor.FixupReferences();
+
+ // Construct shadow frame placing arguments consecutively from |first_arg|.
+ const bool is_range = (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
+ const size_t num_vregs = is_range ? inst->VRegA_4rcc() : inst->VRegA_45cc();
+ const size_t first_arg = 0;
+ ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
+ CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc);
+ ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
+ ScopedStackedShadowFramePusher
+ frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
+ BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
+ kMethodIsStatic,
+ shorty,
+ strlen(shorty),
+ shadow_frame,
+ first_arg);
+ shadow_frame_builder.VisitArguments();
+
+ // Push a transition back into managed code onto the linked list in thread.
+ ManagedStack fragment;
+ self->PushManagedStackFragment(&fragment);
+
+ // Call DoInvokePolymorphic with |is_range| = true, as shadow frame has argument registers in
+ // consecutive order.
+ uint32_t unused_args[Instruction::kMaxVarArgRegs] = {};
+ uint32_t first_callee_arg = first_arg + 1;
+ const bool do_assignability_check = false;
+ if (!DoInvokePolymorphic<true /* is_range */, do_assignability_check>(self,
+ resolved_method,
+ *shadow_frame,
+ method_handle,
+ method_type,
+ unused_args,
+ first_callee_arg,
+ result)) {
+ DCHECK(self->IsExceptionPending());
+ }
+
+ // Pop transition record.
+ self->PopManagedStackFragment(fragment);
+
+ return static_cast<uintptr_t>(shorty[0]);
+}
+
} // namespace art
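Because artInvokePolymorphic() returns the shorty character of the call site's return type, whatever called it has to route the JValue accordingly. A hedged sketch of that dispatch (the JValue union below is a simplified stand-in, and "routing" is just printed):

#include <cstdint>
#include <iostream>

union JValue {
  int32_t i;
  int64_t j;
  float f;
  double d;
  void* l;
};

void RouteResult(char shorty, const JValue& result) {
  switch (shorty) {
    case 'V':
      break;  // void: nothing to return.
    case 'J':
      std::cout << "64-bit core result: " << result.j << "\n";
      break;
    case 'F':
    case 'D':
      std::cout << "floating-point result\n";
      break;
    case 'L':
      std::cout << "reference result\n";
      break;
    default:
      std::cout << "32-bit core result: " << result.i << "\n";
      break;
  }
}

int main() {
  JValue v;
  v.j = 42;
  RouteResult('J', v);
  return 0;
}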
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 1283660..6301f93 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -117,15 +117,14 @@
sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, checkpoint_function, active_suspend_barriers,
sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, active_suspend_barriers, jni_entrypoints,
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, active_suspend_barriers, thread_local_start,
sizeof(Thread::tls_ptr_sized_values::active_suspend_barriers));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, jni_entrypoints, sizeof(size_t));
// Skip across the entrypoints structures.
-
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_start, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_objects, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, mterp_current_ibase, sizeof(size_t));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_alt_ibase, rosalloc_runs, sizeof(void*));
@@ -156,13 +155,13 @@
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArray, pAllocArrayResolved, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayResolved, pAllocArrayWithAccessCheck,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayWithAccessCheck, pAllocObject, sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObject, pAllocObjectResolved, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayWithAccessCheck, pAllocObjectResolved,
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectResolved, pAllocObjectInitialized,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectInitialized, pAllocObjectWithAccessCheck,
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectInitialized, pAllocObjectWithChecks,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectWithAccessCheck, pCheckAndAllocArray,
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectWithChecks, pCheckAndAllocArray,
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckAndAllocArray, pCheckAndAllocArrayWithAccessCheck,
sizeof(void*));
@@ -286,6 +285,8 @@
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeSuperTrampolineWithAccessCheck,
pInvokeVirtualTrampolineWithAccessCheck, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeVirtualTrampolineWithAccessCheck,
+ pInvokePolymorphic, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokePolymorphic,
pTestSuspend, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pTestSuspend, pDeliverException, sizeof(void*));
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index e1117e6..7b86339 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2406,16 +2406,29 @@
}
}
-bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
+bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+ bool do_atomic_update) {
mirror::Object* from_ref = field->AsMirrorPtr();
+ if (from_ref == nullptr) {
+ return true;
+ }
mirror::Object* to_ref = IsMarked(from_ref);
if (to_ref == nullptr) {
return false;
}
if (from_ref != to_ref) {
- QuasiAtomic::ThreadFenceRelease();
- field->Assign(to_ref);
- QuasiAtomic::ThreadFenceSequentiallyConsistent();
+ if (do_atomic_update) {
+ do {
+ if (field->AsMirrorPtr() != from_ref) {
+ // Concurrently overwritten by a mutator.
+ break;
+ }
+ } while (!field->CasWeakRelaxed(from_ref, to_ref));
+ } else {
+ QuasiAtomic::ThreadFenceRelease();
+ field->Assign(to_ref);
+ QuasiAtomic::ThreadFenceSequentiallyConsistent();
+ }
}
return true;
}
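The do_atomic_update path added above retries a weak relaxed CAS and gives up once a mutator has concurrently overwritten the field. The same loop shape can be modeled with std::atomic, using uint32_t in place of a compressed heap reference:

#include <atomic>
#include <cstdint>
#include <iostream>

// Swing the field from from_ref to to_ref only while it still holds from_ref.
void UpdateIfStillPointsTo(std::atomic<uint32_t>& field, uint32_t from_ref, uint32_t to_ref) {
  uint32_t expected = from_ref;
  while (!field.compare_exchange_weak(expected, to_ref, std::memory_order_relaxed)) {
    if (expected != from_ref) {
      return;  // Concurrently overwritten by a mutator; leave it alone.
    }
    expected = from_ref;  // Spurious failure: retry with the same expectation.
  }
}

int main() {
  std::atomic<uint32_t> field{0x1000};
  UpdateIfStillPointsTo(field, 0x1000, 0x2000);
  std::cout << std::hex << field.load() << std::endl;  // Prints 2000.
  return 0;
}

HeapReference::CasWeakRelaxed(), added further down, is the primitive the real loop uses for this compare-and-swap.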
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 5b8a557..844bb45 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -183,7 +183,8 @@
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+ bool do_atomic_update) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 5b51399..0177e2a 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -187,7 +187,10 @@
// and will be used for reading system weaks while the GC is running.
virtual mirror::Object* IsMarked(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
+ // Returns true if the given heap reference is null or is already marked. If it's already marked,
+ // update the reference (using a CAS if do_atomic_update is true). Otherwise, returns false.
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Used by reference processor.
virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index ddcb6c0..85e6783 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -472,9 +472,15 @@
return mark_bitmap_->Test(object) ? object : nullptr;
}
-bool MarkCompact::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr) {
+bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr,
+ // MarkCompact does the GC in a pause. No CAS needed.
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
// Side effect free since we call this before ever moving objects.
- return IsMarked(ref_ptr->AsMirrorPtr()) != nullptr;
+ mirror::Object* obj = ref_ptr->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
+ return IsMarked(obj) != nullptr;
}
void MarkCompact::SweepSystemWeaks() {
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 564f85b..6d52d5d 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -175,7 +175,8 @@
virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update) OVERRIDE
REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_,
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 06ed029..f00da73 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -390,8 +390,13 @@
}
}
-bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
- return IsMarked(ref->AsMirrorPtr());
+bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
+ mirror::Object* obj = ref->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
+ return IsMarked(obj);
}
class MarkSweep::MarkObjectSlowPath {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 02cf462..a6e2d61 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -188,7 +188,8 @@
void VerifyIsLive(const mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index f2aa5a7..cb9e7e2 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -765,8 +765,13 @@
return mark_bitmap_->Test(obj) ? obj : nullptr;
}
-bool SemiSpace::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) {
+bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+ // SemiSpace does the GC in a pause. No CAS needed.
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
mirror::Object* obj = object->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
mirror::Object* new_obj = IsMarked(obj);
if (new_obj == nullptr) {
return false;
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 4cebcc3..52b5e5f 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -166,7 +166,8 @@
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+ bool do_atomic_update) OVERRIDE
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 081be96..c154836 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -203,7 +203,9 @@
DCHECK(klass != nullptr);
DCHECK(klass->IsTypeOfReferenceClass());
mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
- if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) {
+ // do_atomic_update needs to be true because this happens outside of the reference processing
+ // phase.
+ if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
Thread* self = Thread::Current();
// TODO: Remove these locks, and use atomic stacks for storing references?
// We need to check that the references haven't already been enqueued since we can end up
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index a0eb197..734caea 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -129,8 +129,9 @@
while (!IsEmpty()) {
ObjPtr<mirror::Reference> ref = DequeuePendingReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr &&
- !collector->IsMarkedHeapReference(referent_addr)) {
+ // do_atomic_update is false because this happens during the reference processing phase where
+ // Reference.clear() would block.
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
// Referent is white, clear it.
if (Runtime::Current()->IsActiveTransaction()) {
ref->ClearReferent<true>();
@@ -147,8 +148,9 @@
while (!IsEmpty()) {
ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr &&
- !collector->IsMarkedHeapReference(referent_addr)) {
+ // do_atomic_update is false because this happens during the reference processing phase where
+ // Reference.clear() would block.
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
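The two loops above now push the null check into the collector via IsNullOrMarkedHeapReference(). A toy model of that contract (true means null-or-marked, so false means the referent is white and gets cleared; the predicate and objects below are invented):

#include <functional>
#include <iostream>
#include <vector>

struct Reference { int* referent; };

void ClearWhiteReferents(std::vector<Reference>& queue,
                         const std::function<bool(int*)>& is_null_or_marked) {
  for (Reference& ref : queue) {
    // do_atomic_update is false in this phase of the real code: mutators are blocked
    // from touching the referent here, so a plain store is enough.
    if (!is_null_or_marked(ref.referent)) {
      ref.referent = nullptr;  // Referent is white, clear it.
    }
  }
}

int main() {
  int live = 1;
  int dead = 2;
  std::vector<Reference> queue = {{&live}, {&dead}, {nullptr}};
  ClearWhiteReferents(queue, [&](int* obj) { return obj == nullptr || obj == &live; });
  std::cout << (queue[1].referent == nullptr ? "cleared" : "kept") << std::endl;
  return 0;
}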
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index b0d7fb2..d7dfcd4 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -508,9 +508,8 @@
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
} else {
- obj = AllocObjectFromCode<do_access_check, true>(
- dex::TypeIndex(inst->VRegB_21c()),
- shadow_frame.GetMethod(),
+ obj = AllocObjectFromCode<true>(
+ c.Ptr(),
self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index c8c1563..369c261 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -375,10 +375,9 @@
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
} else {
- obj = AllocObjectFromCode<false, true>(dex::TypeIndex(inst->VRegB_21c()),
- shadow_frame->GetMethod(),
- self,
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ obj = AllocObjectFromCode<true>(c,
+ self,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
}
if (UNLIKELY(obj == nullptr)) {
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index f80c43d..e0f28ad 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -566,7 +566,10 @@
return nullptr;
}
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
- while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+ // CMS needs this to block for concurrent reference processing because an object allocated during
+ // the GC won't be marked and concurrent reference processing would incorrectly clear the JNI weak
+ // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
+ while (!kUseReadBarrier && UNLIKELY(!MayAccessWeakGlobals(self))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpoint();
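This wait loop and the MonitorList::Add() change further down share one shape: block for weak-ref access only when read barriers (CC) are not in use. A condensed model of that gate, with a placeholder flag instead of the runtime's thread-state checks and without the empty-checkpoint handling:

#include <condition_variable>
#include <mutex>

constexpr bool kUseReadBarrier = false;  // Build-time constant in the real runtime.

std::mutex weak_access_mutex;
std::condition_variable weak_access_cond;
bool weak_ref_access_enabled = true;  // Stand-in for MayAccessWeakGlobals()/allow_new_monitors_.

void WaitForWeakRefAccess() {
  std::unique_lock<std::mutex> lock(weak_access_mutex);
  // With CC (kUseReadBarrier), the to-space invariant makes blocking unnecessary.
  while (!kUseReadBarrier && !weak_ref_access_enabled) {
    weak_access_cond.wait(lock);
  }
}

int main() {
  WaitForWeakRefAccess();  // Returns immediately here since access is enabled.
  return 0;
}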
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 916f1cf..8f978e1 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -35,7 +35,6 @@
protected:
virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
CommonRuntimeTest::SetUpRuntimeOptions(options);
- options->push_back(std::make_pair("-Xexperimental:method-handles", nullptr));
}
};
diff --git a/runtime/mirror/emulated_stack_frame.cc b/runtime/mirror/emulated_stack_frame.cc
index d607040..978cc32 100644
--- a/runtime/mirror/emulated_stack_frame.cc
+++ b/runtime/mirror/emulated_stack_frame.cc
@@ -195,6 +195,7 @@
// Step 5: Construct the EmulatedStackFrame object.
Handle<EmulatedStackFrame> sf(hs.NewHandle(
ObjPtr<EmulatedStackFrame>::DownCast(StaticClass()->AllocObject(self))));
+ sf->SetFieldObject<false>(CallsiteTypeOffset(), caller_type.Get());
sf->SetFieldObject<false>(TypeOffset(), callee_type.Get());
sf->SetFieldObject<false>(ReferencesOffset(), references.Get());
sf->SetFieldObject<false>(StackFrameOffset(), stack_frame.Get());
diff --git a/runtime/mirror/emulated_stack_frame.h b/runtime/mirror/emulated_stack_frame.h
index d83a536..ddd84a1 100644
--- a/runtime/mirror/emulated_stack_frame.h
+++ b/runtime/mirror/emulated_stack_frame.h
@@ -81,6 +81,10 @@
OFFSET_OF_OBJECT_MEMBER(EmulatedStackFrame, stack_frame_));
}
+ static MemberOffset CallsiteTypeOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(EmulatedStackFrame, callsite_type_));
+ }
+
static MemberOffset TypeOffset() {
return MemberOffset(OFFSETOF_MEMBER(EmulatedStackFrame, type_));
}
@@ -93,6 +97,7 @@
return MemberOffset(OFFSETOF_MEMBER(EmulatedStackFrame, stack_frame_));
}
+ HeapReference<mirror::MethodType> callsite_type_;
HeapReference<mirror::ObjectArray<mirror::Object>> references_;
HeapReference<mirror::ByteArray> stack_frame_;
HeapReference<mirror::MethodType> type_;
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
index e70b936..22fb83c 100644
--- a/runtime/mirror/object_reference-inl.h
+++ b/runtime/mirror/object_reference-inl.h
@@ -34,6 +34,15 @@
return HeapReference<MirrorType>(ptr.Ptr());
}
+template<class MirrorType>
+bool HeapReference<MirrorType>::CasWeakRelaxed(MirrorType* expected_ptr, MirrorType* new_ptr) {
+ HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_ptr));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_ptr));
+ Atomic<uint32_t>* atomic_reference = reinterpret_cast<Atomic<uint32_t>*>(&this->reference_);
+ return atomic_reference->CompareExchangeWeakRelaxed(expected_ref.reference_,
+ new_ref.reference_);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 71f34c6..a96a120 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -94,6 +94,9 @@
static HeapReference<MirrorType> FromObjPtr(ObjPtr<MirrorType> ptr)
REQUIRES_SHARED(Locks::mutator_lock_);
+ bool CasWeakRelaxed(MirrorType* old_ptr, MirrorType* new_ptr)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
explicit HeapReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
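A minimal sketch of how a caller might use the HeapReference<>::CasWeakRelaxed() added above; the retry loop is the usual shape for a weak CAS, and the helper name, template parameters, and the AsMirrorPtr() re-read are illustrative assumptions rather than part of this change.

// Hypothetical helper: install `desired` only while the field still holds `expected`.
template <typename RefType, typename MirrorType>
bool StoreIfStillExpected(RefType& field, MirrorType* expected, MirrorType* desired) {
  // A weak CAS may fail spuriously even when the field still holds `expected`,
  // so loop and re-read the current value to decide whether to keep retrying.
  while (!field.CasWeakRelaxed(expected, desired)) {
    if (field.AsMirrorPtr() != expected) {
      return false;  // The reference was changed by another thread; give up.
    }
    // Spurious failure: retry with the same expected value.
  }
  return true;
}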
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 893abd5..9c09275 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1361,8 +1361,10 @@
void MonitorList::Add(Monitor* m) {
Thread* self = Thread::Current();
MutexLock mu(self, monitor_list_lock_);
- while (UNLIKELY((!kUseReadBarrier && !allow_new_monitors_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ // CMS needs this to block for concurrent reference processing because an object allocated during
+ // the GC won't be marked and concurrent reference processing would incorrectly clear the weak
+ // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
+ while (!kUseReadBarrier && UNLIKELY(!allow_new_monitors_)) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpoint();
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 36825cb..268d71a 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -17,6 +17,7 @@
#include "dalvik_system_VMStack.h"
#include "art_method-inl.h"
+#include "gc/task_processor.h"
#include "jni_internal.h"
#include "nth_caller_visitor.h"
#include "mirror/class-inl.h"
@@ -31,9 +32,18 @@
static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject peer)
REQUIRES_SHARED(Locks::mutator_lock_) {
jobject trace = nullptr;
- if (soa.Decode<mirror::Object>(peer) == soa.Self()->GetPeer()) {
+ ObjPtr<mirror::Object> decoded_peer = soa.Decode<mirror::Object>(peer);
+ if (decoded_peer == soa.Self()->GetPeer()) {
trace = soa.Self()->CreateInternalStackTrace<false>(soa);
} else {
+ // Never allow suspending the heap task thread since it may deadlock if allocations are
+ // required for the stack trace.
+ Thread* heap_task_thread =
+ Runtime::Current()->GetHeap()->GetTaskProcessor()->GetRunningThread();
+ // heap_task_thread could be null if the daemons aren't yet started.
+ if (heap_task_thread != nullptr && decoded_peer == heap_task_thread->GetPeer()) {
+ return nullptr;
+ }
// Suspend thread to build stack trace.
ScopedThreadSuspension sts(soa.Self(), kNative);
ThreadList* thread_list = Runtime::Current()->GetThreadList();
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 3341f53..5438a6d 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -428,6 +428,10 @@
}
auto ret = hs.NewHandle(mirror::ObjectArray<mirror::Method>::Alloc(
soa.Self(), mirror::Method::ArrayClass(), num_methods));
+ if (ret.Get() == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
num_methods = 0;
for (auto& m : klass->GetDeclaredMethods(kRuntimePointerSize)) {
auto modifiers = m.GetAccessFlags();
diff --git a/runtime/oat.h b/runtime/oat.h
index 1fd906d..ab03252 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '9', '4', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '9', '8', '\0' }; // art::Thread fields reorder
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 38df427..d47f1b5 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -323,8 +323,10 @@
}
PointerSize pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
- uint8_t* dex_cache_arrays = bss_begin_;
- uint8_t* dex_cache_arrays_end = (bss_roots_ != nullptr) ? bss_roots_ : bss_end_;
+ uint8_t* dex_cache_arrays = (bss_begin_ == bss_roots_) ? nullptr : bss_begin_;
+ uint8_t* dex_cache_arrays_end =
+ (bss_begin_ == bss_roots_) ? nullptr : (bss_roots_ != nullptr) ? bss_roots_ : bss_end_;
+ DCHECK_EQ(dex_cache_arrays != nullptr, dex_cache_arrays_end != nullptr);
uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
oat_dex_files_storage_.reserve(dex_file_count);
for (size_t i = 0; i < dex_file_count; i++) {
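A minimal sketch of the .bss partitioning that the new bounds computation above encodes, assuming the layout [dex cache arrays][GC roots] inside [bss_begin_, bss_end_); the standalone helper below is hypothetical and exists only to spell out the cases.

#include <cstdint>
#include <utility>

// Returns the [begin, end) sub-range of .bss that holds dex cache arrays, or
// {nullptr, nullptr} when the section starts directly with the GC roots.
std::pair<uint8_t*, uint8_t*> DexCacheArrayRange(uint8_t* bss_begin,
                                                 uint8_t* bss_roots,
                                                 uint8_t* bss_end) {
  if (bss_begin == bss_roots) {
    return {nullptr, nullptr};  // No dex cache arrays (possibly no .bss at all).
  }
  // Arrays start at the beginning of .bss and end where the roots sub-section
  // begins, or at the end of .bss when there is no roots sub-section.
  return {bss_begin, bss_roots != nullptr ? bss_roots : bss_end};
}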
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index b757b21..42fed50 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -29,6 +29,8 @@
"ti_properties.cc",
"ti_stack.cc",
"ti_redefine.cc",
+ "ti_thread.cc",
+ "ti_timers.cc",
"transform.cc"],
include_dirs: ["art/runtime"],
shared_libs: [
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index c52dd76..4aedec9 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -30,6 +30,7 @@
*/
#include <string>
+#include <type_traits>
#include <vector>
#include <jni.h>
@@ -37,6 +38,7 @@
#include "openjdkjvmti/jvmti.h"
#include "art_jvmti.h"
+#include "base/logging.h"
#include "base/mutex.h"
#include "events-inl.h"
#include "jni_env_ext-inl.h"
@@ -55,6 +57,8 @@
#include "ti_properties.h"
#include "ti_redefine.h"
#include "ti_stack.h"
+#include "ti_thread.h"
+#include "ti_timers.h"
#include "transform.h"
// TODO Remove this at some point by annotating all the methods. It was put in to make the skeleton
@@ -117,15 +121,15 @@
}
static jvmtiError GetThreadState(jvmtiEnv* env, jthread thread, jint* thread_state_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::GetThreadState(env, thread, thread_state_ptr);
}
static jvmtiError GetCurrentThread(jvmtiEnv* env, jthread* thread_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::GetCurrentThread(env, thread_ptr);
}
static jvmtiError GetAllThreads(jvmtiEnv* env, jint* threads_count_ptr, jthread** threads_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::GetAllThreads(env, threads_count_ptr, threads_ptr);
}
static jvmtiError SuspendThread(jvmtiEnv* env, jthread thread) {
@@ -159,7 +163,7 @@
}
static jvmtiError GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::GetThreadInfo(env, thread, info_ptr);
}
static jvmtiError GetOwnedMonitorInfo(jvmtiEnv* env,
@@ -237,7 +241,7 @@
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr,
jint* thread_count_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::GetAllStackTraces(env, max_frame_count, stack_info_ptr, thread_count_ptr);
}
static jvmtiError GetThreadListStackTraces(jvmtiEnv* env,
@@ -245,11 +249,15 @@
const jthread* thread_list,
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::GetThreadListStackTraces(env,
+ thread_count,
+ thread_list,
+ max_frame_count,
+ stack_info_ptr);
}
static jvmtiError GetFrameCount(jvmtiEnv* env, jthread thread, jint* count_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::GetFrameCount(env, thread, count_ptr);
}
static jvmtiError PopFrame(jvmtiEnv* env, jthread thread) {
@@ -261,7 +269,7 @@
jint depth,
jmethodID* method_ptr,
jlocation* location_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::GetFrameLocation(env, thread, depth, method_ptr, location_ptr);
}
static jvmtiError NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
@@ -530,7 +538,7 @@
jobject initiating_loader,
jint* class_count_ptr,
jclass** classes_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ClassUtil::GetClassLoaderClasses(env, initiating_loader, class_count_ptr, classes_ptr);
}
static jvmtiError GetClassSignature(jvmtiEnv* env,
@@ -621,7 +629,17 @@
static jvmtiError RedefineClasses(jvmtiEnv* env,
jint class_count,
const jvmtiClassDefinition* class_definitions) {
- return ERR(NOT_IMPLEMENTED);
+ std::string error_msg;
+ jvmtiError res = Redefiner::RedefineClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
+ art::Runtime::Current(),
+ art::Thread::Current(),
+ class_count,
+ class_definitions,
+ &error_msg);
+ if (res != OK) {
+ LOG(WARNING) << "FAILURE TO REDEFINE " << error_msg;
+ }
+ return res;
}
static jvmtiError GetObjectSize(jvmtiEnv* env, jobject object, jlong* size_ptr) {
@@ -1016,15 +1034,15 @@
}
static jvmtiError GetTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return TimerUtil::GetTimerInfo(env, info_ptr);
}
static jvmtiError GetTime(jvmtiEnv* env, jlong* nanos_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return TimerUtil::GetTime(env, nanos_ptr);
}
static jvmtiError GetAvailableProcessors(jvmtiEnv* env, jint* processor_count_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return TimerUtil::GetAvailableProcessors(env, processor_count_ptr);
}
static jvmtiError AddToBootstrapClassLoaderSearch(jvmtiEnv* env, const char* segment) {
@@ -1153,7 +1171,57 @@
}
static jvmtiError SetVerboseFlag(jvmtiEnv* env, jvmtiVerboseFlag flag, jboolean value) {
- return ERR(NOT_IMPLEMENTED);
+ if (flag == jvmtiVerboseFlag::JVMTI_VERBOSE_OTHER) {
+ // OTHER is special, as it's 0, so can't do a bit check.
+ bool val = (value == JNI_TRUE) ? true : false;
+
+ art::gLogVerbosity.collector = val;
+ art::gLogVerbosity.compiler = val;
+ art::gLogVerbosity.deopt = val;
+ art::gLogVerbosity.heap = val;
+ art::gLogVerbosity.jdwp = val;
+ art::gLogVerbosity.jit = val;
+ art::gLogVerbosity.monitor = val;
+ art::gLogVerbosity.oat = val;
+ art::gLogVerbosity.profiler = val;
+ art::gLogVerbosity.signals = val;
+ art::gLogVerbosity.simulator = val;
+ art::gLogVerbosity.startup = val;
+ art::gLogVerbosity.third_party_jni = val;
+ art::gLogVerbosity.threads = val;
+ art::gLogVerbosity.verifier = val;
+ art::gLogVerbosity.image = val;
+
+ // Note: can't switch systrace_lock_logging. That requires changing entrypoints.
+
+ art::gLogVerbosity.agents = val;
+ } else {
+ // The spec isn't clear whether "flag" is a mask or a single value. We implement the mask
+ // semantics.
+ constexpr std::underlying_type<jvmtiVerboseFlag>::type kMask =
+ jvmtiVerboseFlag::JVMTI_VERBOSE_GC |
+ jvmtiVerboseFlag::JVMTI_VERBOSE_CLASS |
+ jvmtiVerboseFlag::JVMTI_VERBOSE_JNI;
+ if ((flag & ~kMask) != 0) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+
+ bool val = (value == JNI_TRUE) ? true : false;
+
+ if ((flag & jvmtiVerboseFlag::JVMTI_VERBOSE_GC) != 0) {
+ art::gLogVerbosity.gc = val;
+ }
+
+ if ((flag & jvmtiVerboseFlag::JVMTI_VERBOSE_CLASS) != 0) {
+ art::gLogVerbosity.class_linker = val;
+ }
+
+ if ((flag & jvmtiVerboseFlag::JVMTI_VERBOSE_JNI) != 0) {
+ art::gLogVerbosity.jni = val;
+ }
+ }
+
+ return ERR(NONE);
}
static jvmtiError GetJLocationFormat(jvmtiEnv* env, jvmtiJlocationFormat* format_ptr) {
@@ -1169,34 +1237,6 @@
return RetransformClassesWithHook(reinterpret_cast<ArtJvmTiEnv*>(env), classes, hook);
}
- static jvmtiError RedefineClassDirect(ArtJvmTiEnv* env,
- jclass klass,
- jint dex_size,
- unsigned char* dex_file) {
- if (!IsValidEnv(env)) {
- return ERR(INVALID_ENVIRONMENT);
- }
- jvmtiError ret = OK;
- std::string location;
- if ((ret = GetClassLocation(env, klass, &location)) != OK) {
- // TODO Do something more here? Maybe give log statements?
- return ret;
- }
- std::string error;
- ret = Redefiner::RedefineClass(env,
- art::Runtime::Current(),
- art::Thread::Current(),
- klass,
- location,
- dex_size,
- reinterpret_cast<uint8_t*>(dex_file),
- &error);
- if (ret != OK) {
- LOG(WARNING) << "FAILURE TO REDEFINE " << error;
- }
- return ret;
- }
-
// TODO This will be called by the event handler for the art::ti Event Load Event
static jvmtiError RetransformClassesWithHook(ArtJvmTiEnv* env,
const std::vector<jclass>& classes,
@@ -1241,14 +1281,13 @@
/*out*/&new_dex_data);
// Check if anything actually changed.
if ((new_data_len != 0 || new_dex_data != nullptr) && new_dex_data != dex_data) {
- res = Redefiner::RedefineClass(env,
- art::Runtime::Current(),
- art::Thread::Current(),
- klass,
- location,
- new_data_len,
- new_dex_data,
- &error);
+ jvmtiClassDefinition def = { klass, new_data_len, new_dex_data };
+ res = Redefiner::RedefineClasses(env,
+ art::Runtime::Current(),
+ art::Thread::Current(),
+ 1,
+ &def,
+ &error);
env->Deallocate(new_dex_data);
}
// Deallocate the old dex data.
@@ -1307,10 +1346,7 @@
reinterpret_cast<void*>(JvmtiFunctions::RetransformClassWithHook),
// nullptr, // reserved1
JvmtiFunctions::SetEventNotificationMode,
- // SPECIAL FUNCTION: RedefineClassDirect Is normally reserved3
- // TODO Remove once we have events working.
- reinterpret_cast<void*>(JvmtiFunctions::RedefineClassDirect),
- // nullptr, // reserved3
+ nullptr, // reserved3
JvmtiFunctions::GetAllThreads,
JvmtiFunctions::SuspendThread,
JvmtiFunctions::ResumeThread,
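For context, a minimal agent-side sketch of exercising the RedefineClasses entry point wired up above through the standard jvmtiEnv table; `new_bytes`/`new_len` stand in for a caller-supplied dex file and the wrapper function is hypothetical.

#include <jvmti.h>

static jvmtiError RedefineOne(jvmtiEnv* jvmti, jclass klass,
                              const unsigned char* new_bytes, jint new_len) {
  jvmtiClassDefinition def;
  def.klass = klass;
  def.class_byte_count = new_len;
  def.class_bytes = new_bytes;
  // The runtime copies the bytes into its own mapping, so the caller keeps
  // ownership of new_bytes after the call returns.
  return jvmti->RedefineClasses(1, &def);
}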
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index 0d1704c..d1324bc 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -32,7 +32,10 @@
#include "ti_class.h"
#include "art_jvmti.h"
+#include "class_table-inl.h"
+#include "class_linker.h"
#include "jni_internal.h"
+#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
@@ -328,4 +331,90 @@
return ERR(NONE);
}
+jvmtiError ClassUtil::GetClassLoaderClasses(jvmtiEnv* env,
+ jobject initiating_loader,
+ jint* class_count_ptr,
+ jclass** classes_ptr) {
+ UNUSED(env, initiating_loader, class_count_ptr, classes_ptr);
+
+ if (class_count_ptr == nullptr || classes_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+ art::Thread* self = art::Thread::Current();
+ if (!self->GetJniEnv()->IsInstanceOf(initiating_loader,
+ art::WellKnownClasses::java_lang_ClassLoader)) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ if (self->GetJniEnv()->IsInstanceOf(initiating_loader,
+ art::WellKnownClasses::java_lang_BootClassLoader)) {
+ // Need to use null for the BootClassLoader.
+ initiating_loader = nullptr;
+ }
+
+ art::ScopedObjectAccess soa(self);
+ art::ObjPtr<art::mirror::ClassLoader> class_loader =
+ soa.Decode<art::mirror::ClassLoader>(initiating_loader);
+
+ art::ClassLinker* class_linker = art::Runtime::Current()->GetClassLinker();
+
+ art::ReaderMutexLock mu(self, *art::Locks::classlinker_classes_lock_);
+
+ art::ClassTable* class_table = class_linker->ClassTableForClassLoader(class_loader);
+ if (class_table == nullptr) {
+ // Nothing loaded.
+ *class_count_ptr = 0;
+ *classes_ptr = nullptr;
+ return ERR(NONE);
+ }
+
+ struct ClassTableCount {
+ bool operator()(art::ObjPtr<art::mirror::Class> klass) {
+ DCHECK(klass != nullptr);
+ ++count;
+ return true;
+ }
+
+ size_t count = 0;
+ };
+ ClassTableCount ctc;
+ class_table->Visit(ctc);
+
+ if (ctc.count == 0) {
+ // Nothing loaded.
+ *class_count_ptr = 0;
+ *classes_ptr = nullptr;
+ return ERR(NONE);
+ }
+
+ unsigned char* data;
+ jvmtiError data_result = env->Allocate(ctc.count * sizeof(jclass), &data);
+ if (data_result != ERR(NONE)) {
+ return data_result;
+ }
+ jclass* class_array = reinterpret_cast<jclass*>(data);
+
+ struct ClassTableFill {
+ bool operator()(art::ObjPtr<art::mirror::Class> klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK(klass != nullptr);
+ DCHECK_LT(count, ctc_ref.count);
+ local_class_array[count++] = soa_ptr->AddLocalReference<jclass>(klass);
+ return true;
+ }
+
+ jclass* local_class_array;
+ const ClassTableCount& ctc_ref;
+ art::ScopedObjectAccess* soa_ptr;
+ size_t count;
+ };
+ ClassTableFill ctf = { class_array, ctc, &soa, 0 };
+ class_table->Visit(ctf);
+ DCHECK_EQ(ctc.count, ctf.count);
+
+ *class_count_ptr = ctc.count;
+ *classes_ptr = class_array;
+
+ return ERR(NONE);
+}
+
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_class.h b/runtime/openjdkjvmti/ti_class.h
index 577fc8e..7a0fafb 100644
--- a/runtime/openjdkjvmti/ti_class.h
+++ b/runtime/openjdkjvmti/ti_class.h
@@ -65,6 +65,11 @@
static jvmtiError GetClassLoader(jvmtiEnv* env, jclass klass, jobject* classloader_ptr);
+ static jvmtiError GetClassLoaderClasses(jvmtiEnv* env,
+ jobject initiating_loader,
+ jint* class_count_ptr,
+ jclass** classes_ptr);
+
static jvmtiError IsInterface(jvmtiEnv* env, jclass klass, jboolean* is_interface_ptr);
static jvmtiError IsArrayClass(jvmtiEnv* env, jclass klass, jboolean* is_array_class_ptr);
};
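For context, a minimal agent-side sketch of consuming the GetClassLoaderClasses implementation added above via the standard JVMTI interface; the wrapper function is hypothetical and error handling is abbreviated.

#include <jvmti.h>

static void VisitLoaderClasses(jvmtiEnv* jvmti, JNIEnv* jni, jobject loader) {
  jint count = 0;
  jclass* classes = nullptr;
  if (jvmti->GetClassLoaderClasses(loader, &count, &classes) != JVMTI_ERROR_NONE) {
    return;
  }
  for (jint i = 0; i < count; ++i) {
    char* sig = nullptr;
    if (jvmti->GetClassSignature(classes[i], &sig, /* generic_ptr */ nullptr) ==
        JVMTI_ERROR_NONE) {
      // ... inspect `sig` ...
      jvmti->Deallocate(reinterpret_cast<unsigned char*>(sig));
    }
    jni->DeleteLocalRef(classes[i]);  // Each entry is a JNI local reference.
  }
  // The array itself comes from the JVMTI allocator and must be deallocated.
  jvmti->Deallocate(reinterpret_cast<unsigned char*>(classes));
}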
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index 5bf8445..14477a1 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -53,6 +53,7 @@
#include "object_lock.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
+#include "transform.h"
namespace openjdkjvmti {
@@ -207,7 +208,7 @@
// Moves dex data to an anonymous, read-only mmap'd region.
std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location,
jint data_len,
- unsigned char* dex_data,
+ const unsigned char* dex_data,
std::string* error_msg) {
std::unique_ptr<art::MemMap> map(art::MemMap::MapAnonymous(
StringPrintf("%s-transformed", original_location.c_str()).c_str(),
@@ -227,34 +228,87 @@
return map;
}
+Redefiner::ClassRedefinition::ClassRedefinition(Redefiner* driver,
+ jclass klass,
+ const art::DexFile* redefined_dex_file,
+ const char* class_sig) :
+ driver_(driver), klass_(klass), dex_file_(redefined_dex_file), class_sig_(class_sig) {
+ GetMirrorClass()->MonitorEnter(driver_->self_);
+}
+
+Redefiner::ClassRedefinition::~ClassRedefinition() {
+ if (driver_ != nullptr) {
+ GetMirrorClass()->MonitorExit(driver_->self_);
+ }
+}
+
// TODO This should handle doing multiple classes at once so we need to do less cleanup when things
// go wrong.
-jvmtiError Redefiner::RedefineClass(ArtJvmTiEnv* env,
- art::Runtime* runtime,
- art::Thread* self,
- jclass klass,
- const std::string& original_dex_location,
- jint data_len,
- unsigned char* dex_data,
- std::string* error_msg) {
+jvmtiError Redefiner::RedefineClasses(ArtJvmTiEnv* env,
+ art::Runtime* runtime,
+ art::Thread* self,
+ jint class_count,
+ const jvmtiClassDefinition* definitions,
+ std::string* error_msg) {
+ if (env == nullptr) {
+ *error_msg = "env was null!";
+ return ERR(INVALID_ENVIRONMENT);
+ } else if (class_count < 0) {
+ *error_msg = "class_count was less then 0";
+ return ERR(ILLEGAL_ARGUMENT);
+ } else if (class_count == 0) {
+ // We don't actually need to do anything. Just return OK.
+ return OK;
+ } else if (definitions == nullptr) {
+ *error_msg = "null definitions!";
+ return ERR(NULL_POINTER);
+ }
+ // Stop JIT for the duration of this redefine since the JIT might concurrently compile a method we
+ // are going to redefine.
+ art::jit::ScopedJitSuspend suspend_jit;
+ // Get shared mutator lock so we can lock all the classes.
+ art::ScopedObjectAccess soa(self);
+ std::vector<Redefiner::ClassRedefinition> redefinitions;
+ redefinitions.reserve(class_count);
+ Redefiner r(runtime, self, error_msg);
+ for (jint i = 0; i < class_count; i++) {
+ jvmtiError res = r.AddRedefinition(env, definitions[i]);
+ if (res != OK) {
+ return res;
+ }
+ }
+ return r.Run();
+}
+
+jvmtiError Redefiner::AddRedefinition(ArtJvmTiEnv* env, const jvmtiClassDefinition& def) {
+ std::string original_dex_location;
+ jvmtiError ret = OK;
+ if ((ret = GetClassLocation(env, def.klass, &original_dex_location))) {
+ *error_msg_ = "Unable to get original dex file location!";
+ return ret;
+ }
std::unique_ptr<art::MemMap> map(MoveDataToMemMap(original_dex_location,
- data_len,
- dex_data,
- error_msg));
+ def.class_byte_count,
+ def.class_bytes,
+ error_msg_));
std::ostringstream os;
char* generic_ptr_unused = nullptr;
char* signature_ptr = nullptr;
- if (env->GetClassSignature(klass, &signature_ptr, &generic_ptr_unused) != OK) {
- signature_ptr = const_cast<char*>("<UNKNOWN CLASS>");
+ if (env->GetClassSignature(def.klass, &signature_ptr, &generic_ptr_unused) != OK) {
+ *error_msg_ = "A jclass passed in does not seem to be valid";
+ return ERR(INVALID_CLASS);
}
+ // These will make sure we deallocate the signature.
+ JvmtiUniquePtr sig_unique_ptr(MakeJvmtiUniquePtr(env, signature_ptr));
+ JvmtiUniquePtr generic_unique_ptr(MakeJvmtiUniquePtr(env, generic_ptr_unused));
if (map.get() == nullptr) {
os << "Failed to create anonymous mmap for modified dex file of class " << signature_ptr
- << "in dex file " << original_dex_location << " because: " << *error_msg;
- *error_msg = os.str();
+ << "in dex file " << original_dex_location << " because: " << *error_msg_;
+ *error_msg_ = os.str();
return ERR(OUT_OF_MEMORY);
}
if (map->Size() < sizeof(art::DexFile::Header)) {
- *error_msg = "Could not read dex file header because dex_data was too short";
+ *error_msg_ = "Could not read dex file header because dex_data was too short";
return ERR(INVALID_CLASS_FORMAT);
}
uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
@@ -263,28 +317,21 @@
std::move(map),
/*verify*/true,
/*verify_checksum*/true,
- error_msg));
+ error_msg_));
if (dex_file.get() == nullptr) {
- os << "Unable to load modified dex file for " << signature_ptr << ": " << *error_msg;
- *error_msg = os.str();
+ os << "Unable to load modified dex file for " << signature_ptr << ": " << *error_msg_;
+ *error_msg_ = os.str();
return ERR(INVALID_CLASS_FORMAT);
}
- // Stop JIT for the duration of this redefine since the JIT might concurrently compile a method we
- // are going to redefine.
- art::jit::ScopedJitSuspend suspend_jit;
- // Get shared mutator lock.
- art::ScopedObjectAccess soa(self);
- art::StackHandleScope<1> hs(self);
- Redefiner r(runtime, self, klass, signature_ptr, dex_file, error_msg);
- // Lock around this class to avoid races.
- art::ObjectLock<art::mirror::Class> lock(self, hs.NewHandle(r.GetMirrorClass()));
- return r.Run();
+ redefinitions_.push_back(
+ Redefiner::ClassRedefinition(this, def.klass, dex_file.release(), signature_ptr));
+ return OK;
}
// TODO *MAJOR* This should return the actual source java.lang.DexFile object for the klass.
// TODO Make mirror of DexFile and associated types to make this less hellish.
// TODO Make mirror of BaseDexClassLoader and associated types to make this less hellish.
-art::mirror::Object* Redefiner::FindSourceDexFileObject(
+art::mirror::Object* Redefiner::ClassRedefinition::FindSourceDexFileObject(
art::Handle<art::mirror::ClassLoader> loader) {
const char* dex_path_list_element_array_name = "[Ldalvik/system/DexPathList$Element;";
const char* dex_path_list_element_name = "Ldalvik/system/DexPathList$Element;";
@@ -292,14 +339,14 @@
const char* dex_path_list_name = "Ldalvik/system/DexPathList;";
const char* dex_class_loader_name = "Ldalvik/system/BaseDexClassLoader;";
- CHECK(!self_->IsExceptionPending());
- art::StackHandleScope<11> hs(self_);
- art::ClassLinker* class_linker = runtime_->GetClassLinker();
+ CHECK(!driver_->self_->IsExceptionPending());
+ art::StackHandleScope<11> hs(driver_->self_);
+ art::ClassLinker* class_linker = driver_->runtime_->GetClassLinker();
art::Handle<art::mirror::ClassLoader> null_loader(hs.NewHandle<art::mirror::ClassLoader>(
nullptr));
art::Handle<art::mirror::Class> base_dex_loader_class(hs.NewHandle(class_linker->FindClass(
- self_, dex_class_loader_name, null_loader)));
+ driver_->self_, dex_class_loader_name, null_loader)));
// Get all the ArtFields so we can look in the BaseDexClassLoader
art::ArtField* path_list_field = base_dex_loader_class->FindDeclaredInstanceField(
@@ -307,12 +354,12 @@
CHECK(path_list_field != nullptr);
art::ArtField* dex_path_list_element_field =
- class_linker->FindClass(self_, dex_path_list_name, null_loader)
+ class_linker->FindClass(driver_->self_, dex_path_list_name, null_loader)
->FindDeclaredInstanceField("dexElements", dex_path_list_element_array_name);
CHECK(dex_path_list_element_field != nullptr);
art::ArtField* element_dex_file_field =
- class_linker->FindClass(self_, dex_path_list_element_name, null_loader)
+ class_linker->FindClass(driver_->self_, dex_path_list_element_name, null_loader)
->FindDeclaredInstanceField("dexFile", dex_file_name);
CHECK(element_dex_file_field != nullptr);
@@ -327,11 +374,11 @@
art::Handle<art::mirror::Object> path_list(
hs.NewHandle(path_list_field->GetObject(loader.Get())));
CHECK(path_list.Get() != nullptr);
- CHECK(!self_->IsExceptionPending());
+ CHECK(!driver_->self_->IsExceptionPending());
art::Handle<art::mirror::ObjectArray<art::mirror::Object>> dex_elements_list(hs.NewHandle(
dex_path_list_element_field->GetObject(path_list.Get())->
AsObjectArray<art::mirror::Object>()));
- CHECK(!self_->IsExceptionPending());
+ CHECK(!driver_->self_->IsExceptionPending());
CHECK(dex_elements_list.Get() != nullptr);
size_t num_elements = dex_elements_list->GetLength();
art::MutableHandle<art::mirror::Object> current_element(
@@ -343,9 +390,9 @@
for (size_t i = 0; i < num_elements; i++) {
current_element.Assign(dex_elements_list->Get(i));
CHECK(current_element.Get() != nullptr);
- CHECK(!self_->IsExceptionPending());
+ CHECK(!driver_->self_->IsExceptionPending());
CHECK(dex_elements_list.Get() != nullptr);
- CHECK_EQ(current_element->GetClass(), class_linker->FindClass(self_,
+ CHECK_EQ(current_element->GetClass(), class_linker->FindClass(driver_->self_,
dex_path_list_element_name,
null_loader));
// TODO It would be cleaner to put the art::DexFile into the dalvik.system.DexFile the class
@@ -360,22 +407,23 @@
return nullptr;
}
-art::mirror::Class* Redefiner::GetMirrorClass() {
- return self_->DecodeJObject(klass_)->AsClass();
+art::mirror::Class* Redefiner::ClassRedefinition::GetMirrorClass() {
+ return driver_->self_->DecodeJObject(klass_)->AsClass();
}
-art::mirror::ClassLoader* Redefiner::GetClassLoader() {
+art::mirror::ClassLoader* Redefiner::ClassRedefinition::GetClassLoader() {
return GetMirrorClass()->GetClassLoader();
}
-art::mirror::DexCache* Redefiner::CreateNewDexCache(art::Handle<art::mirror::ClassLoader> loader) {
- return runtime_->GetClassLinker()->RegisterDexFile(*dex_file_, loader.Get());
+art::mirror::DexCache* Redefiner::ClassRedefinition::CreateNewDexCache(
+ art::Handle<art::mirror::ClassLoader> loader) {
+ return driver_->runtime_->GetClassLinker()->RegisterDexFile(*dex_file_, loader.Get());
}
// TODO Really wishing I had that mirror of java.lang.DexFile now.
-art::mirror::LongArray* Redefiner::AllocateDexFileCookie(
+art::mirror::LongArray* Redefiner::ClassRedefinition::AllocateDexFileCookie(
art::Handle<art::mirror::Object> java_dex_file_obj) {
- art::StackHandleScope<2> hs(self_);
+ art::StackHandleScope<2> hs(driver_->self_);
// mCookie is nulled out if the DexFile has been closed but mInternalCookie sticks around until
// the object is finalized. Since they always point to the same array if mCookie is not null we
// just use the mInternalCookie field. We will update one or both of these fields later.
@@ -390,9 +438,9 @@
CHECK(cookie.Get() != nullptr);
CHECK_GE(cookie->GetLength(), 1);
art::Handle<art::mirror::LongArray> new_cookie(
- hs.NewHandle(art::mirror::LongArray::Alloc(self_, cookie->GetLength() + 1)));
+ hs.NewHandle(art::mirror::LongArray::Alloc(driver_->self_, cookie->GetLength() + 1)));
if (new_cookie.Get() == nullptr) {
- self_->AssertPendingOOMException();
+ driver_->self_->AssertPendingOOMException();
return nullptr;
}
// Copy the oat-dex field at the start.
@@ -405,19 +453,21 @@
return new_cookie.Get();
}
-void Redefiner::RecordFailure(jvmtiError result, const std::string& error_msg) {
+void Redefiner::RecordFailure(jvmtiError result,
+ const std::string& class_sig,
+ const std::string& error_msg) {
*error_msg_ = StringPrintf("Unable to perform redefinition of '%s': %s",
- class_sig_,
+ class_sig.c_str(),
error_msg.c_str());
result_ = result;
}
-bool Redefiner::FinishRemainingAllocations(
+bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
/*out*/art::MutableHandle<art::mirror::ClassLoader>* source_class_loader,
/*out*/art::MutableHandle<art::mirror::Object>* java_dex_file_obj,
/*out*/art::MutableHandle<art::mirror::LongArray>* new_dex_file_cookie,
/*out*/art::MutableHandle<art::mirror::DexCache>* new_dex_cache) {
- art::StackHandleScope<4> hs(self_);
+ art::StackHandleScope<4> hs(driver_->self_);
// This shouldn't allocate
art::Handle<art::mirror::ClassLoader> loader(hs.NewHandle(GetClassLoader()));
if (loader.Get() == nullptr) {
@@ -433,15 +483,15 @@
}
art::Handle<art::mirror::LongArray> new_cookie(hs.NewHandle(AllocateDexFileCookie(dex_file_obj)));
if (new_cookie.Get() == nullptr) {
- self_->AssertPendingOOMException();
- self_->ClearException();
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
return false;
}
art::Handle<art::mirror::DexCache> dex_cache(hs.NewHandle(CreateNewDexCache(loader)));
if (dex_cache.Get() == nullptr) {
- self_->AssertPendingOOMException();
- self_->ClearException();
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate DexCache");
return false;
}
@@ -453,13 +503,11 @@
}
struct CallbackCtx {
- Redefiner* const r;
art::LinearAlloc* allocator;
std::unordered_map<art::ArtMethod*, art::ArtMethod*> obsolete_map;
std::unordered_set<art::ArtMethod*> obsolete_methods;
- CallbackCtx(Redefiner* self, art::LinearAlloc* alloc)
- : r(self), allocator(alloc) {}
+ explicit CallbackCtx(art::LinearAlloc* alloc) : allocator(alloc) {}
};
void DoAllocateObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SAFETY_ANALYSIS {
@@ -472,11 +520,12 @@
// This creates any ArtMethod* structures needed for obsolete methods and ensures that the stack is
// updated so they will be run.
-void Redefiner::FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) {
+// TODO Rewrite so we can do this only once regardless of how many redefinitions there are.
+void Redefiner::ClassRedefinition::FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) {
art::ScopedAssertNoThreadSuspension ns("No thread suspension during thread stack walking");
art::mirror::ClassExt* ext = art_klass->GetExtData();
CHECK(ext->GetObsoleteMethods() != nullptr);
- CallbackCtx ctx(this, art_klass->GetClassLoader()->GetAllocator());
+ CallbackCtx ctx(art_klass->GetClassLoader()->GetAllocator());
// Add all the declared methods to the map
for (auto& m : art_klass->GetDeclaredMethods(art::kRuntimePointerSize)) {
ctx.obsolete_methods.insert(&m);
@@ -484,7 +533,7 @@
DCHECK(!m.IsIntrinsic());
}
{
- art::MutexLock mu(self_, *art::Locks::thread_list_lock_);
+ art::MutexLock mu(driver_->self_, *art::Locks::thread_list_lock_);
art::ThreadList* list = art::Runtime::Current()->GetThreadList();
list->ForEach(DoAllocateObsoleteMethodsCallback, static_cast<void*>(&ctx));
}
@@ -493,7 +542,7 @@
// Fills the obsolete method map in the art_klass's extData. This is so obsolete methods are able to
// figure out their DexCaches.
-void Redefiner::FillObsoleteMethodMap(
+void Redefiner::ClassRedefinition::FillObsoleteMethodMap(
art::mirror::Class* art_klass,
const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes) {
int32_t index = 0;
@@ -532,9 +581,9 @@
i->ReJitEverything("libOpenJkdJvmti - Class Redefinition");
}
-bool Redefiner::CheckClass() {
+bool Redefiner::ClassRedefinition::CheckClass() {
// TODO Might just want to put it in a ObjPtr and NoSuspend assert.
- art::StackHandleScope<1> hs(self_);
+ art::StackHandleScope<1> hs(driver_->self_);
// Easy check that only 1 class def is present.
if (dex_file_->NumClassDefs() != 1) {
RecordFailure(ERR(ILLEGAL_ARGUMENT),
@@ -607,14 +656,15 @@
}
}
LOG(WARNING) << "No verification is done on annotations of redefined classes.";
+ LOG(WARNING) << "Bytecodes of redefinitions are not verified.";
return true;
}
// TODO Move this to use IsRedefinable when that function is made.
-bool Redefiner::CheckRedefinable() {
+bool Redefiner::ClassRedefinition::CheckRedefinable() {
std::string err;
- art::StackHandleScope<1> hs(self_);
+ art::StackHandleScope<1> hs(driver_->self_);
art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
jvmtiError res = Redefiner::GetClassRedefinitionError(h_klass, &err);
@@ -626,46 +676,200 @@
}
}
-bool Redefiner::CheckRedefinitionIsValid() {
+bool Redefiner::ClassRedefinition::CheckRedefinitionIsValid() {
return CheckRedefinable() &&
CheckClass() &&
CheckSameFields() &&
CheckSameMethods();
}
+// A wrapper that lets us hold onto the arbitrary sized data needed for redefinitions in a
+// reasonably sane way. This adds no fields to the normal ObjectArray. By doing this we can avoid
+// having to deal with the fact that we need to hold an arbitrary number of references live.
+class RedefinitionDataHolder {
+ public:
+ enum DataSlot : int32_t {
+ kSlotSourceClassLoader = 0,
+ kSlotJavaDexFile = 1,
+ kSlotNewDexFileCookie = 2,
+ kSlotNewDexCache = 3,
+ kSlotMirrorClass = 4,
+
+ // Must be last one.
+ kNumSlots = 5,
+ };
+
+ // This needs to have a HandleScope passed in that is capable of creating a new Handle without
+ // overflowing. Only one handle will be created. This object has a lifetime identical to that of
+ // the passed in handle-scope.
+ RedefinitionDataHolder(art::StackHandleScope<1>* hs,
+ art::Runtime* runtime,
+ art::Thread* self,
+ int32_t num_redefinitions) REQUIRES_SHARED(art::Locks::mutator_lock_) :
+ arr_(
+ hs->NewHandle(
+ art::mirror::ObjectArray<art::mirror::Object>::Alloc(
+ self,
+ runtime->GetClassLinker()->GetClassRoot(art::ClassLinker::kObjectArrayClass),
+ num_redefinitions * kNumSlots))) {}
+
+ bool IsNull() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return arr_.IsNull();
+ }
+
+ // TODO Maybe make an iterable view type to simplify using this.
+ art::mirror::ClassLoader* GetSourceClassLoader(jint klass_index)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::down_cast<art::mirror::ClassLoader*>(GetSlot(klass_index, kSlotSourceClassLoader));
+ }
+ art::mirror::Object* GetJavaDexFile(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return GetSlot(klass_index, kSlotJavaDexFile);
+ }
+ art::mirror::LongArray* GetNewDexFileCookie(jint klass_index)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::down_cast<art::mirror::LongArray*>(GetSlot(klass_index, kSlotNewDexFileCookie));
+ }
+ art::mirror::DexCache* GetNewDexCache(jint klass_index)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::down_cast<art::mirror::DexCache*>(GetSlot(klass_index, kSlotNewDexCache));
+ }
+ art::mirror::Class* GetMirrorClass(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::down_cast<art::mirror::Class*>(GetSlot(klass_index, kSlotMirrorClass));
+ }
+
+ void SetSourceClassLoader(jint klass_index, art::mirror::ClassLoader* loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotSourceClassLoader, loader);
+ }
+ void SetJavaDexFile(jint klass_index, art::mirror::Object* dexfile)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotJavaDexFile, dexfile);
+ }
+ void SetNewDexFileCookie(jint klass_index, art::mirror::LongArray* cookie)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotNewDexFileCookie, cookie);
+ }
+ void SetNewDexCache(jint klass_index, art::mirror::DexCache* cache)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotNewDexCache, cache);
+ }
+ void SetMirrorClass(jint klass_index, art::mirror::Class* klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotMirrorClass, klass);
+ }
+
+ int32_t Length() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return arr_->GetLength() / kNumSlots;
+ }
+
+ private:
+ art::Handle<art::mirror::ObjectArray<art::mirror::Object>> arr_;
+
+ art::mirror::Object* GetSlot(jint klass_index,
+ DataSlot slot) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK_LT(klass_index, Length());
+ return arr_->Get((kNumSlots * klass_index) + slot);
+ }
+
+ void SetSlot(jint klass_index,
+ DataSlot slot,
+ art::ObjPtr<art::mirror::Object> obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK(!art::Runtime::Current()->IsActiveTransaction());
+ DCHECK_LT(klass_index, Length());
+ arr_->Set<false>((kNumSlots * klass_index) + slot, obj);
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(RedefinitionDataHolder);
+};
+
+bool Redefiner::CheckAllRedefinitionAreValid() {
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ if (!redef.CheckRedefinitionIsValid()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Redefiner::EnsureAllClassAllocationsFinished() {
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ if (!redef.EnsureClassAllocationsFinished()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Redefiner::FinishAllRemainingAllocations(RedefinitionDataHolder& holder) {
+ int32_t cnt = 0;
+ art::StackHandleScope<4> hs(self_);
+ art::MutableHandle<art::mirror::Object> java_dex_file(hs.NewHandle<art::mirror::Object>(nullptr));
+ art::MutableHandle<art::mirror::ClassLoader> source_class_loader(
+ hs.NewHandle<art::mirror::ClassLoader>(nullptr));
+ art::MutableHandle<art::mirror::LongArray> new_dex_file_cookie(
+ hs.NewHandle<art::mirror::LongArray>(nullptr));
+ art::MutableHandle<art::mirror::DexCache> new_dex_cache(
+ hs.NewHandle<art::mirror::DexCache>(nullptr));
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ // Reset the out pointers to null
+ source_class_loader.Assign(nullptr);
+ java_dex_file.Assign(nullptr);
+ new_dex_file_cookie.Assign(nullptr);
+ new_dex_cache.Assign(nullptr);
+ // Allocate the data this redefinition requires.
+ if (!redef.FinishRemainingAllocations(&source_class_loader,
+ &java_dex_file,
+ &new_dex_file_cookie,
+ &new_dex_cache)) {
+ return false;
+ }
+ // Save the allocated data into the holder.
+ holder.SetSourceClassLoader(cnt, source_class_loader.Get());
+ holder.SetJavaDexFile(cnt, java_dex_file.Get());
+ holder.SetNewDexFileCookie(cnt, new_dex_file_cookie.Get());
+ holder.SetNewDexCache(cnt, new_dex_cache.Get());
+ holder.SetMirrorClass(cnt, redef.GetMirrorClass());
+ cnt++;
+ }
+ return true;
+}
+
+void Redefiner::ClassRedefinition::ReleaseDexFile() {
+ dex_file_.release();
+}
+
+void Redefiner::ReleaseAllDexFiles() {
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ redef.ReleaseDexFile();
+ }
+}
+
jvmtiError Redefiner::Run() {
- art::StackHandleScope<5> hs(self_);
- // TODO We might want to have a global lock (or one based on the class being redefined at least)
- // in order to make cleanup easier. Not a huge deal though.
- //
+ art::StackHandleScope<1> hs(self_);
+ // Allocate an array to hold onto all java temporary objects associated with this redefinition.
+ // We will let this be collected after the end of this function.
+ RedefinitionDataHolder holder(&hs, runtime_, self_, redefinitions_.size());
+ if (holder.IsNull()) {
+ self_->AssertPendingOOMException();
+ self_->ClearException();
+ RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate storage for temporaries");
+ return result_;
+ }
+
// First we just allocate the ClassExt and its fields that we need. These can be updated
// atomically without any issues (since we allocate the map arrays as empty) so we don't bother
// doing a try loop. The other allocations we need to ensure that nothing has changed in the time
// between allocating them and pausing all threads before we can update them so we need to do a
// try loop.
- if (!CheckRedefinitionIsValid() || !EnsureClassAllocationsFinished()) {
- return result_;
- }
- art::MutableHandle<art::mirror::ClassLoader> source_class_loader(
- hs.NewHandle<art::mirror::ClassLoader>(nullptr));
- art::MutableHandle<art::mirror::Object> java_dex_file(
- hs.NewHandle<art::mirror::Object>(nullptr));
- art::MutableHandle<art::mirror::LongArray> new_dex_file_cookie(
- hs.NewHandle<art::mirror::LongArray>(nullptr));
- art::MutableHandle<art::mirror::DexCache> new_dex_cache(
- hs.NewHandle<art::mirror::DexCache>(nullptr));
- if (!FinishRemainingAllocations(&source_class_loader,
- &java_dex_file,
- &new_dex_file_cookie,
- &new_dex_cache)) {
+ if (!CheckAllRedefinitionAreValid() ||
+ !EnsureAllClassAllocationsFinished() ||
+ !FinishAllRemainingAllocations(holder)) {
// TODO Null out the ClassExt fields we allocated (if possible, might be racing with another
// redefineclass call which made it even bigger. Leak shouldn't be huge (2x array of size
- // declared_methods_.length) but would be good to get rid of.
- // new_dex_file_cookie & new_dex_cache should be cleaned up by the GC.
+ // declared_methods_.length) but would be good to get rid of. All other allocations should be
+ // cleaned up by the GC eventually.
return result_;
}
- // Get the mirror class now that we aren't allocating anymore.
- art::Handle<art::mirror::Class> art_class(hs.NewHandle(GetMirrorClass()));
// Disable GC and wait for it to be done if we are a moving GC. This is fine since we are done
// allocating so no deadlocks.
art::gc::Heap* heap = runtime_->GetHeap();
@@ -673,31 +877,29 @@
// GC moving objects can cause deadlocks as we are deoptimizing the stack.
heap->IncrementDisableMovingGC(self_);
}
- // Enable assertion that this thread isn't interrupted during this installation.
- // After this we will need to do real cleanup in case of failure. Prior to this we could simply
- // return and would let everything get cleaned up or harmlessly leaked.
// Do transition to final suspension
// TODO We might want to give this its own suspended state!
// TODO This isn't right. We need to change state without any chance of suspend ideally!
self_->TransitionFromRunnableToSuspended(art::ThreadState::kNative);
runtime_->GetThreadList()->SuspendAll(
- "Final installation of redefined Class!", /*long_suspend*/true);
+ "Final installation of redefined Classes!", /*long_suspend*/true);
// TODO We need to invalidate all breakpoints in the redefined class with the debugger.
// TODO We need to deal with any instrumentation/debugger deoptimized_methods_.
// TODO We need to update all debugger MethodIDs so they note the method they point to is
// obsolete or implement some other well defined semantics.
// TODO We need to decide on & implement semantics for JNI jmethodids when we redefine methods.
- // TODO Might want to move this into a different type.
- // Now we reach the part where we must do active cleanup if something fails.
- // TODO We should really Retry if this fails instead of simply aborting.
- // Set the new DexFileCookie returns the original so we can fix it back up if redefinition fails
- art::ObjPtr<art::mirror::LongArray> original_dex_file_cookie(nullptr);
- UpdateJavaDexFile(java_dex_file.Get(), new_dex_file_cookie.Get(), &original_dex_file_cookie);
- FindAndAllocateObsoleteMethods(art_class.Get());
- UpdateClass(art_class.Get(), new_dex_cache.Get());
+ int32_t cnt = 0;
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ art::mirror::Class* klass = holder.GetMirrorClass(cnt);
+ redef.UpdateJavaDexFile(holder.GetJavaDexFile(cnt), holder.GetNewDexFileCookie(cnt));
+ // TODO Rewrite so we don't do a stack walk for each and every class.
+ redef.FindAndAllocateObsoleteMethods(klass);
+ redef.UpdateClass(klass, holder.GetNewDexCache(cnt));
+ cnt++;
+ }
// Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
// pointers to their ArtMethod's stashed in registers that they then use to attempt to hit the
- // DexCache.
+ // DexCache. (b/33630159)
// TODO This can fail (leave some methods optimized) near runtime methods (including
// quick-to-interpreter transition function).
// TODO We probably don't need this at all once we have a way to ensure that the
@@ -705,26 +907,25 @@
// stack-walker.
EnsureObsoleteMethodsAreDeoptimized();
// TODO Verify the new Class.
- // TODO Failure then undo updates to class
// TODO Shrink the obsolete method maps if possible?
// TODO find appropriate class loader.
// TODO Put this into a scoped thing.
runtime_->GetThreadList()->ResumeAll();
// Get back shared mutator lock as expected for return.
self_->TransitionFromSuspendedToRunnable();
- // TODO Do the dex_file_ release at a more reasonable place. This works but it muddles who really
- // owns the DexFile.
- dex_file_.release();
+ // TODO Do the dex_file release at a more reasonable place. This works but it muddles who really
+ // owns the DexFile and when ownership is transferred.
+ ReleaseAllDexFiles();
if (heap->IsGcConcurrentAndMoving()) {
heap->DecrementDisableMovingGC(self_);
}
return OK;
}
-void Redefiner::UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
- art::ObjPtr<art::mirror::DexCache> new_dex_cache,
- const art::DexFile::ClassDef& class_def) {
- art::ClassLinker* linker = runtime_->GetClassLinker();
+void Redefiner::ClassRedefinition::UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
+ art::ObjPtr<art::mirror::DexCache> new_dex_cache,
+ const art::DexFile::ClassDef& class_def) {
+ art::ClassLinker* linker = driver_->runtime_->GetClassLinker();
art::PointerSize image_pointer_size = linker->GetImagePointerSize();
const art::DexFile::TypeId& declaring_class_id = dex_file_->GetTypeId(class_def.class_idx_);
const art::DexFile& old_dex_file = mclass->GetDexFile();
@@ -759,14 +960,14 @@
method.SetDexCacheResolvedMethods(new_dex_cache->GetResolvedMethods(), image_pointer_size);
method.SetDexCacheResolvedTypes(new_dex_cache->GetResolvedTypes(), image_pointer_size);
// Notify the jit that this method is redefined.
- art::jit::Jit* jit = runtime_->GetJit();
+ art::jit::Jit* jit = driver_->runtime_->GetJit();
if (jit != nullptr) {
jit->GetCodeCache()->NotifyMethodRedefined(&method);
}
}
}
-void Redefiner::UpdateFields(art::ObjPtr<art::mirror::Class> mclass) {
+void Redefiner::ClassRedefinition::UpdateFields(art::ObjPtr<art::mirror::Class> mclass) {
// TODO The IFields & SFields pointers should be combined like the methods_ arrays were.
for (auto fields_iter : {mclass->GetIFields(), mclass->GetSFields()}) {
for (art::ArtField& field : fields_iter) {
@@ -787,10 +988,10 @@
}
// Performs updates to class that will allow us to verify it.
-void Redefiner::UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
- art::ObjPtr<art::mirror::DexCache> new_dex_cache) {
+void Redefiner::ClassRedefinition::UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
+ art::ObjPtr<art::mirror::DexCache> new_dex_cache) {
const art::DexFile::ClassDef* class_def = art::OatFile::OatDexFile::FindClassDef(
- *dex_file_, class_sig_, art::ComputeModifiedUtf8Hash(class_sig_));
+ *dex_file_, class_sig_.c_str(), art::ComputeModifiedUtf8Hash(class_sig_.c_str()));
DCHECK(class_def != nullptr);
UpdateMethods(mclass, new_dex_cache, *class_def);
UpdateFields(mclass);
@@ -800,12 +1001,12 @@
// to call GetReturnTypeDescriptor and GetParameterTypeList above).
mclass->SetDexCache(new_dex_cache.Ptr());
mclass->SetDexClassDefIndex(dex_file_->GetIndexForClassDef(*class_def));
- mclass->SetDexTypeIndex(dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(class_sig_)));
+ mclass->SetDexTypeIndex(dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(class_sig_.c_str())));
}
-void Redefiner::UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
- art::ObjPtr<art::mirror::LongArray> new_cookie,
- /*out*/art::ObjPtr<art::mirror::LongArray>* original_cookie) {
+void Redefiner::ClassRedefinition::UpdateJavaDexFile(
+ art::ObjPtr<art::mirror::Object> java_dex_file,
+ art::ObjPtr<art::mirror::LongArray> new_cookie) {
art::ArtField* internal_cookie_field = java_dex_file->GetClass()->FindDeclaredInstanceField(
"mInternalCookie", "Ljava/lang/Object;");
art::ArtField* cookie_field = java_dex_file->GetClass()->FindDeclaredInstanceField(
@@ -816,7 +1017,6 @@
art::ObjPtr<art::mirror::LongArray> orig_cookie(
cookie_field->GetObject(java_dex_file)->AsLongArray());
internal_cookie_field->SetObject<false>(java_dex_file, new_cookie);
- *original_cookie = orig_internal_cookie;
if (!orig_cookie.IsNull()) {
cookie_field->SetObject<false>(java_dex_file, new_cookie);
}
@@ -824,21 +1024,22 @@
// This function does all (java) allocations we need to do for the Class being redefined.
// TODO Change this name maybe?
-bool Redefiner::EnsureClassAllocationsFinished() {
- art::StackHandleScope<2> hs(self_);
- art::Handle<art::mirror::Class> klass(hs.NewHandle(self_->DecodeJObject(klass_)->AsClass()));
+bool Redefiner::ClassRedefinition::EnsureClassAllocationsFinished() {
+ art::StackHandleScope<2> hs(driver_->self_);
+ art::Handle<art::mirror::Class> klass(hs.NewHandle(
+ driver_->self_->DecodeJObject(klass_)->AsClass()));
if (klass.Get() == nullptr) {
RecordFailure(ERR(INVALID_CLASS), "Unable to decode class argument!");
return false;
}
// Allocate the classExt
- art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->EnsureExtDataPresent(self_)));
+ art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->EnsureExtDataPresent(driver_->self_)));
if (ext.Get() == nullptr) {
// No memory. Clear exception (it's not useful) and return error.
// TODO This doesn't need to be fatal. We could just not support obsolete methods after hitting
// this case.
- self_->AssertPendingOOMException();
- self_->ClearException();
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate ClassExt");
return false;
}
@@ -849,12 +1050,12 @@
// TODO Clear these after we walk the stacks in order to free them in the (likely?) event there
// are no obsolete methods.
{
- art::ObjectLock<art::mirror::ClassExt> lock(self_, ext);
+ art::ObjectLock<art::mirror::ClassExt> lock(driver_->self_, ext);
if (!ext->ExtendObsoleteArrays(
- self_, klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size())) {
+ driver_->self_, klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size())) {
// OOM. Clear exception and return error.
- self_->AssertPendingOOMException();
- self_->ClearException();
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate/extend obsolete methods map");
return false;
}
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 5852309..8626bc5 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -63,50 +63,154 @@
namespace openjdkjvmti {
+class RedefinitionDataHolder;
+
// Class that can redefine a single class's methods.
// TODO We should really make this be driven by an outside class so we can do multiple classes at
// the same time and have less required cleanup.
class Redefiner {
public:
- // Redefine the given class with the given dex data. Note this function does not take ownership of
- // the dex_data pointer. It is not used after this call however and may be freed if desired.
+ // Redefine the given classes with the given dex data. Note this function does not take ownership
+ // of the dex_data pointers. It is not used after this call however and may be freed if desired.
// The caller is responsible for freeing it. The runtime makes its own copy of the data.
- static jvmtiError RedefineClass(ArtJvmTiEnv* env,
- art::Runtime* runtime,
- art::Thread* self,
- jclass klass,
- const std::string& original_dex_location,
- jint data_len,
- unsigned char* dex_data,
- std::string* error_msg);
+ static jvmtiError RedefineClasses(ArtJvmTiEnv* env,
+ art::Runtime* runtime,
+ art::Thread* self,
+ jint class_count,
+ const jvmtiClassDefinition* definitions,
+ std::string* error_msg);
static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable);
private:
+ class ClassRedefinition {
+ public:
+ ClassRedefinition(Redefiner* driver,
+ jclass klass,
+ const art::DexFile* redefined_dex_file,
+ const char* class_sig)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // NO_THREAD_SAFETY_ANALYSIS so we can unlock the class in the destructor.
+ ~ClassRedefinition() NO_THREAD_SAFETY_ANALYSIS;
+
+ // Move constructor so we can put these into a vector.
+ ClassRedefinition(ClassRedefinition&& other)
+ : driver_(other.driver_),
+ klass_(other.klass_),
+ dex_file_(std::move(other.dex_file_)),
+ class_sig_(std::move(other.class_sig_)) {
+ other.driver_ = nullptr;
+ }
+
+ art::mirror::Class* GetMirrorClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ art::mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // This finds the java.lang.DexFile we will add the native DexFile to as part of the classpath.
+ // TODO Make sure the DexFile object returned is the one that the klass_ actually comes from.
+ art::mirror::Object* FindSourceDexFileObject(art::Handle<art::mirror::ClassLoader> loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ art::mirror::DexCache* CreateNewDexCache(art::Handle<art::mirror::ClassLoader> loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // Allocates and fills the new DexFileCookie
+ art::mirror::LongArray* AllocateDexFileCookie(art::Handle<art::mirror::Object> j_dex_file_obj)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void RecordFailure(jvmtiError e, const std::string& err) {
+ driver_->RecordFailure(e, class_sig_, err);
+ }
+
+ bool FinishRemainingAllocations(
+ /*out*/art::MutableHandle<art::mirror::ClassLoader>* source_class_loader,
+ /*out*/art::MutableHandle<art::mirror::Object>* source_dex_file_obj,
+ /*out*/art::MutableHandle<art::mirror::LongArray>* new_dex_file_cookie,
+ /*out*/art::MutableHandle<art::mirror::DexCache>* new_dex_cache)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
+ REQUIRES(art::Locks::mutator_lock_);
+
+ void FillObsoleteMethodMap(
+ art::mirror::Class* art_klass,
+ const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes)
+ REQUIRES(art::Locks::mutator_lock_);
+
+
+ // Checks that the dex file contains only the single expected class and that the top-level class
+ // data has not been modified in an incompatible manner.
+ bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // Preallocates all needed allocations in klass so that we can pause execution safely.
+ // TODO We should be able to free the arrays if they end up not being used. Investigate doing
+ // this in the future. For now we will just take the memory hit.
+ bool EnsureClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // This will check that no constraints are violated (more than 1 class in dex file, any changes
+ // in number/declaration of methods & fields, changes in access flags, etc.)
+ bool CheckRedefinitionIsValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // Checks that the class can even be redefined.
+ bool CheckRedefinable() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // Checks that the dex file does not add/remove methods.
+ bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ LOG(WARNING) << "methods are not checked for modification currently";
+ return true;
+ }
+
+ // Checks that the dex file does not modify fields
+ bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ LOG(WARNING) << "Fields are not checked for modification currently";
+ return true;
+ }
+
+ void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
+ art::ObjPtr<art::mirror::LongArray> new_cookie)
+ REQUIRES(art::Locks::mutator_lock_);
+
+ void UpdateFields(art::ObjPtr<art::mirror::Class> mclass)
+ REQUIRES(art::Locks::mutator_lock_);
+
+ void UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
+ art::ObjPtr<art::mirror::DexCache> new_dex_cache,
+ const art::DexFile::ClassDef& class_def)
+ REQUIRES(art::Locks::mutator_lock_);
+
+ void UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
+ art::ObjPtr<art::mirror::DexCache> new_dex_cache)
+ REQUIRES(art::Locks::mutator_lock_);
+
+ void ReleaseDexFile() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ private:
+ Redefiner* driver_;
+ jclass klass_;
+ std::unique_ptr<const art::DexFile> dex_file_;
+ std::string class_sig_;
+ };
+
jvmtiError result_;
art::Runtime* runtime_;
art::Thread* self_;
+ std::vector<ClassRedefinition> redefinitions_;
// Kept as a jclass since we have weird run-state changes that make keeping it around as a
// mirror::Class difficult and confusing.
- jclass klass_;
- std::unique_ptr<const art::DexFile> dex_file_;
std::string* error_msg_;
- char* class_sig_;
// TODO Maybe change jclass to a mirror::Class
Redefiner(art::Runtime* runtime,
art::Thread* self,
- jclass klass,
- char* class_sig,
- std::unique_ptr<const art::DexFile>& redefined_dex_file,
std::string* error_msg)
: result_(ERR(INTERNAL)),
runtime_(runtime),
self_(self),
- klass_(klass),
- dex_file_(std::move(redefined_dex_file)),
- error_msg_(error_msg),
- class_sig_(class_sig) { }
+ redefinitions_(),
+ error_msg_(error_msg) { }
+
+ jvmtiError AddRedefinition(ArtJvmTiEnv* env, const jvmtiClassDefinition& def)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
static jvmtiError GetClassRedefinitionError(art::Handle<art::mirror::Class> klass,
/*out*/std::string* error_msg)
@@ -114,23 +218,17 @@
static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
jint data_len,
- unsigned char* dex_data,
+ const unsigned char* dex_data,
std::string* error_msg);
// TODO Put on all the lock qualifiers.
jvmtiError Run() REQUIRES_SHARED(art::Locks::mutator_lock_);
- bool FinishRemainingAllocations(
- /*out*/art::MutableHandle<art::mirror::ClassLoader>* source_class_loader,
- /*out*/art::MutableHandle<art::mirror::Object>* source_dex_file_obj,
- /*out*/art::MutableHandle<art::mirror::LongArray>* new_dex_file_cookie,
- /*out*/art::MutableHandle<art::mirror::DexCache>* new_dex_cache)
+ bool CheckAllRedefinitionAreValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool EnsureAllClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool FinishAllRemainingAllocations(RedefinitionDataHolder& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- // Preallocates all needed allocations in klass so that we can pause execution safely.
- // TODO We should be able to free the arrays if they end up not being used. Investigate doing this
- // in the future. For now we will just take the memory hit.
- bool EnsureClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void ReleaseAllDexFiles() REQUIRES_SHARED(art::Locks::mutator_lock_);
// Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
// pointers to their ArtMethods stashed in registers that they then use to attempt to hit the
@@ -140,70 +238,12 @@
REQUIRES(!art::Locks::thread_list_lock_,
!art::Locks::classlinker_classes_lock_);
- art::mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- // This finds the java.lang.DexFile we will add the native DexFile to as part of the classpath.
- // TODO Make sure the DexFile object returned is the one that the klass_ actually comes from.
- art::mirror::Object* FindSourceDexFileObject(art::Handle<art::mirror::ClassLoader> loader)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- art::mirror::Class* GetMirrorClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- // Allocates and fills the new DexFileCookie
- art::mirror::LongArray* AllocateDexFileCookie(art::Handle<art::mirror::Object> java_dex_file_obj)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- art::mirror::DexCache* CreateNewDexCache(art::Handle<art::mirror::ClassLoader> loader)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- void RecordFailure(jvmtiError result, const std::string& error_msg);
-
- // This will check that no constraints are violated (more than 1 class in dex file, any changes in
- // number/declaration of methods & fields, changes in access flags, etc.)
- bool CheckRedefinitionIsValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- // Checks that the class can even be redefined.
- bool CheckRedefinable() REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- // Checks that the dex file does not add/remove methods.
- bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_) {
- LOG(WARNING) << "methods are not checked for modification currently";
- return true;
+ void RecordFailure(jvmtiError result, const std::string& class_sig, const std::string& error_msg);
+ void RecordFailure(jvmtiError result, const std::string& error_msg) {
+ RecordFailure(result, "NO CLASS", error_msg);
}
- // Checks that the dex file does not modify fields
- bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_) {
- LOG(WARNING) << "Fields are not checked for modification currently";
- return true;
- }
-
- // Checks that the dex file contains only the single expected class and that the top-level class
- // data has not been modified in an incompatible manner.
- bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
- art::ObjPtr<art::mirror::LongArray> new_cookie,
- /*out*/art::ObjPtr<art::mirror::LongArray>* original_cookie)
- REQUIRES(art::Locks::mutator_lock_);
-
- void UpdateFields(art::ObjPtr<art::mirror::Class> mclass)
- REQUIRES(art::Locks::mutator_lock_);
-
- void UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
- art::ObjPtr<art::mirror::DexCache> new_dex_cache,
- const art::DexFile::ClassDef& class_def)
- REQUIRES(art::Locks::mutator_lock_);
-
- void UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
- art::ObjPtr<art::mirror::DexCache> new_dex_cache)
- REQUIRES(art::Locks::mutator_lock_);
-
- void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
- REQUIRES(art::Locks::mutator_lock_);
-
- void FillObsoleteMethodMap(art::mirror::Class* art_klass,
- const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes)
- REQUIRES(art::Locks::mutator_lock_);
+ friend struct CallbackCtx;
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
index 579fb50..4cf55a6 100644
--- a/runtime/openjdkjvmti/ti_stack.cc
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -31,29 +31,37 @@
#include "ti_stack.h"
+#include <algorithm>
+#include <list>
+#include <unordered_map>
+#include <vector>
+
#include "art_jvmti.h"
#include "art_method-inl.h"
+#include "base/bit_utils.h"
#include "base/enums.h"
+#include "base/mutex.h"
#include "dex_file.h"
#include "dex_file_annotations.h"
+#include "handle_scope-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "scoped_thread_state_change-inl.h"
+#include "ScopedLocalRef.h"
#include "stack.h"
-#include "thread.h"
+#include "thread-inl.h"
+#include "thread_list.h"
#include "thread_pool.h"
namespace openjdkjvmti {
struct GetStackTraceVisitor : public art::StackVisitor {
GetStackTraceVisitor(art::Thread* thread_in,
- art::ScopedObjectAccessAlreadyRunnable& soa_,
size_t start_,
size_t stop_)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- soa(soa_),
start(start_),
stop(stop_) {}
@@ -85,7 +93,6 @@
return true;
}
- art::ScopedObjectAccessAlreadyRunnable& soa;
std::vector<jvmtiFrameInfo> frames;
size_t start;
size_t stop;
@@ -99,10 +106,8 @@
start_result(0),
stop_result(0) {}
- void Run(art::Thread* self) OVERRIDE {
- art::ScopedObjectAccess soa(art::Thread::Current());
-
- GetStackTraceVisitor visitor(self, soa, start_input, stop_input);
+ void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ GetStackTraceVisitor visitor(self, start_input, stop_input);
visitor.WalkStack(false);
frames.swap(visitor.frames);
@@ -118,24 +123,81 @@
size_t stop_result;
};
+static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
+ jint start_depth,
+ size_t start_result,
+ jint max_frame_count,
+ jvmtiFrameInfo* frame_buffer,
+ jint* count_ptr) {
+ size_t collected_frames = frames.size();
+
+ // Assume we're here having collected something.
+ DCHECK_GT(max_frame_count, 0);
+
+ // Frames from the top.
+ if (start_depth >= 0) {
+ if (start_result != 0) {
+ // Not enough frames.
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
+ if (frames.size() > 0) {
+ memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
+ }
+ *count_ptr = static_cast<jint>(frames.size());
+ return ERR(NONE);
+ }
+
+ // Frames from the bottom.
+ if (collected_frames < static_cast<size_t>(-start_depth)) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+
+ size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
+ memcpy(frame_buffer,
+ &frames.data()[collected_frames + start_depth],
+ count * sizeof(jvmtiFrameInfo));
+ *count_ptr = static_cast<jint>(count);
+ return ERR(NONE);
+}
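
For readers new to the JVMTI depth convention, the positive/negative start_depth split handled above can be compressed into a few lines. The following standalone sketch mirrors that indexing with plain ints standing in for jvmtiFrameInfo; the helper name and types are made up for illustration only:

    #include <algorithm>
    #include <cstring>
    #include <vector>

    // Copies 'frames' into 'out' with the same start_depth semantics as the helper
    // above: for a non-negative start_depth the walker has already collected at most
    // max_count frames from the top; a negative start_depth selects the bottom-most
    // |start_depth| frames, capped at max_count.
    static size_t CopyFrames(const std::vector<int>& frames, int start_depth,
                             int max_count, int* out) {
      if (start_depth >= 0) {
        if (!frames.empty()) {
          std::memcpy(out, frames.data(), frames.size() * sizeof(int));
        }
        return frames.size();
      }
      size_t want = static_cast<size_t>(-start_depth);
      if (want > frames.size()) {
        return 0;  // The real helper reports ILLEGAL_ARGUMENT here.
      }
      size_t count = std::min(want, static_cast<size_t>(max_count));
      std::memcpy(out, frames.data() + (frames.size() - want), count * sizeof(int));
      return count;
    }
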
+
+static jvmtiError GetThread(JNIEnv* env, jthread java_thread, art::Thread** thread) {
+ if (java_thread == nullptr) {
+ *thread = art::Thread::Current();
+ if (*thread == nullptr) {
+ // GetStackTrace can only be run during the live phase, so the current thread should be
+ // attached and thus available. Getting a null for current means we're starting up or
+ // dying.
+ return ERR(WRONG_PHASE);
+ }
+ } else {
+ if (!env->IsInstanceOf(java_thread, art::WellKnownClasses::java_lang_Thread)) {
+ return ERR(INVALID_THREAD);
+ }
+
+ // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ *thread = art::Thread::FromManagedThread(soa, java_thread);
+ if (*thread == nullptr) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+ }
+ return ERR(NONE);
+}
+
jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
jthread java_thread,
jint start_depth,
jint max_frame_count,
jvmtiFrameInfo* frame_buffer,
jint* count_ptr) {
- if (java_thread == nullptr) {
- return ERR(INVALID_THREAD);
- }
-
art::Thread* thread;
- {
- // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
- art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
- thread = art::Thread::FromManagedThread(soa, java_thread);
- DCHECK(thread != nullptr);
+ jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
+ if (thread_error != ERR(NONE)) {
+ return thread_error;
}
+ DCHECK(thread != nullptr);
art::ThreadState state = thread->GetState();
if (state == art::ThreadState::kStarting ||
@@ -157,35 +219,506 @@
}
GetStackTraceClosure closure(start_depth >= 0 ? static_cast<size_t>(start_depth) : 0,
- start_depth >= 0 ?static_cast<size_t>(max_frame_count) : 0);
+ start_depth >= 0 ? static_cast<size_t>(max_frame_count) : 0);
thread->RequestSynchronousCheckpoint(&closure);
- size_t collected_frames = closure.frames.size();
+ return TranslateFrameVector(closure.frames,
+ start_depth,
+ closure.start_result,
+ max_frame_count,
+ frame_buffer,
+ count_ptr);
+}
- // Frames from the top.
- if (start_depth >= 0) {
- if (closure.start_result != 0) {
- // Not enough frames.
- return ERR(ILLEGAL_ARGUMENT);
- }
- DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
- if (closure.frames.size() > 0) {
- memcpy(frame_buffer, closure.frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
- }
- *count_ptr = static_cast<jint>(closure.frames.size());
- return ERR(NONE);
+struct GetAllStackTraceClosure : public art::Closure {
+ public:
+ explicit GetAllStackTraceClosure(size_t stop)
+ : start_input(0),
+ stop_input(stop),
+ frames_lock("GetAllStackTraceGuard", art::LockLevel::kAbortLock),
+ start_result(0),
+ stop_result(0) {}
+
+ void Run(art::Thread* self)
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) REQUIRES(!frames_lock) {
+ // self should be live here (so it could be suspended). No need to filter.
+
+ art::Thread* current = art::Thread::Current();
+ std::vector<jvmtiFrameInfo> self_frames;
+
+ GetStackTraceVisitor visitor(self, start_input, stop_input);
+ visitor.WalkStack(false);
+
+ self_frames.swap(visitor.frames);
+
+ art::MutexLock mu(current, frames_lock);
+ frames.emplace(self, self_frames);
}
- // Frames from the bottom.
- if (collected_frames < static_cast<size_t>(-start_depth)) {
+ const size_t start_input;
+ const size_t stop_input;
+
+ art::Mutex frames_lock;
+ std::unordered_map<art::Thread*, std::vector<jvmtiFrameInfo>> frames GUARDED_BY(frames_lock);
+ size_t start_result;
+ size_t stop_result;
+};
+
+jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
+ jint max_frame_count,
+ jvmtiStackInfo** stack_info_ptr,
+ jint* thread_count_ptr) {
+ if (max_frame_count < 0) {
return ERR(ILLEGAL_ARGUMENT);
}
+ if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
- size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
- memcpy(frame_buffer,
- &closure.frames.data()[collected_frames + start_depth],
- count * sizeof(jvmtiFrameInfo));
- *count_ptr = static_cast<jint>(count);
+
+ art::Thread* current = art::Thread::Current();
+ art::ScopedObjectAccess soa(current); // Now we know we have the shared lock.
+ art::ScopedThreadSuspension sts(current, art::kWaitingForDebuggerSuspension);
+ art::ScopedSuspendAll ssa("GetAllStackTraces");
+
+ std::vector<art::Thread*> threads;
+ std::vector<std::vector<jvmtiFrameInfo>> frames;
+ {
+ std::list<art::Thread*> thread_list;
+ {
+ art::MutexLock mu(current, *art::Locks::thread_list_lock_);
+ thread_list = art::Runtime::Current()->GetThreadList()->GetList();
+ }
+
+ for (art::Thread* thread : thread_list) {
+ // Skip threads that are still starting.
+ if (thread->IsStillStarting()) {
+ continue;
+ }
+
+ GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
+ thread->RequestSynchronousCheckpoint(&closure);
+
+ threads.push_back(thread);
+ frames.emplace_back();
+ frames.back().swap(closure.frames);
+ }
+ }
+
+ // Convert the data into our output format. Note: we need to keep the threads suspended,
+ // as we need to access them for their peers.
+
+ // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
+ // allocate one big chunk for this and the actual frames, which means we need
+ // to either be conservative or rearrange things later (the latter is implemented).
+ std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[frames.size()]);
+ std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
+ frame_infos.reserve(frames.size());
+
+ // Now run through and add data for each thread.
+ size_t sum_frames = 0;
+ for (size_t index = 0; index < frames.size(); ++index) {
+ jvmtiStackInfo& stack_info = stack_info_array.get()[index];
+ memset(&stack_info, 0, sizeof(jvmtiStackInfo));
+
+ art::Thread* self = threads[index];
+ const std::vector<jvmtiFrameInfo>& thread_frames = frames[index];
+
+ // For the time being, set the thread to null. We don't have good ScopedLocalRef
+ // infrastructure.
+ DCHECK(self->GetPeer() != nullptr);
+ stack_info.thread = nullptr;
+ stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
+
+ size_t collected_frames = thread_frames.size();
+ if (max_frame_count == 0 || collected_frames == 0) {
+ stack_info.frame_count = 0;
+ stack_info.frame_buffer = nullptr;
+ continue;
+ }
+ DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
+
+ jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
+ frame_infos.emplace_back(frame_info);
+
+ jint count;
+ jvmtiError translate_result = TranslateFrameVector(thread_frames,
+ 0,
+ 0,
+ static_cast<jint>(collected_frames),
+ frame_info,
+ &count);
+ DCHECK(translate_result == JVMTI_ERROR_NONE);
+ stack_info.frame_count = static_cast<jint>(collected_frames);
+ stack_info.frame_buffer = frame_info;
+ sum_frames += static_cast<size_t>(count);
+ }
+
+ // No errors, yet. Now put it all into an output buffer.
+ size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * frames.size(),
+ alignof(jvmtiFrameInfo));
+ size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
+ unsigned char* chunk_data;
+ jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
+ if (alloc_result != ERR(NONE)) {
+ return alloc_result;
+ }
+
+ jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
+ // First copy in all the basic data.
+ memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * frames.size());
+
+ // Now copy the frames and fix up the pointers.
+ jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
+ chunk_data + rounded_stack_info_size);
+ for (size_t i = 0; i < frames.size(); ++i) {
+ jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
+ jvmtiStackInfo& new_stack_info = stack_info[i];
+
+ jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(threads[i]->GetPeer());
+ new_stack_info.thread = thread_peer;
+
+ if (old_stack_info.frame_count > 0) {
+ // Only copy when there's data - leave the nullptr alone.
+ size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
+ memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
+ new_stack_info.frame_buffer = frame_info;
+ frame_info += old_stack_info.frame_count;
+ }
+ }
+
+ *stack_info_ptr = stack_info;
+ *thread_count_ptr = static_cast<jint>(frames.size());
+
+ return ERR(NONE);
+}
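
On the agent side, the single-chunk contract this function implements means one Deallocate call releases both the jvmtiStackInfo array and all frame buffers. A minimal, hypothetical usage sketch against the standard jvmti.h interface (the helper name and the frame limit are made up):

    #include <jvmti.h>
    #include <stdio.h>

    // Hypothetical agent helper: report how many frames each live thread has.
    static void DumpAllTraces(jvmtiEnv* jvmti) {
      jvmtiStackInfo* stack_info = nullptr;
      jint thread_count = 0;
      if (jvmti->GetAllStackTraces(16, &stack_info, &thread_count) != JVMTI_ERROR_NONE) {
        return;
      }
      for (jint i = 0; i < thread_count; ++i) {
        printf("thread %d: %d frames (state 0x%x)\n", i, stack_info[i].frame_count,
               static_cast<unsigned int>(stack_info[i].state));
      }
      // Everything returned lives in one Allocate()d chunk.
      jvmti->Deallocate(reinterpret_cast<unsigned char*>(stack_info));
    }
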
+
+jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
+ jint thread_count,
+ const jthread* thread_list,
+ jint max_frame_count,
+ jvmtiStackInfo** stack_info_ptr) {
+ if (max_frame_count < 0) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ if (thread_count < 0) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ if (thread_count == 0) {
+ *stack_info_ptr = nullptr;
+ return ERR(NONE);
+ }
+  if (thread_list == nullptr || stack_info_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::Thread* current = art::Thread::Current();
+ art::ScopedObjectAccess soa(current); // Now we know we have the shared lock.
+
+ // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
+ art::VariableSizedHandleScope hs(current);
+ std::vector<art::Handle<art::mirror::Object>> handles;
+ for (jint i = 0; i != thread_count; ++i) {
+ if (thread_list[i] == nullptr) {
+ return ERR(INVALID_THREAD);
+ }
+ if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
+ return ERR(INVALID_THREAD);
+ }
+ handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
+ }
+
+ std::vector<art::Thread*> threads;
+ std::vector<size_t> thread_list_indices;
+ std::vector<std::vector<jvmtiFrameInfo>> frames;
+
+ {
+ art::ScopedThreadSuspension sts(current, art::kWaitingForDebuggerSuspension);
+ art::ScopedSuspendAll ssa("GetThreadListStackTraces");
+
+ {
+ std::list<art::Thread*> art_thread_list;
+ {
+ art::MutexLock mu(current, *art::Locks::thread_list_lock_);
+ art_thread_list = art::Runtime::Current()->GetThreadList()->GetList();
+ }
+
+ for (art::Thread* thread : art_thread_list) {
+ if (thread->IsStillStarting()) {
+ // Skip this. We can't get the jpeer, and if it is for a thread in the thread_list,
+ // we'll just report STARTING.
+ continue;
+ }
+
+ // Get the peer, and check whether we know it.
+ art::ObjPtr<art::mirror::Object> peer = thread->GetPeer();
+ for (size_t index = 0; index != handles.size(); ++index) {
+ if (peer == handles[index].Get()) {
+ // Found the thread.
+ GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
+ thread->RequestSynchronousCheckpoint(&closure);
+
+ threads.push_back(thread);
+ thread_list_indices.push_back(index);
+ frames.emplace_back();
+ frames.back().swap(closure.frames);
+
+ continue;
+ }
+ }
+
+        // Must be either not started or dead. We'll deal with it at the end.
+ }
+ }
+ }
+
+ // Convert the data into our output format.
+
+ // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
+ // allocate one big chunk for this and the actual frames, which means we need
+ // to either be conservative or rearrange things later (the latter is implemented).
+ std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[frames.size()]);
+ std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
+ frame_infos.reserve(frames.size());
+
+ // Now run through and add data for each thread.
+ size_t sum_frames = 0;
+ for (size_t index = 0; index < frames.size(); ++index) {
+ jvmtiStackInfo& stack_info = stack_info_array.get()[index];
+ memset(&stack_info, 0, sizeof(jvmtiStackInfo));
+
+ art::Thread* self = threads[index];
+ const std::vector<jvmtiFrameInfo>& thread_frames = frames[index];
+
+ // For the time being, set the thread to null. We don't have good ScopedLocalRef
+ // infrastructure.
+ DCHECK(self->GetPeer() != nullptr);
+ stack_info.thread = nullptr;
+ stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
+
+ size_t collected_frames = thread_frames.size();
+ if (max_frame_count == 0 || collected_frames == 0) {
+ stack_info.frame_count = 0;
+ stack_info.frame_buffer = nullptr;
+ continue;
+ }
+ DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
+
+ jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
+ frame_infos.emplace_back(frame_info);
+
+ jint count;
+ jvmtiError translate_result = TranslateFrameVector(thread_frames,
+ 0,
+ 0,
+ static_cast<jint>(collected_frames),
+ frame_info,
+ &count);
+ DCHECK(translate_result == JVMTI_ERROR_NONE);
+ stack_info.frame_count = static_cast<jint>(collected_frames);
+ stack_info.frame_buffer = frame_info;
+ sum_frames += static_cast<size_t>(count);
+ }
+
+  // No errors yet. Now put it all into an output buffer. Note that the output is sized by
+  // thread_count here, which may be larger than frames.size().
+ size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
+ alignof(jvmtiFrameInfo));
+ size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
+ unsigned char* chunk_data;
+ jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
+ if (alloc_result != ERR(NONE)) {
+ return alloc_result;
+ }
+
+ jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
+ jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
+ chunk_data + rounded_stack_info_size);
+
+ for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
+ // Check whether we found a running thread for this.
+ // Note: For simplicity, and with the expectation that the list is usually small, use a simple
+ // search. (The list is *not* sorted!)
+ auto it = std::find(thread_list_indices.begin(), thread_list_indices.end(), i);
+ if (it == thread_list_indices.end()) {
+ // No native thread. Must be new or dead. We need to fill out the stack info now.
+ // (Need to read the Java "started" field to know whether this is starting or terminated.)
+ art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
+ art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
+ art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
+ CHECK(started_field != nullptr);
+ bool started = started_field->GetBoolean(peer) != 0;
+ constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
+ constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
+ JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
+ stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
+ stack_info[i].state = started ? kTerminatedState : kStartedState;
+ stack_info[i].frame_count = 0;
+ stack_info[i].frame_buffer = nullptr;
+ } else {
+ // Had a native thread and frames.
+ size_t f_index = it - thread_list_indices.begin();
+
+ jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
+ jvmtiStackInfo& new_stack_info = stack_info[i];
+
+ memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
+ new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
+ if (old_stack_info.frame_count > 0) {
+ // Only copy when there's data - leave the nullptr alone.
+ size_t frames_size =
+ static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
+ memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
+ new_stack_info.frame_buffer = frame_info;
+ frame_info += old_stack_info.frame_count;
+ }
+ }
+ }
+
+  *stack_info_ptr = stack_info;
+
+ return ERR(NONE);
+}
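
The chunk sizing used in both GetAllStackTraces and GetThreadListStackTraces follows the same recipe: round the jvmtiStackInfo array up to the alignment of jvmtiFrameInfo, then append every frame buffer. A standalone sketch of that arithmetic, using stand-in struct shapes rather than the real jvmti.h types:

    #include <cstddef>
    #include <cstdint>

    // Rounds x up to a power-of-two alignment, as art::RoundUp does in the real code.
    constexpr size_t RoundUpTo(size_t x, size_t alignment) {
      return (x + alignment - 1) & ~(alignment - 1);
    }

    // Illustrative stand-ins for jvmtiStackInfo / jvmtiFrameInfo.
    struct FrameInfoLike { uint64_t method; int64_t location; };
    struct StackInfoLike { void* thread; int32_t state; FrameInfoLike* frames; int32_t count; };

    // Size of the single chunk handed to env->Allocate(): the stack-info array,
    // padded to the frame-info alignment, followed by all collected frames.
    constexpr size_t ChunkSize(size_t entry_count, size_t total_frames) {
      return RoundUpTo(sizeof(StackInfoLike) * entry_count, alignof(FrameInfoLike)) +
             total_frames * sizeof(FrameInfoLike);
    }
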
+
+// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
+// runtime methods and transitions must not be counted.
+struct GetFrameCountVisitor : public art::StackVisitor {
+ explicit GetFrameCountVisitor(art::Thread* thread)
+ : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ count(0) {}
+
+ bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ArtMethod* m = GetMethod();
+ const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
+ if (do_count) {
+ count++;
+ }
+ return true;
+ }
+
+ size_t count;
+};
+
+struct GetFrameCountClosure : public art::Closure {
+ public:
+ GetFrameCountClosure() : count(0) {}
+
+ void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ GetFrameCountVisitor visitor(self);
+ visitor.WalkStack(false);
+
+ count = visitor.count;
+ }
+
+ size_t count;
+};
+
+jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jthread java_thread,
+ jint* count_ptr) {
+ art::Thread* thread;
+ jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
+ if (thread_error != ERR(NONE)) {
+ return thread_error;
+ }
+ DCHECK(thread != nullptr);
+
+ if (count_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ GetFrameCountClosure closure;
+ thread->RequestSynchronousCheckpoint(&closure);
+
+ *count_ptr = closure.count;
+ return ERR(NONE);
+}
+
+// Walks up the stack 'n' callers, when used with Thread::WalkStack.
+struct GetLocationVisitor : public art::StackVisitor {
+ GetLocationVisitor(art::Thread* thread, size_t n_in)
+ : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ n(n_in),
+ count(0),
+ caller(nullptr),
+ caller_dex_pc(0) {}
+
+ bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ArtMethod* m = GetMethod();
+ const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
+ if (do_count) {
+ DCHECK(caller == nullptr);
+ if (count == n) {
+ caller = m;
+ caller_dex_pc = GetDexPc(false);
+ return false;
+ }
+ count++;
+ }
+ return true;
+ }
+
+ const size_t n;
+ size_t count;
+ art::ArtMethod* caller;
+ uint32_t caller_dex_pc;
+};
+
+struct GetLocationClosure : public art::Closure {
+ public:
+ explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
+
+ void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ GetLocationVisitor visitor(self, n);
+ visitor.WalkStack(false);
+
+ method = visitor.caller;
+ dex_pc = visitor.caller_dex_pc;
+ }
+
+ const size_t n;
+ art::ArtMethod* method;
+ uint32_t dex_pc;
+};
+
+jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jthread java_thread,
+ jint depth,
+ jmethodID* method_ptr,
+ jlocation* location_ptr) {
+ art::Thread* thread;
+ jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
+ if (thread_error != ERR(NONE)) {
+ return thread_error;
+ }
+ DCHECK(thread != nullptr);
+
+ if (depth < 0) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ if (method_ptr == nullptr || location_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ GetLocationClosure closure(static_cast<size_t>(depth));
+ thread->RequestSynchronousCheckpoint(&closure);
+
+ if (closure.method == nullptr) {
+ return ERR(NO_MORE_FRAMES);
+ }
+
+ *method_ptr = art::jni::EncodeArtMethod(closure.method);
+ if (closure.method->IsNative()) {
+ *location_ptr = -1;
+ } else {
+ if (closure.dex_pc == art::DexFile::kDexNoIndex) {
+ return ERR(INTERNAL);
+ }
+ *location_ptr = static_cast<jlocation>(closure.dex_pc);
+ }
+
return ERR(NONE);
}
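
Taken together, GetFrameCount and GetFrameLocation give an agent cheap access to a single frame without copying a whole trace. A hypothetical agent-side sketch, passing nullptr to select the current thread as the GetThread helper above allows:

    #include <jvmti.h>

    // Hypothetical helper: return the caller of the current frame (depth 1) on the
    // current thread, or nullptr if there is no such frame.
    static jmethodID CallerOfCurrentFrame(jvmtiEnv* jvmti) {
      jint count = 0;
      if (jvmti->GetFrameCount(nullptr, &count) != JVMTI_ERROR_NONE || count < 2) {
        return nullptr;
      }
      jmethodID method = nullptr;
      jlocation location = 0;
      if (jvmti->GetFrameLocation(nullptr, 1, &method, &location) != JVMTI_ERROR_NONE) {
        return nullptr;
      }
      return method;  // location is -1 for native methods, as coded above.
    }
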
diff --git a/runtime/openjdkjvmti/ti_stack.h b/runtime/openjdkjvmti/ti_stack.h
index 1931ed3..6a593cf 100644
--- a/runtime/openjdkjvmti/ti_stack.h
+++ b/runtime/openjdkjvmti/ti_stack.h
@@ -32,18 +32,41 @@
#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_STACK_H_
#define ART_RUNTIME_OPENJDKJVMTI_TI_STACK_H_
+#include "jni.h"
#include "jvmti.h"
+#include "base/mutex.h"
+
namespace openjdkjvmti {
class StackUtil {
public:
+ static jvmtiError GetAllStackTraces(jvmtiEnv* env,
+ jint max_frame_count,
+ jvmtiStackInfo** stack_info_ptr,
+ jint* thread_count_ptr)
+ REQUIRES(!art::Locks::thread_list_lock_);
+
+ static jvmtiError GetFrameCount(jvmtiEnv* env, jthread thread, jint* count_ptr);
+
+ static jvmtiError GetFrameLocation(jvmtiEnv* env,
+ jthread thread,
+ jint depth,
+ jmethodID* method_ptr,
+ jlocation* location_ptr);
+
static jvmtiError GetStackTrace(jvmtiEnv* env,
jthread thread,
jint start_depth,
jint max_frame_count,
jvmtiFrameInfo* frame_buffer,
jint* count_ptr);
+
+ static jvmtiError GetThreadListStackTraces(jvmtiEnv* env,
+ jint thread_count,
+ const jthread* thread_list,
+ jint max_frame_count,
+ jvmtiStackInfo** stack_info_ptr);
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
new file mode 100644
index 0000000..2bcdd8c
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -0,0 +1,446 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "ti_thread.h"
+
+#include "art_field.h"
+#include "art_jvmti.h"
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "gc/system_weak.h"
+#include "gc_root-inl.h"
+#include "jni_internal.h"
+#include "mirror/class.h"
+#include "mirror/object-inl.h"
+#include "mirror/string.h"
+#include "obj_ptr.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+#include "well_known_classes.h"
+
+namespace openjdkjvmti {
+
+jvmtiError ThreadUtil::GetCurrentThread(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread* thread_ptr) {
+ art::Thread* self = art::Thread::Current();
+
+ art::ScopedObjectAccess soa(self);
+
+ jthread thread_peer;
+ if (self->IsStillStarting()) {
+ thread_peer = nullptr;
+ } else {
+ thread_peer = soa.AddLocalReference<jthread>(self->GetPeer());
+ }
+
+ *thread_ptr = thread_peer;
+ return ERR(NONE);
+}
+
+// Read the context classloader from a Java thread object. This is a lazy implementation
+// that assumes GetThreadInfo isn't called too often. If we instead cache the ArtField,
+// we will have to add synchronization as this can't be cached on startup (which is
+// potentially runtime startup).
+static art::ObjPtr<art::mirror::Object> GetContextClassLoader(art::ObjPtr<art::mirror::Object> peer)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (peer == nullptr) {
+ return nullptr;
+ }
+ art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
+ art::ArtField* cc_field = klass->FindDeclaredInstanceField("contextClassLoader",
+ "Ljava/lang/ClassLoader;");
+ CHECK(cc_field != nullptr);
+ return cc_field->GetObject(peer);
+}
+
+// Get the native thread. The spec says a null object denotes the current thread.
+static art::Thread* GetNativeThread(jthread thread,
+ const art::ScopedObjectAccessAlreadyRunnable& soa)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (thread == nullptr) {
+ return art::Thread::Current();
+ }
+
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ return art::Thread::FromManagedThread(soa, thread);
+}
+
+jvmtiError ThreadUtil::GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr) {
+ if (info_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::ScopedObjectAccess soa(art::Thread::Current());
+
+ art::Thread* self = GetNativeThread(thread, soa);
+ if (self == nullptr && thread == nullptr) {
+ return ERR(INVALID_THREAD);
+ }
+
+ JvmtiUniquePtr name_uptr;
+ if (self != nullptr) {
+ // Have a native thread object, this thread is alive.
+ std::string name;
+ self->GetThreadName(name);
+ jvmtiError name_result = CopyString(
+ env, name.c_str(), reinterpret_cast<unsigned char**>(&info_ptr->name));
+ if (name_result != ERR(NONE)) {
+ return name_result;
+ }
+ name_uptr = MakeJvmtiUniquePtr(env, info_ptr->name);
+
+ info_ptr->priority = self->GetNativePriority();
+
+ info_ptr->is_daemon = self->IsDaemon();
+
+ art::ObjPtr<art::mirror::Object> peer = self->GetPeer();
+
+ // ThreadGroup.
+ if (peer != nullptr) {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_group);
+ CHECK(f != nullptr);
+ art::ObjPtr<art::mirror::Object> group = f->GetObject(peer);
+ info_ptr->thread_group = group == nullptr
+ ? nullptr
+ : soa.AddLocalReference<jthreadGroup>(group);
+ } else {
+ info_ptr->thread_group = nullptr;
+ }
+
+ // Context classloader.
+ art::ObjPtr<art::mirror::Object> ccl = GetContextClassLoader(peer);
+ info_ptr->context_class_loader = ccl == nullptr
+ ? nullptr
+ : soa.AddLocalReference<jobject>(ccl);
+ } else {
+ // Only the peer. This thread has either not been started, or is dead. Read things from
+ // the Java side.
+ art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread);
+
+ // Name.
+ {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_name);
+ CHECK(f != nullptr);
+ art::ObjPtr<art::mirror::Object> name = f->GetObject(peer);
+ std::string name_cpp;
+ const char* name_cstr;
+ if (name != nullptr) {
+ name_cpp = name->AsString()->ToModifiedUtf8();
+ name_cstr = name_cpp.c_str();
+ } else {
+ name_cstr = "";
+ }
+ jvmtiError name_result = CopyString(
+ env, name_cstr, reinterpret_cast<unsigned char**>(&info_ptr->name));
+ if (name_result != ERR(NONE)) {
+ return name_result;
+ }
+ name_uptr = MakeJvmtiUniquePtr(env, info_ptr->name);
+ }
+
+ // Priority.
+ {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_priority);
+ CHECK(f != nullptr);
+ info_ptr->priority = static_cast<jint>(f->GetInt(peer));
+ }
+
+ // Daemon.
+ {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_daemon);
+ CHECK(f != nullptr);
+ info_ptr->is_daemon = f->GetBoolean(peer) == 0 ? JNI_FALSE : JNI_TRUE;
+ }
+
+ // ThreadGroup.
+ {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_group);
+ CHECK(f != nullptr);
+ art::ObjPtr<art::mirror::Object> group = f->GetObject(peer);
+ info_ptr->thread_group = group == nullptr
+ ? nullptr
+ : soa.AddLocalReference<jthreadGroup>(group);
+ }
+
+ // Context classloader.
+ art::ObjPtr<art::mirror::Object> ccl = GetContextClassLoader(peer);
+ info_ptr->context_class_loader = ccl == nullptr
+ ? nullptr
+ : soa.AddLocalReference<jobject>(ccl);
+ }
+
+ name_uptr.release();
+
+ return ERR(NONE);
+}
+
+// Return the thread's (or current thread, if null) thread state. Return kStarting in case
+// there's no native counterpart (thread hasn't been started yet, or is dead).
+static art::ThreadState GetNativeThreadState(jthread thread,
+ const art::ScopedObjectAccessAlreadyRunnable& soa,
+ art::Thread** native_thread)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::Thread* self = nullptr;
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ if (thread == nullptr) {
+ self = art::Thread::Current();
+ } else {
+ self = art::Thread::FromManagedThread(soa, thread);
+ }
+ *native_thread = self;
+ if (self == nullptr || self->IsStillStarting()) {
+ return art::ThreadState::kStarting;
+ }
+ return self->GetState();
+}
+
+static jint GetJvmtiThreadStateFromInternal(art::ThreadState internal_thread_state) {
+ jint jvmti_state = JVMTI_THREAD_STATE_ALIVE;
+
+ if (internal_thread_state == art::ThreadState::kSuspended) {
+ jvmti_state |= JVMTI_THREAD_STATE_SUSPENDED;
+ // Note: We do not have data about the previous state. Otherwise we should load the previous
+ // state here.
+ }
+
+ if (internal_thread_state == art::ThreadState::kNative) {
+ jvmti_state |= JVMTI_THREAD_STATE_IN_NATIVE;
+ }
+
+ if (internal_thread_state == art::ThreadState::kRunnable ||
+ internal_thread_state == art::ThreadState::kWaitingWeakGcRootRead ||
+ internal_thread_state == art::ThreadState::kSuspended) {
+ jvmti_state |= JVMTI_THREAD_STATE_RUNNABLE;
+ } else if (internal_thread_state == art::ThreadState::kBlocked) {
+ jvmti_state |= JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
+ } else {
+ // Should be in waiting state.
+ jvmti_state |= JVMTI_THREAD_STATE_WAITING;
+
+ if (internal_thread_state == art::ThreadState::kTimedWaiting ||
+ internal_thread_state == art::ThreadState::kSleeping) {
+ jvmti_state |= JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT;
+ } else {
+ jvmti_state |= JVMTI_THREAD_STATE_WAITING_INDEFINITELY;
+ }
+
+ if (internal_thread_state == art::ThreadState::kSleeping) {
+ jvmti_state |= JVMTI_THREAD_STATE_SLEEPING;
+ }
+
+ if (internal_thread_state == art::ThreadState::kTimedWaiting ||
+ internal_thread_state == art::ThreadState::kWaiting) {
+ jvmti_state |= JVMTI_THREAD_STATE_IN_OBJECT_WAIT;
+ }
+
+ // TODO: PARKED. We'll have to inspect the stack.
+ }
+
+ return jvmti_state;
+}
+
+static jint GetJavaStateFromInternal(art::ThreadState internal_thread_state) {
+ switch (internal_thread_state) {
+ case art::ThreadState::kTerminated:
+ return JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
+
+ case art::ThreadState::kRunnable:
+ case art::ThreadState::kNative:
+ case art::ThreadState::kWaitingWeakGcRootRead:
+ case art::ThreadState::kSuspended:
+ return JVMTI_JAVA_LANG_THREAD_STATE_RUNNABLE;
+
+ case art::ThreadState::kTimedWaiting:
+ case art::ThreadState::kSleeping:
+ return JVMTI_JAVA_LANG_THREAD_STATE_TIMED_WAITING;
+
+ case art::ThreadState::kBlocked:
+ return JVMTI_JAVA_LANG_THREAD_STATE_BLOCKED;
+
+ case art::ThreadState::kStarting:
+ return JVMTI_JAVA_LANG_THREAD_STATE_NEW;
+
+ case art::ThreadState::kWaiting:
+ case art::ThreadState::kWaitingForGcToComplete:
+ case art::ThreadState::kWaitingPerformingGc:
+ case art::ThreadState::kWaitingForCheckPointsToRun:
+ case art::ThreadState::kWaitingForDebuggerSend:
+ case art::ThreadState::kWaitingForDebuggerToAttach:
+ case art::ThreadState::kWaitingInMainDebuggerLoop:
+ case art::ThreadState::kWaitingForDebuggerSuspension:
+ case art::ThreadState::kWaitingForDeoptimization:
+ case art::ThreadState::kWaitingForGetObjectsAllocated:
+ case art::ThreadState::kWaitingForJniOnLoad:
+ case art::ThreadState::kWaitingForSignalCatcherOutput:
+ case art::ThreadState::kWaitingInMainSignalCatcherLoop:
+ case art::ThreadState::kWaitingForMethodTracingStart:
+ case art::ThreadState::kWaitingForVisitObjects:
+ case art::ThreadState::kWaitingForGcThreadFlip:
+ return JVMTI_JAVA_LANG_THREAD_STATE_WAITING;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+}
+
+jvmtiError ThreadUtil::GetThreadState(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jthread thread,
+ jint* thread_state_ptr) {
+ if (thread_state_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::Thread* native_thread = nullptr;
+ art::ThreadState internal_thread_state = GetNativeThreadState(thread, soa, &native_thread);
+
+ if (internal_thread_state == art::ThreadState::kStarting) {
+ if (thread == nullptr) {
+ // No native thread, and no Java thread? We must be starting up. Report as wrong phase.
+ return ERR(WRONG_PHASE);
+ }
+
+ // Need to read the Java "started" field to know whether this is starting or terminated.
+ art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread);
+ art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
+ art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
+ CHECK(started_field != nullptr);
+ bool started = started_field->GetBoolean(peer) != 0;
+ constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
+ constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
+ JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
+ *thread_state_ptr = started ? kTerminatedState : kStartedState;
+ return ERR(NONE);
+ }
+ DCHECK(native_thread != nullptr);
+
+ // Translate internal thread state to JVMTI and Java state.
+ jint jvmti_state = GetJvmtiThreadStateFromInternal(internal_thread_state);
+ if (native_thread->IsInterrupted()) {
+ jvmti_state |= JVMTI_THREAD_STATE_INTERRUPTED;
+ }
+
+ // Java state is derived from nativeGetState.
+  // Note: Our implementation assigns "runnable" to suspended. As such, we will have a slightly
+ // different mask. However, this is for consistency with the Java view.
+ jint java_state = GetJavaStateFromInternal(internal_thread_state);
+
+ *thread_state_ptr = jvmti_state | java_state;
+
+ return ERR(NONE);
+}
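
The returned jint packs both the JVMTI state bits and the java.lang.Thread.State bits computed above, so agents typically mask before comparing. A small, hypothetical decoding sketch using the standard jvmti.h masks:

    #include <jvmti.h>

    // Hypothetical agent-side check: is the thread in a timed wait, judged either by
    // the JVMTI bit or by the packed java.lang.Thread.State value?
    static bool IsWaitingWithTimeout(jvmtiEnv* jvmti, jthread thread) {
      jint state = 0;
      if (jvmti->GetThreadState(thread, &state) != JVMTI_ERROR_NONE) {
        return false;
      }
      jint java_state = state & JVMTI_JAVA_LANG_THREAD_STATE_MASK;
      return (state & JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT) != 0 ||
             java_state == JVMTI_JAVA_LANG_THREAD_STATE_TIMED_WAITING;
    }
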
+
+jvmtiError ThreadUtil::GetAllThreads(jvmtiEnv* env,
+ jint* threads_count_ptr,
+ jthread** threads_ptr) {
+ if (threads_count_ptr == nullptr || threads_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::Thread* current = art::Thread::Current();
+
+ art::ScopedObjectAccess soa(current);
+
+ art::MutexLock mu(current, *art::Locks::thread_list_lock_);
+ std::list<art::Thread*> thread_list = art::Runtime::Current()->GetThreadList()->GetList();
+
+ std::vector<art::ObjPtr<art::mirror::Object>> peers;
+
+ for (art::Thread* thread : thread_list) {
+ // Skip threads that are still starting.
+ if (thread->IsStillStarting()) {
+ continue;
+ }
+
+ art::ObjPtr<art::mirror::Object> peer = thread->GetPeer();
+ if (peer != nullptr) {
+ peers.push_back(peer);
+ }
+ }
+
+ if (peers.empty()) {
+ *threads_count_ptr = 0;
+ *threads_ptr = nullptr;
+ } else {
+ unsigned char* data;
+ jvmtiError data_result = env->Allocate(peers.size() * sizeof(jthread), &data);
+ if (data_result != ERR(NONE)) {
+ return data_result;
+ }
+ jthread* threads = reinterpret_cast<jthread*>(data);
+ for (size_t i = 0; i != peers.size(); ++i) {
+ threads[i] = soa.AddLocalReference<jthread>(peers[i]);
+ }
+
+ *threads_count_ptr = static_cast<jint>(peers.size());
+ *threads_ptr = threads;
+ }
+ return ERR(NONE);
+}
+
+jvmtiError ThreadUtil::SetThreadLocalStorage(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jthread thread,
+ const void* data) {
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::Thread* self = GetNativeThread(thread, soa);
+ if (self == nullptr && thread == nullptr) {
+ return ERR(INVALID_THREAD);
+ }
+ if (self == nullptr) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+
+ self->SetCustomTLS(data);
+
+ return ERR(NONE);
+}
+
+jvmtiError ThreadUtil::GetThreadLocalStorage(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jthread thread,
+ void** data_ptr) {
+ if (data_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::Thread* self = GetNativeThread(thread, soa);
+ if (self == nullptr && thread == nullptr) {
+ return ERR(INVALID_THREAD);
+ }
+ if (self == nullptr) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+
+ *data_ptr = const_cast<void*>(self->GetCustomTLS());
+ return ERR(NONE);
+}
+
+} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_thread.h b/runtime/openjdkjvmti/ti_thread.h
new file mode 100644
index 0000000..290e9d4
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_thread.h
@@ -0,0 +1,56 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_THREAD_H_
+#define ART_RUNTIME_OPENJDKJVMTI_TI_THREAD_H_
+
+#include "jni.h"
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+class ThreadUtil {
+ public:
+ static jvmtiError GetAllThreads(jvmtiEnv* env, jint* threads_count_ptr, jthread** threads_ptr);
+
+ static jvmtiError GetCurrentThread(jvmtiEnv* env, jthread* thread_ptr);
+
+ static jvmtiError GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr);
+
+ static jvmtiError GetThreadState(jvmtiEnv* env, jthread thread, jint* thread_state_ptr);
+
+ static jvmtiError SetThreadLocalStorage(jvmtiEnv* env, jthread thread, const void* data);
+ static jvmtiError GetThreadLocalStorage(jvmtiEnv* env, jthread thread, void** data_ptr);
+};
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_TI_THREAD_H_
diff --git a/runtime/openjdkjvmti/ti_timers.cc b/runtime/openjdkjvmti/ti_timers.cc
new file mode 100644
index 0000000..ce4f551
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_timers.cc
@@ -0,0 +1,81 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "ti_timers.h"
+
+#include <limits>
+
+#include <time.h>
+#include <unistd.h>
+
+#include "art_jvmti.h"
+#include "base/macros.h"
+
+namespace openjdkjvmti {
+
+jvmtiError TimerUtil::GetAvailableProcessors(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jint* processor_count_ptr) {
+ if (processor_count_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ *processor_count_ptr = static_cast<jint>(sysconf(_SC_NPROCESSORS_CONF));
+
+ return ERR(NONE);
+}
+
+jvmtiError TimerUtil::GetTimerInfo(jvmtiEnv* env ATTRIBUTE_UNUSED, jvmtiTimerInfo* info_ptr) {
+ if (info_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ info_ptr->max_value = static_cast<jlong>(std::numeric_limits<uint64_t>::max());
+ info_ptr->may_skip_forward = JNI_TRUE;
+ info_ptr->may_skip_backward = JNI_TRUE;
+ info_ptr->kind = jvmtiTimerKind::JVMTI_TIMER_ELAPSED;
+
+ return ERR(NONE);
+}
+
+jvmtiError TimerUtil::GetTime(jvmtiEnv* env ATTRIBUTE_UNUSED, jlong* nanos_ptr) {
+ if (nanos_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ // Use the same implementation as System.nanoTime.
+ struct timespec now;
+ clock_gettime(CLOCK_MONOTONIC, &now);
+ *nanos_ptr = now.tv_sec * 1000000000LL + now.tv_nsec;
+
+ return ERR(NONE);
+}
+
+} // namespace openjdkjvmti
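
Since GetTime reports the same monotonic clock as System.nanoTime, an agent can use it for simple interval measurements. A hypothetical helper sketch (error handling omitted for brevity):

    #include <jvmti.h>

    // Hypothetical agent-side timing helper built on GetTime: returns the elapsed
    // nanoseconds spent in fn().
    template <typename Fn>
    static jlong TimeNanos(jvmtiEnv* jvmti, Fn fn) {
      jlong start = 0;
      jlong end = 0;
      jvmti->GetTime(&start);
      fn();
      jvmti->GetTime(&end);
      return end - start;
    }
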
diff --git a/runtime/openjdkjvmti/ti_timers.h b/runtime/openjdkjvmti/ti_timers.h
new file mode 100644
index 0000000..6300678
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_timers.h
@@ -0,0 +1,51 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_TIMERS_H_
+#define ART_RUNTIME_OPENJDKJVMTI_TI_TIMERS_H_
+
+#include "jni.h"
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+class TimerUtil {
+ public:
+ static jvmtiError GetAvailableProcessors(jvmtiEnv* env, jint* processor_count_ptr);
+
+ static jvmtiError GetTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr);
+
+ static jvmtiError GetTime(jvmtiEnv* env, jlong* nanos_ptr);
+};
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_TI_TIMERS_H_
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index e1022b0..a72159b 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -763,8 +763,6 @@
"(Enable new and experimental agent support)\n");
UsageMessage(stream, " -Xexperimental:agents"
"(Enable new and experimental agent support)\n");
- UsageMessage(stream, " -Xexperimental:method-handles"
- "(Enable new and experimental method handles support)\n");
UsageMessage(stream, "\n");
UsageMessage(stream, "The following previously supported Dalvik options are ignored:\n");
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2086d70..55e1852 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1364,6 +1364,34 @@
return true;
}
+static bool EnsureJvmtiPlugin(Runtime* runtime,
+ std::vector<Plugin>* plugins,
+ std::string* error_msg) {
+ constexpr const char* plugin_name = kIsDebugBuild ? "libopenjdkjvmtid.so" : "libopenjdkjvmti.so";
+
+ // Is the plugin already loaded?
+  for (const Plugin& p : *plugins) {
+ if (p.GetLibrary() == plugin_name) {
+ return true;
+ }
+ }
+
+ // Is the process debuggable? Otherwise, do not attempt to load the plugin.
+ if (!runtime->IsDebuggable()) {
+ *error_msg = "Process is not debuggable.";
+ return false;
+ }
+
+ Plugin new_plugin = Plugin::Create(plugin_name);
+
+ if (!new_plugin.Load(error_msg)) {
+ return false;
+ }
+
+ plugins->push_back(std::move(new_plugin));
+ return true;
+}
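
The "already loaded?" loop above makes the plugin load idempotent, so repeated AttachAgent calls do not stack duplicate copies of libopenjdkjvmti. A standalone sketch of that check, with illustrative names in place of the Plugin type:

    #include <algorithm>
    #include <string>
    #include <vector>

    // Returns true when 'name' is already present, mirroring the early-return above.
    static bool AlreadyLoaded(const std::vector<std::string>& libs, const std::string& name) {
      return std::find(libs.begin(), libs.end(), name) != libs.end();
    }
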
+
// Attach a new agent and add it to the list of runtime agents
//
// TODO: once we decide on the threading model for agents,
@@ -1371,18 +1399,25 @@
// (and we synchronize access to any shared data structures like "agents_")
//
void Runtime::AttachAgent(const std::string& agent_arg) {
+ std::string error_msg;
+ if (!EnsureJvmtiPlugin(this, &plugins_, &error_msg)) {
+ LOG(WARNING) << "Could not load plugin: " << error_msg;
+ ScopedObjectAccess soa(Thread::Current());
+ ThrowIOException("%s", error_msg.c_str());
+ return;
+ }
+
ti::Agent agent(agent_arg);
int res = 0;
- std::string err;
- ti::Agent::LoadError result = agent.Attach(&res, &err);
+ ti::Agent::LoadError result = agent.Attach(&res, &error_msg);
if (result == ti::Agent::kNoError) {
agents_.push_back(std::move(agent));
} else {
- LOG(ERROR) << "Agent attach failed (result=" << result << ") : " << err;
+ LOG(WARNING) << "Agent attach failed (result=" << result << ") : " << error_msg;
ScopedObjectAccess soa(Thread::Current());
- ThrowWrappedIOException("%s", err.c_str());
+ ThrowIOException("%s", error_msg.c_str());
}
}
@@ -2195,6 +2230,8 @@
gc::ScopedGCCriticalSection gcs(Thread::Current(),
gc::kGcCauseAddRemoveSystemWeakHolder,
gc::kCollectorTypeAddRemoveSystemWeakHolder);
+ // Note: The ScopedGCCriticalSection also ensures that the rest of the function is in
+ // a critical section.
system_weak_holders_.push_back(holder);
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 8fc211c..a87e1c1 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -304,7 +304,7 @@
}
bool IsMethodHandlesEnabled() const {
- return experimental_flags_ & ExperimentalFlags::kMethodHandles;
+ return true;
}
void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index d1970fe..ecabf9a 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -117,7 +117,7 @@
RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
RUNTIME_OPTIONS_KEY (std::string, CpuAbiList)
RUNTIME_OPTIONS_KEY (std::string, Fingerprint)
-RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{none, agents, method-handles}
+RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{none, agents}
RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentLib) // -agentlib:<libname>=<options>, Requires -Xexperimental:agents
RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentPath) // -agentpath:<libname>=<options>, Requires -Xexperimental:agents
RUNTIME_OPTIONS_KEY (std::vector<Plugin>, Plugins) // -Xplugin:<library> Requires -Xexperimental:runtime-plugins
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index a7e7c21..9ebf9a7 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -18,8 +18,9 @@
#include <stdint.h>
+#include "art_method.h"
#include "indenter.h"
-#include "invoke_type.h"
+#include "scoped_thread_state_change-inl.h"
namespace art {
@@ -106,7 +107,7 @@
<< "InlineInfoEncoding"
<< " (method_index_bit_offset=" << static_cast<uint32_t>(kMethodIndexBitOffset)
<< ", dex_pc_bit_offset=" << static_cast<uint32_t>(dex_pc_bit_offset_)
- << ", invoke_type_bit_offset=" << static_cast<uint32_t>(invoke_type_bit_offset_)
+ << ", extra_data_bit_offset=" << static_cast<uint32_t>(extra_data_bit_offset_)
<< ", dex_register_map_bit_offset=" << static_cast<uint32_t>(dex_register_map_bit_offset_)
<< ", total_bit_size=" << static_cast<uint32_t>(total_bit_size_)
<< ")\n";
@@ -230,12 +231,16 @@
vios->Stream()
<< " At depth " << i
<< std::hex
- << " (dex_pc=0x" << GetDexPcAtDepth(inline_info_encoding, i)
- << std::dec
- << ", method_index=" << GetMethodIndexAtDepth(inline_info_encoding, i)
- << ", invoke_type=" << static_cast<InvokeType>(GetInvokeTypeAtDepth(inline_info_encoding,
- i))
- << ")\n";
+ << " (dex_pc=0x" << GetDexPcAtDepth(inline_info_encoding, i);
+ if (EncodesArtMethodAtDepth(inline_info_encoding, i)) {
+ ScopedObjectAccess soa(Thread::Current());
+ vios->Stream() << ", method=" << GetArtMethodAtDepth(inline_info_encoding, i)->PrettyMethod();
+ } else {
+ vios->Stream()
+ << std::dec
+ << ", method_index=" << GetMethodIndexAtDepth(inline_info_encoding, i);
+ }
+ vios->Stream() << ")\n";
if (HasDexRegisterMapAtDepth(inline_info_encoding, i) && (number_of_dex_registers != nullptr)) {
CodeInfoEncoding encoding = code_info.ExtractEncoding();
DexRegisterMap dex_register_map =
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 5e556be..15d7816 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -35,6 +35,7 @@
// Size of Dex virtual registers.
static constexpr size_t kVRegSize = 4;
+class ArtMethod;
class CodeInfo;
class StackMapEncoding;
struct CodeInfoEncoding;
@@ -887,7 +888,7 @@
public:
void SetFromSizes(size_t method_index_max,
size_t dex_pc_max,
- size_t invoke_type_max,
+ size_t extra_data_max,
size_t dex_register_map_size) {
total_bit_size_ = kMethodIndexBitOffset;
total_bit_size_ += MinimumBitsToStore(method_index_max);
@@ -899,8 +900,8 @@
total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
}
- invoke_type_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
- total_bit_size_ += MinimumBitsToStore(invoke_type_max);
+ extra_data_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
+ total_bit_size_ += MinimumBitsToStore(extra_data_max);
// We also need +1 for kNoDexRegisterMap, but since the size is strictly
// greater than any offset we might try to encode, we already implicitly have it.
@@ -912,10 +913,10 @@
return FieldEncoding(kMethodIndexBitOffset, dex_pc_bit_offset_);
}
ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const {
- return FieldEncoding(dex_pc_bit_offset_, invoke_type_bit_offset_, -1 /* min_value */);
+ return FieldEncoding(dex_pc_bit_offset_, extra_data_bit_offset_, -1 /* min_value */);
}
- ALWAYS_INLINE FieldEncoding GetInvokeTypeEncoding() const {
- return FieldEncoding(invoke_type_bit_offset_, dex_register_map_bit_offset_);
+ ALWAYS_INLINE FieldEncoding GetExtraDataEncoding() const {
+ return FieldEncoding(extra_data_bit_offset_, dex_register_map_bit_offset_);
}
ALWAYS_INLINE FieldEncoding GetDexRegisterMapEncoding() const {
return FieldEncoding(dex_register_map_bit_offset_, total_bit_size_, -1 /* min_value */);
@@ -930,7 +931,7 @@
static constexpr uint8_t kIsLastBitOffset = 0;
static constexpr uint8_t kMethodIndexBitOffset = 1;
uint8_t dex_pc_bit_offset_;
- uint8_t invoke_type_bit_offset_;
+ uint8_t extra_data_bit_offset_;
uint8_t dex_register_map_bit_offset_;
uint8_t total_bit_size_;
};
@@ -938,7 +939,11 @@
/**
* Inline information for a specific PC. The information is of the form:
*
- * [is_last, method_index, dex_pc, invoke_type, dex_register_map_offset]+.
+ * [is_last,
+ * method_index (or ArtMethod high bits),
+ * dex_pc,
+ * extra_data (ArtMethod low bits or 1),
+ * dex_register_map_offset]+.
*/
class InlineInfo {
public:
@@ -960,6 +965,7 @@
ALWAYS_INLINE uint32_t GetMethodIndexAtDepth(const InlineInfoEncoding& encoding,
uint32_t depth) const {
+ DCHECK(!EncodesArtMethodAtDepth(encoding, depth));
return encoding.GetMethodIndexEncoding().Load(GetRegionAtDepth(encoding, depth));
}
@@ -980,15 +986,28 @@
encoding.GetDexPcEncoding().Store(GetRegionAtDepth(encoding, depth), dex_pc);
}
- ALWAYS_INLINE uint32_t GetInvokeTypeAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- return encoding.GetInvokeTypeEncoding().Load(GetRegionAtDepth(encoding, depth));
+ ALWAYS_INLINE bool EncodesArtMethodAtDepth(const InlineInfoEncoding& encoding,
+ uint32_t depth) const {
+ return (encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth)) & 1) == 0;
}
- ALWAYS_INLINE void SetInvokeTypeAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth,
- uint32_t invoke_type) {
- encoding.GetInvokeTypeEncoding().Store(GetRegionAtDepth(encoding, depth), invoke_type);
+ ALWAYS_INLINE void SetExtraDataAtDepth(const InlineInfoEncoding& encoding,
+ uint32_t depth,
+ uint32_t extra_data) {
+ encoding.GetExtraDataEncoding().Store(GetRegionAtDepth(encoding, depth), extra_data);
+ }
+
+ ALWAYS_INLINE ArtMethod* GetArtMethodAtDepth(const InlineInfoEncoding& encoding,
+ uint32_t depth) const {
+ uint32_t low_bits = encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth));
+ uint32_t high_bits = encoding.GetMethodIndexEncoding().Load(GetRegionAtDepth(encoding, depth));
+ if (high_bits == 0) {
+ return reinterpret_cast<ArtMethod*>(low_bits);
+ } else {
+ uint64_t address = high_bits;
+ address = address << 32;
+ return reinterpret_cast<ArtMethod*>(address | low_bits);
+ }
}
ALWAYS_INLINE uint32_t GetDexRegisterMapOffsetAtDepth(const InlineInfoEncoding& encoding,
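
The encoding described above ("method_index (or ArtMethod high bits)" and "extra_data (ArtMethod low bits or 1)") uses the low bit of extra_data as a tag: a dex method index stores 1 there, while a resolved method stores the pointer's low 32 bits, whose low bit is 0 because the pointer is at least 2-byte aligned. A self-contained C++ sketch of that packing, mirroring EncodesArtMethodAtDepth/GetArtMethodAtDepth above (InlineEntry and the Encode*/Decode* helpers are hypothetical names, not ART's API):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-ins for the two bit fields described in the comment.
    struct InlineEntry {
      uint32_t method_index_or_high_bits;  // "method_index (or ArtMethod high bits)"
      uint32_t extra_data;                 // "ArtMethod low bits or 1"
    };

    // A dex method index is tagged by storing 1 in extra_data.
    InlineEntry EncodeMethodIndex(uint32_t method_index) {
      return InlineEntry{method_index, 1u};
    }

    // A resolved method pointer is split into low/high 32-bit halves. Its low
    // bit is 0 (methods are at least 2-byte aligned), which distinguishes it
    // from the method-index case.
    InlineEntry EncodeMethodPointer(const void* method) {
      uint64_t address = reinterpret_cast<uintptr_t>(method);
      assert((address & 1) == 0);
      return InlineEntry{static_cast<uint32_t>(address >> 32),
                         static_cast<uint32_t>(address)};
    }

    // Mirrors EncodesArtMethodAtDepth(): low bit 0 means "pointer".
    bool EncodesPointer(const InlineEntry& entry) {
      return (entry.extra_data & 1) == 0;
    }

    // Mirrors GetArtMethodAtDepth(): reassemble the two halves.
    const void* DecodeMethodPointer(const InlineEntry& entry) {
      uint64_t address =
          (static_cast<uint64_t>(entry.method_index_or_high_bits) << 32) | entry.extra_data;
      return reinterpret_cast<const void*>(static_cast<uintptr_t>(address));
    }

    int main() {
      int dummy = 0;
      InlineEntry by_index = EncodeMethodIndex(42);
      InlineEntry by_pointer = EncodeMethodPointer(&dummy);
      std::printf("index entry tagged as index: %d\n", !EncodesPointer(by_index));
      std::printf("pointer entry round-trips:   %d\n",
                  DecodeMethodPointer(by_pointer) == &dummy);
      return 0;
    }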
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a4f0631..40b6d73 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2629,10 +2629,9 @@
QUICK_ENTRY_POINT_INFO(pAllocArray)
QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck)
- QUICK_ENTRY_POINT_INFO(pAllocObject)
QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
- QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck)
+ QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks)
QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray)
QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck)
QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes)
@@ -2728,6 +2727,7 @@
QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
+ QUICK_ENTRY_POINT_INFO(pInvokePolymorphic)
QUICK_ENTRY_POINT_INFO(pTestSuspend)
QUICK_ENTRY_POINT_INFO(pDeliverException)
QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
diff --git a/runtime/thread.h b/runtime/thread.h
index c7acfc4..2b451bc 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1140,6 +1140,14 @@
return debug_disallow_read_barrier_;
}
+ const void* GetCustomTLS() const {
+ return custom_tls_;
+ }
+
+ void SetCustomTLS(const void* data) {
+ custom_tls_ = data;
+ }
+
// Returns true if the current thread is the jit sensitive thread.
bool IsJitSensitiveThread() const {
return this == jit_sensitive_thread_;
@@ -1418,7 +1426,7 @@
stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
- thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_start(nullptr),
+ thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr),
mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
thread_local_alloc_stack_end(nullptr), nested_signal_state(nullptr),
@@ -1537,20 +1545,21 @@
// to avoid additional cost of a mutex and a condition variable, as used in art::Barrier.
AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers];
- // Entrypoint function pointers.
- // TODO: move this to more of a global offset table model to avoid per-thread duplication.
- JniEntryPoints jni_entrypoints;
- QuickEntryPoints quick_entrypoints;
+ // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
+ uint8_t* thread_local_start;
// thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
// potentially better performance.
uint8_t* thread_local_pos;
uint8_t* thread_local_end;
- // Thread-local allocation pointer.
- uint8_t* thread_local_start;
size_t thread_local_objects;
+ // Entrypoint function pointers.
+ // TODO: move this to more of a global offset table model to avoid per-thread duplication.
+ JniEntryPoints jni_entrypoints;
+ QuickEntryPoints quick_entrypoints;
+
// Mterp jump table bases.
void* mterp_current_ibase;
void* mterp_default_ibase;
@@ -1599,6 +1608,10 @@
// Pending extra checkpoints if checkpoint_function_ is already used.
std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
+ // Custom TLS field that can be used by plugins.
+ // TODO: Generalize once we have more plugins.
+ const void* custom_tls_;
+
// True if the thread is allowed to call back into java (for e.g. during class resolution).
// By default this is true.
bool can_call_into_java_;
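
The field reordering above follows the two layout comments: thread_local_pos and thread_local_end must be adjacent so ARM's ldrd can load both with a single instruction, and thread_local_start is moved in front of them to push thread_local_pos onto an 8-byte boundary on 32-bit ARM. A toy C++ mirror of the new ordering that checks the adjacency constraint at compile time (ToyTlsPtr is an illustrative struct, not the real art::Thread layout):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Toy mirror of the reordered fields; not the real art::Thread layout.
    struct ToyTlsPtr {
      uint8_t* thread_local_start;   // placed first to shift thread_local_pos
      uint8_t* thread_local_pos;     // must be adjacent to thread_local_end...
      uint8_t* thread_local_end;     // ...so one ldrd can load both on ARM
      size_t thread_local_objects;
    };

    // The adjacency requirement can be checked at compile time.
    static_assert(offsetof(ToyTlsPtr, thread_local_end) ==
                      offsetof(ToyTlsPtr, thread_local_pos) + sizeof(uint8_t*),
                  "thread_local_pos and thread_local_end must be consecutive");

    int main() {
      // Whether thread_local_pos lands on an 8-byte boundary depends on what
      // precedes it; in the real Thread class that is what moving
      // thread_local_start in front achieves on 32-bit ARM.
      std::printf("offsetof(thread_local_pos) = %zu (8-byte aligned: %d)\n",
                  offsetof(ToyTlsPtr, thread_local_pos),
                  static_cast<int>(offsetof(ToyTlsPtr, thread_local_pos) % 8 == 0));
      return 0;
    }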
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 3b114a9..bb9844a 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -61,7 +61,7 @@
private:
static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' };
- static constexpr uint8_t kVdexVersion[] = { '0', '0', '1', '\0' };
+ static constexpr uint8_t kVdexVersion[] = { '0', '0', '2', '\0' }; // Handle verify-profile
uint8_t magic_[4];
uint8_t version_[4];
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 715b237..25a179b 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3106,19 +3106,16 @@
break;
}
const uint32_t proto_idx = (is_range) ? inst->VRegH_4rcc() : inst->VRegH_45cc();
- const char* descriptor =
+ const char* return_descriptor =
dex_file_->GetReturnTypeDescriptor(dex_file_->GetProtoId(proto_idx));
const RegType& return_type =
- reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
+ reg_types_.FromDescriptor(GetClassLoader(), return_descriptor, false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(this, return_type);
} else {
work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(®_types_));
}
- // TODO(oth): remove when compiler support is available.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER)
- << "invoke-polymorphic is not supported by compiler";
- have_pending_experimental_failure_ = true;
+ just_set_result = true;
break;
}
case Instruction::NEG_INT:
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index a5b275c..507ea16 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -103,6 +103,7 @@
jmethodID WellKnownClasses::java_lang_reflect_Proxy_invoke;
jmethodID WellKnownClasses::java_lang_Runtime_nativeLoad;
jmethodID WellKnownClasses::java_lang_Short_valueOf;
+jmethodID WellKnownClasses::java_lang_String_charAt;
jmethodID WellKnownClasses::java_lang_System_runFinalization = nullptr;
jmethodID WellKnownClasses::java_lang_Thread_dispatchUncaughtException;
jmethodID WellKnownClasses::java_lang_Thread_init;
@@ -337,6 +338,7 @@
java_lang_ref_ReferenceQueue_add = CacheMethod(env, java_lang_ref_ReferenceQueue.get(), true, "add", "(Ljava/lang/ref/Reference;)V");
java_lang_reflect_Parameter_init = CacheMethod(env, java_lang_reflect_Parameter, false, "<init>", "(Ljava/lang/String;ILjava/lang/reflect/Executable;I)V");
+ java_lang_String_charAt = CacheMethod(env, java_lang_String, false, "charAt", "(I)C");
java_lang_Thread_dispatchUncaughtException = CacheMethod(env, java_lang_Thread, false, "dispatchUncaughtException", "(Ljava/lang/Throwable;)V");
java_lang_Thread_init = CacheMethod(env, java_lang_Thread, false, "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
java_lang_Thread_run = CacheMethod(env, java_lang_Thread, false, "run", "()V");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 371be61..b3ce3d1 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -113,6 +113,7 @@
static jmethodID java_lang_reflect_Proxy_invoke;
static jmethodID java_lang_Runtime_nativeLoad;
static jmethodID java_lang_Short_valueOf;
+ static jmethodID java_lang_String_charAt;
static jmethodID java_lang_System_runFinalization;
static jmethodID java_lang_Thread_dispatchUncaughtException;
static jmethodID java_lang_Thread_init;
diff --git a/test/080-oom-throw/expected.txt b/test/080-oom-throw/expected.txt
index 904393b..0967278 100644
--- a/test/080-oom-throw/expected.txt
+++ b/test/080-oom-throw/expected.txt
@@ -1,3 +1,4 @@
Test reflection correctly threw
+Test reflection2 correctly threw
NEW_ARRAY correctly threw OOME
NEW_INSTANCE correctly threw OOME
diff --git a/test/080-oom-throw/src/Main.java b/test/080-oom-throw/src/Main.java
index 0ae92a9..a6c18b7 100644
--- a/test/080-oom-throw/src/Main.java
+++ b/test/080-oom-throw/src/Main.java
@@ -53,6 +53,30 @@
}
}
+ public static Object eatAllMemory() {
+ Object[] result = null;
+ int size = 1000000;
+ while (result == null && size != 0) {
+ try {
+ result = new Object[size];
+ } catch (OutOfMemoryError oome) {
+ size /= 2;
+ }
+ }
+ if (result != null) {
+ int index = 0;
+ while (index != result.length && size != 0) {
+ try {
+ result[index] = new byte[size];
+ ++index;
+ } catch (OutOfMemoryError oome) {
+ size /= 2;
+ }
+ }
+ }
+ return result;
+ }
+
static boolean triggerArrayOOM() {
ArrayMemEater.blowup(new char[128 * 1024][]);
return ArrayMemEater.sawOome;
@@ -74,6 +98,9 @@
if (triggerReflectionOOM()) {
System.out.println("Test reflection correctly threw");
}
+ if (triggerReflectionOOM2()) {
+ System.out.println("Test reflection2 correctly threw");
+ }
if (triggerArrayOOM()) {
System.out.println("NEW_ARRAY correctly threw OOME");
@@ -125,4 +152,20 @@
}
return true;
}
+
+ static boolean triggerReflectionOOM2() {
+ Object memory = eatAllMemory();
+ boolean result = false;
+ try {
+ Main.class.getDeclaredMethods();
+ } catch (OutOfMemoryError e) {
+ result = true;
+ }
+ if (!result) {
+ boolean memoryWasAllocated = (memory != null);
+ memory = null;
+ System.out.println("memoryWasAllocated = " + memoryWasAllocated);
+ }
+ return result;
+ }
}
diff --git a/test/129-ThreadGetId/expected.txt b/test/129-ThreadGetId/expected.txt
index 134d8d0..aadf90d 100644
--- a/test/129-ThreadGetId/expected.txt
+++ b/test/129-ThreadGetId/expected.txt
@@ -1 +1,2 @@
+HeapTaskDaemon depth 0
Finishing
diff --git a/test/129-ThreadGetId/src/Main.java b/test/129-ThreadGetId/src/Main.java
index 9934bba..6ba01ff 100644
--- a/test/129-ThreadGetId/src/Main.java
+++ b/test/129-ThreadGetId/src/Main.java
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+import java.lang.reflect.Field;
import java.util.Map;
public class Main implements Runnable {
@@ -22,6 +23,7 @@
public static void main(String[] args) throws Exception {
final Thread[] threads = new Thread[numberOfThreads];
+ test_getStackTraces();
for (int t = 0; t < threads.length; t++) {
threads[t] = new Thread(new Main());
threads[t].start();
@@ -32,6 +34,43 @@
System.out.println("Finishing");
}
+ static Thread getHeapTaskDaemon() throws Exception {
+ Field f = ThreadGroup.class.getDeclaredField("systemThreadGroup");
+ f.setAccessible(true);
+ ThreadGroup systemThreadGroup = (ThreadGroup) f.get(null);
+
+ while (true) {
+ int activeCount = systemThreadGroup.activeCount();
+ Thread[] array = new Thread[activeCount];
+ systemThreadGroup.enumerate(array);
+ for (Thread thread : array) {
+ if (thread.getName().equals("HeapTaskDaemon") &&
+ thread.getState() != Thread.State.NEW) {
+ return thread;
+ }
+ }
+ // Yield to eventually get the daemon started.
+ Thread.sleep(10);
+ }
+ }
+
+ static void test_getStackTraces() throws Exception {
+ Thread heapDaemon = getHeapTaskDaemon();
+
+ // Force a GC to ensure the daemon truly started.
+ Runtime.getRuntime().gc();
+ // Get the stack traces of all current threads and check the heap task daemon's.
+ Map<Thread, StackTraceElement[]> map = Thread.getAllStackTraces();
+ for (Map.Entry<Thread, StackTraceElement[]> pair : map.entrySet()) {
+ Thread thread = pair.getKey();
+ // Expect an empty stack trace since we do not support suspending the GC thread for
+ // obtaining stack traces. See b/28261069.
+ if (thread == heapDaemon) {
+ System.out.println(thread.getName() + " depth " + pair.getValue().length);
+ }
+ }
+ }
+
public void test_getId() {
if (Thread.currentThread().getId() <= 0) {
System.out.println("current thread's ID is not positive");
diff --git a/test/494-checker-instanceof-tests/src/Main.java b/test/494-checker-instanceof-tests/src/Main.java
index 2eac6c9..acd2305 100644
--- a/test/494-checker-instanceof-tests/src/Main.java
+++ b/test/494-checker-instanceof-tests/src/Main.java
@@ -142,11 +142,11 @@
/// CHECK: LoadClass
/// CHECK: Return [<<Const>>]
public static boolean knownTestWithUnloadedClass() {
- return $inline$returnMain() instanceof String;
+ return $inline$returnUnrelated() instanceof String;
}
- public static Object $inline$returnMain() {
- return new Main();
+ public static Object $inline$returnUnrelated() {
+ return new Unrelated();
}
public static void expect(boolean expected, boolean actual) {
diff --git a/test/496-checker-inlining-class-loader/src/Main.java b/test/496-checker-inlining-class-loader/src/Main.java
index 15d4dc0..5deb77f 100644
--- a/test/496-checker-inlining-class-loader/src/Main.java
+++ b/test/496-checker-inlining-class-loader/src/Main.java
@@ -82,10 +82,10 @@
class LoadedByMyClassLoader {
/// CHECK-START: void LoadedByMyClassLoader.bar() inliner (before)
- /// CHECK: LoadClass
+ /// CHECK: LoadClass class_name:FirstSeenByMyClassLoader
/// CHECK-NEXT: ClinitCheck
/// CHECK-NEXT: InvokeStaticOrDirect
- /// CHECK-NEXT: LoadClass
+ /// CHECK-NEXT: LoadClass class_name:java.lang.System
/// CHECK-NEXT: ClinitCheck
/// CHECK-NEXT: StaticFieldGet
/// CHECK-NEXT: LoadString
@@ -93,10 +93,10 @@
/// CHECK-NEXT: InvokeVirtual
/// CHECK-START: void LoadedByMyClassLoader.bar() inliner (after)
- /// CHECK: LoadClass
+ /// CHECK: LoadClass class_name:FirstSeenByMyClassLoader
/// CHECK-NEXT: ClinitCheck
/* We inlined FirstSeenByMyClassLoader.$inline$bar */
- /// CHECK-NEXT: LoadClass
+ /// CHECK-NEXT: LoadClass class_name:java.lang.System
/// CHECK-NEXT: ClinitCheck
/// CHECK-NEXT: StaticFieldGet
/// CHECK-NEXT: LoadString
@@ -105,12 +105,15 @@
/// CHECK-START: void LoadedByMyClassLoader.bar() register (before)
/* Load and initialize FirstSeenByMyClassLoader */
- /// CHECK: LoadClass gen_clinit_check:true
+ /// CHECK: LoadClass class_name:FirstSeenByMyClassLoader gen_clinit_check:true
/* Load and initialize System */
// There may be MipsComputeBaseMethodAddress here.
- /// CHECK: LoadClass gen_clinit_check:true
- /// CHECK-NEXT: StaticFieldGet
- // There may be HArmDexCacheArraysBase or HX86ComputeBaseMethodAddress here.
+ /// CHECK: LoadClass class_name:java.lang.System
+ // The ClinitCheck may (PIC) or may not (non-PIC) be merged into the LoadClass.
+ // (The merging checks for environment match but HLoadClass/kBootImageAddress
+ // used for non-PIC mode does not have an environment at all.)
+ /// CHECK: StaticFieldGet
+ // There may be HX86ComputeBaseMethodAddress or MipsComputeBaseMethodAddress here.
/// CHECK: LoadString
/// CHECK-NEXT: NullCheck
/// CHECK-NEXT: InvokeVirtual
diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java
index 5fd51e1..89b9cb4 100644
--- a/test/529-checker-unresolved/src/Main.java
+++ b/test/529-checker-unresolved/src/Main.java
@@ -192,13 +192,13 @@
/// CHECK-START: void Main.testLicm(int) licm (before)
/// CHECK: <<Class:l\d+>> LoadClass loop:B2
/// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:B2
- /// CHECK-NEXT: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2
+ /// CHECK-NEXT: <<New:l\d+>> NewInstance [<<Clinit>>] loop:B2
/// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2
/// CHECK-START: void Main.testLicm(int) licm (after)
/// CHECK: <<Class:l\d+>> LoadClass loop:none
/// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:none
- /// CHECK: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2
+ /// CHECK: <<New:l\d+>> NewInstance [<<Clinit>>] loop:B2
/// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2
static public void testLicm(int count) {
// Test to make sure we keep the initialization check after loading an unresolved class.
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index fe6ff13..db43768 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -331,32 +331,32 @@
/// CHECK-START-X86: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
/// CHECK-START-X86_64: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
/// CHECK-START-ARM: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
/// CHECK-START-ARM64: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
/// CHECK-START-MIPS: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
/// CHECK-START-MIPS64: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
public static Class<?> $noinline$getStringClass() {
// Prevent inlining to avoid the string comparison being optimized away.
@@ -369,34 +369,34 @@
/// CHECK: LoadClass load_kind:DexCacheViaMethod class_name:Other
/// CHECK-START-X86: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
- /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-X86: java.lang.Class Main.$noinline$getOtherClass() pc_relative_fixups_x86 (after)
/// CHECK-DAG: X86ComputeBaseMethodAddress
- /// CHECK-DAG: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK-DAG: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-X86_64: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
- /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-ARM: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
- /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-ARM: java.lang.Class Main.$noinline$getOtherClass() dex_cache_array_fixups_arm (after)
/// CHECK-DAG: ArmDexCacheArraysBase
- /// CHECK-DAG: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK-DAG: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-ARM64: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
- /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-MIPS: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
- /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-MIPS: java.lang.Class Main.$noinline$getOtherClass() dex_cache_array_fixups_mips (after)
/// CHECK-DAG: MipsDexCacheArraysBase
- /// CHECK-DAG: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK-DAG: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-MIPS64: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
- /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK: LoadClass load_kind:BssEntry class_name:Other
public static Class<?> $noinline$getOtherClass() {
// Prevent inlining to avoid the string comparison being optimized away.
diff --git a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
index af43973..a30a11a 100644
--- a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
+++ b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
@@ -196,7 +196,7 @@
const-class v0, LMain;
if-ne v0, v2, :exit
:other_loop_entry
- const-class v1, Ljava/lang/Class; # LoadClass that can throw
+ const-class v1, LOther; # LoadClass that can throw
goto :loop_entry
:exit
return-object v0
@@ -250,7 +250,7 @@
const/4 v0, 0
if-ne p0, v0, :other_loop_entry
:loop_entry
- const-class v1, Ljava/lang/Class; # LoadClass that can throw
+ const-class v1, LOther; # LoadClass that can throw
if-ne v0, p0, :exit
:other_loop_entry
sub-int v1, p0, p0
@@ -286,7 +286,7 @@
.method public static licm3(III)I
.registers 4
:loop_entry
- const-class v0, Ljava/lang/Class; # LoadClass that can throw
+ const-class v0, LOther; # LoadClass that can throw
if-ne p1, p2, :exit
goto :loop_body
diff --git a/test/559-checker-irreducible-loop/src/Main.java b/test/559-checker-irreducible-loop/src/Main.java
index ab84f81..023e769 100644
--- a/test/559-checker-irreducible-loop/src/Main.java
+++ b/test/559-checker-irreducible-loop/src/Main.java
@@ -67,3 +67,6 @@
int myField;
}
+
+class Other {
+}
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 00c1b02..b75becf 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -35,8 +35,9 @@
OatQuickMethodHeader* header = nullptr;
// Infinite loop... Test harness will have its own timeout.
while (true) {
- header = OatQuickMethodHeader::FromEntryPoint(method->GetEntryPointFromQuickCompiledCode());
- if (code_cache->ContainsPc(header->GetCode())) {
+ const void* pc = method->GetEntryPointFromQuickCompiledCode();
+ if (code_cache->ContainsPc(pc)) {
+ header = OatQuickMethodHeader::FromEntryPoint(pc);
break;
} else {
// Sleep to yield to the compiler thread.
diff --git a/test/621-checker-new-instance/info.txt b/test/621-checker-new-instance/info.txt
deleted file mode 100644
index c27c45c..0000000
--- a/test/621-checker-new-instance/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Tests for removing useless load class.
diff --git a/test/621-checker-new-instance/src/Main.java b/test/621-checker-new-instance/src/Main.java
deleted file mode 100644
index 68a4644..0000000
--- a/test/621-checker-new-instance/src/Main.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
- /// CHECK-START: java.lang.Object Main.newObject() prepare_for_register_allocation (before)
- /// CHECK: LoadClass
- /// CHECK: NewInstance
-
- /// CHECK-START: java.lang.Object Main.newObject() prepare_for_register_allocation (after)
- /// CHECK-NOT: LoadClass
- /// CHECK: NewInstance
- public static Object newObject() {
- return new Object();
- }
-
- /// CHECK-START: java.lang.Object Main.newFinalizableMayThrow() prepare_for_register_allocation (after)
- /// CHECK: LoadClass
- /// CHECK: NewInstance
- public static Object newFinalizableMayThrow() {
- return $inline$newFinalizableMayThrow();
- }
-
- public static Object $inline$newFinalizableMayThrow() {
- return new FinalizableMayThrow();
- }
-
- public static void main(String[] args) {
- newFinalizableMayThrow();
- newObject();
- }
-}
-
-class FinalizableMayThrow {
- // clinit may throw OOME.
- static Object o = new Object();
- static String s;
- public void finalize() {
- s = "Test";
- }
-}
diff --git a/test/627-checker-unroll/expected.txt b/test/627-checker-unroll/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/627-checker-unroll/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/627-checker-unroll/info.txt b/test/627-checker-unroll/info.txt
new file mode 100644
index 0000000..d7885f4
--- /dev/null
+++ b/test/627-checker-unroll/info.txt
@@ -0,0 +1 @@
+Test on loop unrolling.
diff --git a/test/627-checker-unroll/src/Main.java b/test/627-checker-unroll/src/Main.java
new file mode 100644
index 0000000..9785bdc
--- /dev/null
+++ b/test/627-checker-unroll/src/Main.java
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Test on loop unrolling. Removes loop control overhead (including suspend
+// checks) and exposes more opportunities for constant folding.
+//
+public class Main {
+
+ static int sA = 0;
+
+ /// CHECK-START: void Main.unroll() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>>
+ /// CHECK-DAG: StaticFieldSet loop:<<Loop>>
+ //
+ /// CHECK-START: void Main.unroll() loop_optimization (after)
+ /// CHECK-DAG: StaticFieldSet loop:none
+ //
+ /// CHECK-START: void Main.unroll() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 68 loop:none
+ /// CHECK-DAG: StaticFieldSet [{{l\d+}},<<Int>>] loop:none
+ //
+ /// CHECK-START: void Main.unroll() loop_optimization (after)
+ /// CHECK-NOT: Phi
+ public static void unroll() {
+ for (int i = 4; i < 5; i++) {
+ sA = 17 * i;
+ }
+ }
+
+ /// CHECK-START: int Main.unrollLV() loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>>
+ /// CHECK-DAG: StaticFieldSet loop:<<Loop>>
+ /// CHECK-DAG: Return [<<Phi>>] loop:none
+ //
+ /// CHECK-START: int Main.unrollLV() loop_optimization (after)
+ /// CHECK-DAG: StaticFieldSet loop:none
+ //
+ /// CHECK-START: int Main.unrollLV() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int1:i\d+>> IntConstant 187 loop:none
+ /// CHECK-DAG: <<Int2:i\d+>> IntConstant 12 loop:none
+ /// CHECK-DAG: StaticFieldSet [{{l\d+}},<<Int1>>] loop:none
+ /// CHECK-DAG: Return [<<Int2>>] loop:none
+ //
+ /// CHECK-START: int Main.unrollLV() loop_optimization (after)
+ /// CHECK-NOT: Phi
+ public static int unrollLV() {
+ int i;
+ for (i = 11; i < 12; i++) {
+ sA = 17 * i;
+ }
+ return i;
+ }
+
+ /// CHECK-START: void Main.unrollNest() loop_optimization (before)
+ /// CHECK-DAG: SuspendCheck loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: SuspendCheck loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:<<Loop1>>
+ /// CHECK-DAG: SuspendCheck loop:<<Loop2>> outer_loop:<<Loop1>>
+ /// CHECK-DAG: <<Phi3:i\d+>> Phi loop:<<Loop3:B\d+>> outer_loop:<<Loop2>>
+ /// CHECK-DAG: SuspendCheck loop:<<Loop3>> outer_loop:<<Loop2>>
+ /// CHECK-DAG: StaticFieldSet loop:<<Loop3>> outer_loop:<<Loop2>>
+ //
+ /// CHECK-START: void Main.unrollNest() loop_optimization (after)
+ /// CHECK-DAG: StaticFieldSet loop:none
+ /// CHECK-DAG: SuspendCheck loop:none
+ /// CHECK-NOT: SuspendCheck
+ //
+ /// CHECK-START: void Main.unrollNest() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 6 loop:none
+ /// CHECK-DAG: StaticFieldSet [{{l\d+}},<<Int>>] loop:none
+ //
+ /// CHECK-START: void Main.unrollNest() loop_optimization (after)
+ /// CHECK-NOT: Phi
+ public static void unrollNest() {
+ // Unrolling each loop in turn ultimately removes the complete nest!
+ for (int i = 4; i < 5; i++) {
+ for (int j = 5; j < 6; j++) {
+ for (int k = 6; k < 7; k++) {
+ sA = k;
+ }
+ }
+ }
+ }
+
+ //
+ // Verifier.
+ //
+
+ public static void main(String[] args) {
+ unroll();
+ expectEquals(68, sA);
+ expectEquals(12, unrollLV());
+ expectEquals(187, sA);
+ unrollNest();
+ expectEquals(6, sA);
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
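
The checker lines above assert that, after unrolling and constant folding, each single-iteration loop collapses to a plain constant store (IntConstant 68 for unroll(), 187 plus return 12 for unrollLV(), 6 for unrollNest()). A hand-written C++ analog of what unroll() reduces to (function names are illustrative):

    #include <cstdio>

    static int sA = 0;

    // Original shape: a loop with exactly one iteration (i == 4).
    static void UnrollOriginal() {
      for (int i = 4; i < 5; i++) {
        sA = 17 * i;
      }
    }

    // After unrolling the single iteration and folding 17 * 4, the loop, its
    // induction variable and its loop/suspend checks all disappear.
    static void UnrollFolded() {
      sA = 68;
    }

    int main() {
      UnrollOriginal();
      std::printf("original: sA = %d\n", sA);  // 68
      sA = 0;
      UnrollFolded();
      std::printf("folded:   sA = %d\n", sA);  // 68
      return 0;
    }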
diff --git a/test/633-checker-rtp-getclass/expected.txt b/test/633-checker-rtp-getclass/expected.txt
new file mode 100644
index 0000000..a178d04
--- /dev/null
+++ b/test/633-checker-rtp-getclass/expected.txt
@@ -0,0 +1,3 @@
+2
+3
+6
diff --git a/test/633-checker-rtp-getclass/info.txt b/test/633-checker-rtp-getclass/info.txt
new file mode 100644
index 0000000..e98a0ac
--- /dev/null
+++ b/test/633-checker-rtp-getclass/info.txt
@@ -0,0 +1,3 @@
+Regression test for the RTP pass of the compiler, which
+used the wrong block when bounding a type after an obj.getClass()
+check.
diff --git a/test/633-checker-rtp-getclass/src/Main.java b/test/633-checker-rtp-getclass/src/Main.java
new file mode 100644
index 0000000..f29c139
--- /dev/null
+++ b/test/633-checker-rtp-getclass/src/Main.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ System.out.println($opt$noinline$foo(new Main()));
+ System.out.println($opt$noinline$foo(new SubMain()));
+ System.out.println($opt$noinline$foo(new SubSubMain()));
+ }
+
+
+ // Checker test to make sure the only inlined instruction is
+ // SubMain.bar.
+ /// CHECK-START: int Main.$opt$noinline$foo(Main) inliner (after)
+ /// CHECK-DAG: InvokeVirtual method_name:Main.foo
+ /// CHECK-DAG: <<Const:i\d+>> IntConstant 3
+ /// CHECK: begin_block
+ /// CHECK: BoundType klass:SubMain
+ /// CHECK: Return [<<Const>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: end_block
+ public static int $opt$noinline$foo(Main o) {
+ if (doThrow) { throw new Error(); }
+ // To exercise the bug on Jack, we need two getClass compares.
+ if (o.getClass() == Main.class || o.getClass() != SubMain.class) {
+ return o.foo();
+ } else {
+ // We used to wrongly bound the type of o to `Main` here and then realize that's
+ // impossible and mark this branch as dead.
+ return o.bar();
+ }
+ }
+
+ public int bar() {
+ return 1;
+ }
+
+ public int foo() {
+ return 2;
+ }
+
+ public static boolean doThrow = false;
+}
+
+class SubMain extends Main {
+ public int bar() {
+ return 3;
+ }
+
+ public int foo() {
+ return 4;
+ }
+}
+
+class SubSubMain extends SubMain {
+ public int bar() {
+ return 5;
+ }
+
+ public int foo() {
+ return 6;
+ }
+}
diff --git a/test/634-vdex-duplicate/expected.txt b/test/634-vdex-duplicate/expected.txt
new file mode 100644
index 0000000..557db03
--- /dev/null
+++ b/test/634-vdex-duplicate/expected.txt
@@ -0,0 +1 @@
+Hello World
diff --git a/test/621-checker-new-instance/expected.txt b/test/634-vdex-duplicate/info.txt
similarity index 100%
rename from test/621-checker-new-instance/expected.txt
rename to test/634-vdex-duplicate/info.txt
diff --git a/test/954-invoke-polymorphic-verifier/run b/test/634-vdex-duplicate/run
old mode 100755
new mode 100644
similarity index 71%
copy from test/954-invoke-polymorphic-verifier/run
copy to test/634-vdex-duplicate/run
index a9f1822..1ccb841
--- a/test/954-invoke-polymorphic-verifier/run
+++ b/test/634-vdex-duplicate/run
@@ -1,12 +1,12 @@
#!/bin/bash
#
-# Copyright 2016 The Android Open Source Project
+# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# make us exit on a failure
-set -e
-
-./default-run "$@" --experimental method-handles
+exec ${RUN} -Xcompiler-option --compiler-filter=verify-profile --vdex-filter speed --vdex "${@}"
diff --git a/test/913-heaps/heaps.h b/test/634-vdex-duplicate/src/Main.java
similarity index 68%
copy from test/913-heaps/heaps.h
copy to test/634-vdex-duplicate/src/Main.java
index bd828ac..2283106 100644
--- a/test/913-heaps/heaps.h
+++ b/test/634-vdex-duplicate/src/Main.java
@@ -14,17 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_TEST_913_HEAPS_HEAPS_H_
-#define ART_TEST_913_HEAPS_HEAPS_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test913Heaps {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test913Heaps
-} // namespace art
-
-#endif // ART_TEST_913_HEAPS_HEAPS_H_
+public class Main {
+ public static void main(String[] args) {
+ System.out.println("Hello World");
+ }
+}
diff --git a/test/913-heaps/heaps.h b/test/634-vdex-duplicate/src/sun/misc/Unsafe.java
similarity index 68%
rename from test/913-heaps/heaps.h
rename to test/634-vdex-duplicate/src/sun/misc/Unsafe.java
index bd828ac..c32868c 100644
--- a/test/913-heaps/heaps.h
+++ b/test/634-vdex-duplicate/src/sun/misc/Unsafe.java
@@ -14,17 +14,7 @@
* limitations under the License.
*/
-#ifndef ART_TEST_913_HEAPS_HEAPS_H_
-#define ART_TEST_913_HEAPS_HEAPS_H_
+package sun.misc;
-#include <jni.h>
-
-namespace art {
-namespace Test913Heaps {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test913Heaps
-} // namespace art
-
-#endif // ART_TEST_913_HEAPS_HEAPS_H_
+public class Unsafe {
+}
diff --git a/test/901-hello-ti-agent/basics.cc b/test/901-hello-ti-agent/basics.cc
index 3a475c6..052fb9a 100644
--- a/test/901-hello-ti-agent/basics.cc
+++ b/test/901-hello-ti-agent/basics.cc
@@ -22,6 +22,9 @@
#include "base/macros.h"
#include "openjdkjvmti/jvmti.h"
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
namespace art {
namespace Test901HelloTi {
@@ -72,9 +75,22 @@
CHECK_CALL_SUCCESS(env->DisposeEnvironment());
CHECK_CALL_SUCCESS(env2->DisposeEnvironment());
#undef CHECK_CALL_SUCCESS
+
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ SetAllCapabilities(jvmti_env);
+
return JNI_OK;
}
+extern "C" JNIEXPORT void JNICALL Java_Main_setVerboseFlag(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jint iflag, jboolean val) {
+ jvmtiVerboseFlag flag = static_cast<jvmtiVerboseFlag>(iflag);
+ jvmtiError result = jvmti_env->SetVerboseFlag(flag, val);
+ JvmtiErrorToException(env, result);
+}
} // namespace Test901HelloTi
} // namespace art
diff --git a/test/901-hello-ti-agent/expected.txt b/test/901-hello-ti-agent/expected.txt
index 414eb3b..2aee99b 100644
--- a/test/901-hello-ti-agent/expected.txt
+++ b/test/901-hello-ti-agent/expected.txt
@@ -1,2 +1,8 @@
Loaded Agent for test 901-hello-ti-agent
Hello, world!
+0
+1
+2
+4
+8
+JVMTI_ERROR_ILLEGAL_ARGUMENT
diff --git a/test/901-hello-ti-agent/src/Main.java b/test/901-hello-ti-agent/src/Main.java
index 1ef6289..775e5c2 100644
--- a/test/901-hello-ti-agent/src/Main.java
+++ b/test/901-hello-ti-agent/src/Main.java
@@ -16,6 +16,26 @@
public class Main {
public static void main(String[] args) {
+ System.loadLibrary(args[1]);
+
System.out.println("Hello, world!");
+
+ set(0); // OTHER
+ set(1); // GC
+ set(2); // CLASS
+ set(4); // JNI
+ set(8); // Error.
}
+
+ private static void set(int i) {
+ System.out.println(i);
+ try {
+ setVerboseFlag(i, true);
+ setVerboseFlag(i, false);
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+ }
+
+ private static native void setVerboseFlag(int flag, boolean value);
}
diff --git a/test/903-hello-tagging/tagging.cc b/test/903-hello-tagging/tagging.cc
index 60a31bd..f74c1fc 100644
--- a/test/903-hello-tagging/tagging.cc
+++ b/test/903-hello-tagging/tagging.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "tagging.h"
-
#include <iostream>
#include <pthread.h>
#include <stdio.h>
@@ -141,18 +139,6 @@
return resultArray;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test903HelloTagging
} // namespace art
diff --git a/test/903-hello-tagging/tagging.h b/test/903-hello-tagging/tagging.h
deleted file mode 100644
index f062d44..0000000
--- a/test/903-hello-tagging/tagging.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_903_HELLO_TAGGING_TAGGING_H_
-#define ART_TEST_903_HELLO_TAGGING_TAGGING_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test903HelloTagging {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test903HelloTagging
-} // namespace art
-
-#endif // ART_TEST_903_HELLO_TAGGING_TAGGING_H_
diff --git a/test/904-object-allocation/tracking.cc b/test/904-object-allocation/tracking.cc
index f993606..95eab0c 100644
--- a/test/904-object-allocation/tracking.cc
+++ b/test/904-object-allocation/tracking.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "tracking.h"
-
#include <iostream>
#include <pthread.h>
#include <stdio.h>
@@ -89,19 +87,6 @@
}
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- jvmti_env->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_OBJECT_ALLOC, nullptr);
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test904ObjectAllocation
} // namespace art
diff --git a/test/904-object-allocation/tracking.h b/test/904-object-allocation/tracking.h
deleted file mode 100644
index 21c1837..0000000
--- a/test/904-object-allocation/tracking.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_904_OBJECT_ALLOCATION_TRACKING_H_
-#define ART_TEST_904_OBJECT_ALLOCATION_TRACKING_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test904ObjectAllocation {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test904ObjectAllocation
-} // namespace art
-
-#endif // ART_TEST_904_OBJECT_ALLOCATION_TRACKING_H_
diff --git a/test/905-object-free/tracking_free.cc b/test/905-object-free/tracking_free.cc
index 7f295ac..7b26d79 100644
--- a/test/905-object-free/tracking_free.cc
+++ b/test/905-object-free/tracking_free.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "tracking_free.h"
-
#include <iostream>
#include <pthread.h>
#include <stdio.h>
@@ -82,17 +80,5 @@
return ret;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test905ObjectFree
} // namespace art
diff --git a/test/905-object-free/tracking_free.h b/test/905-object-free/tracking_free.h
deleted file mode 100644
index ba4aa43..0000000
--- a/test/905-object-free/tracking_free.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_905_OBJECT_FREE_TRACKING_FREE_H_
-#define ART_TEST_905_OBJECT_FREE_TRACKING_FREE_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test905ObjectFree {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test905ObjectFree
-} // namespace art
-
-#endif // ART_TEST_905_OBJECT_FREE_TRACKING_FREE_H_
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index a2fd591..1362d47 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "iterate_heap.h"
-
#include <iostream>
#include <pthread.h>
#include <stdio.h>
@@ -174,17 +172,5 @@
Run(heap_filter, klass_filter, &config);
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test906IterateHeap
} // namespace art
diff --git a/test/906-iterate-heap/iterate_heap.h b/test/906-iterate-heap/iterate_heap.h
deleted file mode 100644
index f25cdba..0000000
--- a/test/906-iterate-heap/iterate_heap.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_906_ITERATE_HEAP_ITERATE_HEAP_H_
-#define ART_TEST_906_ITERATE_HEAP_ITERATE_HEAP_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test906IterateHeap {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test906IterateHeap
-} // namespace art
-
-#endif // ART_TEST_906_ITERATE_HEAP_ITERATE_HEAP_H_
diff --git a/test/907-get-loaded-classes/get_loaded_classes.cc b/test/907-get-loaded-classes/get_loaded_classes.cc
index 36d33b6..5bda7eb 100644
--- a/test/907-get-loaded-classes/get_loaded_classes.cc
+++ b/test/907-get-loaded-classes/get_loaded_classes.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "get_loaded_classes.h"
-
#include <iostream>
#include <pthread.h>
#include <stdio.h>
@@ -65,17 +63,5 @@
return ret;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test907GetLoadedClasses
} // namespace art
diff --git a/test/907-get-loaded-classes/get_loaded_classes.h b/test/907-get-loaded-classes/get_loaded_classes.h
deleted file mode 100644
index 4d27f89..0000000
--- a/test/907-get-loaded-classes/get_loaded_classes.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
-#define ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test907GetLoadedClasses {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test907GetLoadedClasses
-} // namespace art
-
-#endif // ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
diff --git a/test/908-gc-start-finish/gc_callbacks.cc b/test/908-gc-start-finish/gc_callbacks.cc
index 1fab79d..59801ff 100644
--- a/test/908-gc-start-finish/gc_callbacks.cc
+++ b/test/908-gc-start-finish/gc_callbacks.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "gc_callbacks.h"
-
#include <stdio.h>
#include <string.h>
@@ -94,17 +92,5 @@
return result;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test908GcStartFinish
} // namespace art
diff --git a/test/908-gc-start-finish/gc_callbacks.h b/test/908-gc-start-finish/gc_callbacks.h
deleted file mode 100644
index 177a4eb..0000000
--- a/test/908-gc-start-finish/gc_callbacks.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_908_GC_START_FINISH_GC_CALLBACKS_H_
-#define ART_TEST_908_GC_START_FINISH_GC_CALLBACKS_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test908GcStartFinish {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test908GcStartFinish
-} // namespace art
-
-#endif // ART_TEST_908_GC_START_FINISH_GC_CALLBACKS_H_
diff --git a/test/909-attach-agent/expected.txt b/test/909-attach-agent/expected.txt
index eacc595..c0bccd6 100644
--- a/test/909-attach-agent/expected.txt
+++ b/test/909-attach-agent/expected.txt
@@ -1,3 +1,11 @@
Hello, world!
Attached Agent for test 909-attach-agent
Goodbye!
+Hello, world!
+Attached Agent for test 909-attach-agent
+Goodbye!
+Hello, world!
+java.io.IOException: Process is not debuggable.
+ at dalvik.system.VMDebug.attachAgent(Native Method)
+ at Main.main(Main.java:27)
+Goodbye!
diff --git a/test/909-attach-agent/run b/test/909-attach-agent/run
index aed6e83..985341b 100755
--- a/test/909-attach-agent/run
+++ b/test/909-attach-agent/run
@@ -24,4 +24,14 @@
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
--android-runtime-option -Xplugin:${plugin} \
+ --android-runtime-option -Xfully-deoptable \
+ --args agent:${agent}=909-attach-agent
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --android-runtime-option -Xfully-deoptable \
+ --args agent:${agent}=909-attach-agent
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
--args agent:${agent}=909-attach-agent
diff --git a/test/910-methods/methods.cc b/test/910-methods/methods.cc
index fa9679d..f60fabb 100644
--- a/test/910-methods/methods.cc
+++ b/test/910-methods/methods.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "methods.h"
-
#include <stdio.h>
#include "base/macros.h"
@@ -207,17 +205,5 @@
return is_synthetic;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test910Methods
} // namespace art
diff --git a/test/910-methods/methods.h b/test/910-methods/methods.h
deleted file mode 100644
index 93d1874..0000000
--- a/test/910-methods/methods.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_910_METHODS_METHODS_H_
-#define ART_TEST_910_METHODS_METHODS_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test910Methods {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test910Methods
-} // namespace art
-
-#endif // ART_TEST_910_METHODS_METHODS_H_
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
index f8c97ce..dad08c9 100644
--- a/test/911-get-stack-trace/expected.txt
+++ b/test/911-get-stack-trace/expected.txt
@@ -4,72 +4,72 @@
From top
---------
getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
- print (Ljava/lang/Thread;II)V 0 124
- printOrWait (IILMain$ControlData;)V 6 151
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- doTest ()V 38 34
- main ([Ljava/lang/String;)V 6 24
+ print (Ljava/lang/Thread;II)V 0 34
+ printOrWait (IILControlData;)V 6 39
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ doTest ()V 38 23
+ main ([Ljava/lang/String;)V 6 21
---------
- print (Ljava/lang/Thread;II)V 0 124
- printOrWait (IILMain$ControlData;)V 6 151
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- doTest ()V 42 35
- main ([Ljava/lang/String;)V 6 24
+ print (Ljava/lang/Thread;II)V 0 34
+ printOrWait (IILControlData;)V 6 39
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ doTest ()V 42 24
+ main ([Ljava/lang/String;)V 6 21
---------
getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
- print (Ljava/lang/Thread;II)V 0 124
- printOrWait (IILMain$ControlData;)V 6 151
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
+ print (Ljava/lang/Thread;II)V 0 34
+ printOrWait (IILControlData;)V 6 39
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
---------
- printOrWait (IILMain$ControlData;)V 6 151
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ printOrWait (IILControlData;)V 6 39
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
From bottom
---------
- main ([Ljava/lang/String;)V 6 24
+ main ([Ljava/lang/String;)V 6 21
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- doTest ()V 65 41
- main ([Ljava/lang/String;)V 6 24
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ doTest ()V 65 30
+ main ([Ljava/lang/String;)V 6 21
---------
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
################################
### Other thread (suspended) ###
@@ -77,132 +77,760 @@
From top
---------
wait ()V -1 -2
- printOrWait (IILMain$ControlData;)V 24 157
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 54
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 26
---------
- printOrWait (IILMain$ControlData;)V 24 157
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 54
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 26
---------
wait ()V -1 -2
- printOrWait (IILMain$ControlData;)V 24 157
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
From bottom
---------
- run ()V 4 54
+ run ()V 4 26
---------
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 54
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 26
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
###########################
### Other thread (live) ###
###########################
From top
---------
- printOrWait (IILMain$ControlData;)V 44 164
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 88
+ printOrWait (IILControlData;)V 44 52
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 59
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 88
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 59
---------
- printOrWait (IILMain$ControlData;)V 44 164
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ printOrWait (IILControlData;)V 44 52
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
---------
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
From bottom
---------
- run ()V 4 88
+ run ()V 4 59
---------
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 88
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 59
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+
+################################
+### Other threads (suspended) ###
+################################
+---------
+FinalizerDaemon
+<not printed>
+---------
+FinalizerWatchdogDaemon
+<not printed>
+---------
+HeapTaskDaemon
+<not printed>
+---------
+ReferenceQueueDaemon
+<not printed>
+---------
+Signal Catcher
+
+---------
+Thread-10
+
+---------
+Thread-11
+
+---------
+Thread-12
+
+---------
+Thread-13
+
+---------
+Thread-4
+
+---------
+Thread-5
+
+---------
+Thread-6
+
+---------
+Thread-7
+
+---------
+Thread-8
+
+---------
+Thread-9
+
+---------
+main
+
+---------
+FinalizerDaemon
+<not printed>
+---------
+FinalizerWatchdogDaemon
+<not printed>
+---------
+HeapTaskDaemon
+<not printed>
+---------
+ReferenceQueueDaemon
+<not printed>
+---------
+Signal Catcher
+
+---------
+Thread-10
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-11
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-12
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-13
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-4
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-5
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-6
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-7
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-8
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-9
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+main
+ getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
+ printAll (I)V 0 73
+ doTest ()V 102 57
+ main ([Ljava/lang/String;)V 30 33
+
+---------
+FinalizerDaemon
+<not printed>
+---------
+FinalizerWatchdogDaemon
+<not printed>
+---------
+HeapTaskDaemon
+<not printed>
+---------
+ReferenceQueueDaemon
+<not printed>
+---------
+Signal Catcher
+
+---------
+Thread-10
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-11
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-12
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-13
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-4
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-5
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-6
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-7
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-8
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-9
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+main
+ getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
+ printAll (I)V 0 73
+ doTest ()V 107 59
+ main ([Ljava/lang/String;)V 30 33
+
+
+########################################
+### Other select threads (suspended) ###
+########################################
+---------
+Thread-14
+
+---------
+Thread-16
+
+---------
+Thread-18
+
+---------
+Thread-20
+
+---------
+Thread-22
+
+---------
+main
+
+---------
+Thread-14
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-16
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-18
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-20
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-22
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+main
+ getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
+ printList ([Ljava/lang/Thread;I)V 0 66
+ doTest ()V 96 52
+ main ([Ljava/lang/String;)V 38 37
+
+---------
+Thread-14
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 35
+
+---------
+Thread-16
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 35
+
+---------
+Thread-18
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 35
+
+---------
+Thread-20
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 35
+
+---------
+Thread-22
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 35
+
+---------
+main
+ getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
+ printList ([Ljava/lang/Thread;I)V 0 66
+ doTest ()V 101 54
+ main ([Ljava/lang/String;)V 38 37
+
+
+###################
+### Same thread ###
+###################
+4
+JVMTI_ERROR_ILLEGAL_ARGUMENT
+[public static native java.lang.Object[] Frames.getFrameLocation(java.lang.Thread,int), ffffffff]
+[public static void Frames.doTestSameThread(), 38]
+[public static void Frames.doTest() throws java.lang.Exception, 0]
+[public static void Main.main(java.lang.String[]) throws java.lang.Exception, 2e]
+JVMTI_ERROR_NO_MORE_FRAMES
+
+################################
+### Other thread (suspended) ###
+################################
+18
+JVMTI_ERROR_ILLEGAL_ARGUMENT
+[public final native void java.lang.Object.wait() throws java.lang.InterruptedException, ffffffff]
+[private static void Recurse.printOrWait(int,int,ControlData), 18]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 2]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[public void Frames$1.run(), 4]
+JVMTI_ERROR_NO_MORE_FRAMES
+
+###########################
+### Other thread (live) ###
+###########################
+17
+JVMTI_ERROR_ILLEGAL_ARGUMENT
+[private static void Recurse.printOrWait(int,int,ControlData), 2c]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 2]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[public void Frames$2.run(), 4]
+JVMTI_ERROR_NO_MORE_FRAMES
+Done
diff --git a/test/911-get-stack-trace/src/AllTraces.java b/test/911-get-stack-trace/src/AllTraces.java
new file mode 100644
index 0000000..adf6f38
--- /dev/null
+++ b/test/911-get-stack-trace/src/AllTraces.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class AllTraces {
+ private final static List<Object> RETAIN = new ArrayList<Object>();
+
+ public static void doTest() throws Exception {
+ System.out.println("################################");
+ System.out.println("### Other threads (suspended) ###");
+ System.out.println("################################");
+
+ // Also create an unstarted and a dead thread.
+ RETAIN.add(new Thread());
+ Thread deadThread = new Thread();
+ RETAIN.add(deadThread);
+ deadThread.start();
+ deadThread.join();
+
+ final int N = 10;
+
+ final ControlData data = new ControlData(N);
+ data.waitFor = new Object();
+
+ Thread threads[] = new Thread[N];
+
+ for (int i = 0; i < N; i++) {
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ threads[i] = t;
+ }
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ printAll(0);
+
+ printAll(5);
+
+ printAll(25);
+
+ // Let the thread make progress and die.
+ synchronized(data.waitFor) {
+ data.waitFor.notifyAll();
+ }
+ for (int i = 0; i < N; i++) {
+ threads[i].join();
+ }
+
+ RETAIN.clear();
+ }
+
+ public static void printAll(int max) {
+ PrintThread.printAll(getAllStackTraces(max));
+ }
+
+ // Get all stack traces. This will return an array with an element for each thread. The element
+ // is an array itself with the first element being the thread, and the second element a nested
+ // String array as in getStackTrace.
+ public static native Object[][] getAllStackTraces(int max);
+}
diff --git a/test/912-classes/classes.h b/test/911-get-stack-trace/src/ControlData.java
similarity index 67%
rename from test/912-classes/classes.h
rename to test/911-get-stack-trace/src/ControlData.java
index 62fb203..76ac4b8 100644
--- a/test/912-classes/classes.h
+++ b/test/911-get-stack-trace/src/ControlData.java
@@ -14,17 +14,18 @@
* limitations under the License.
*/
-#ifndef ART_TEST_912_CLASSES_CLASSES_H_
-#define ART_TEST_912_CLASSES_CLASSES_H_
+import java.util.concurrent.CountDownLatch;
-#include <jni.h>
+public class ControlData {
+ CountDownLatch reached;
+ Object waitFor = null;
+ volatile boolean stop = false;
-namespace art {
-namespace Test912Classes {
+ public ControlData() {
+ this(1);
+ }
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test912Classes
-} // namespace art
-
-#endif // ART_TEST_912_CLASSES_CLASSES_H_
+ public ControlData(int latchCount) {
+ reached = new CountDownLatch(latchCount);
+ }
+}
diff --git a/test/911-get-stack-trace/src/Frames.java b/test/911-get-stack-trace/src/Frames.java
new file mode 100644
index 0000000..a1a11c3
--- /dev/null
+++ b/test/911-get-stack-trace/src/Frames.java
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+
+public class Frames {
+ public static void doTest() throws Exception {
+ doTestSameThread();
+
+ System.out.println();
+
+ doTestOtherThreadWait();
+
+ System.out.println();
+
+ doTestOtherThreadBusyLoop();
+ }
+
+ public static void doTestSameThread() {
+ System.out.println("###################");
+ System.out.println("### Same thread ###");
+ System.out.println("###################");
+
+ Thread t = Thread.currentThread();
+
+ int count = getFrameCount(t);
+ System.out.println(count);
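+    // Depths -1 and count are out of range; the JVMTI error name is printed via the thrown exception.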
+ try {
+ System.out.println(Arrays.toString(getFrameLocation(t, -1)));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+ for (int i = 0; i < count; i++) {
+ System.out.println(Arrays.toString(getFrameLocation(t, i)));
+ }
+ try {
+ System.out.println(Arrays.toString(getFrameLocation(t, count)));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+ }
+
+ public static void doTestOtherThreadWait() throws Exception {
+ System.out.println("################################");
+ System.out.println("### Other thread (suspended) ###");
+ System.out.println("################################");
+ final ControlData data = new ControlData();
+ data.waitFor = new Object();
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ int count = getFrameCount(t);
+ System.out.println(count);
+ try {
+ System.out.println(Arrays.toString(getFrameLocation(t, -1)));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+ for (int i = 0; i < count; i++) {
+ System.out.println(Arrays.toString(getFrameLocation(t, i)));
+ }
+ try {
+ System.out.println(Arrays.toString(getFrameLocation(t, count)));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+
+ // Let the thread make progress and die.
+ synchronized(data.waitFor) {
+ data.waitFor.notifyAll();
+ }
+ t.join();
+ }
+
+ public static void doTestOtherThreadBusyLoop() throws Exception {
+ System.out.println("###########################");
+ System.out.println("### Other thread (live) ###");
+ System.out.println("###########################");
+ final ControlData data = new ControlData();
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ int count = getFrameCount(t);
+ System.out.println(count);
+ try {
+ System.out.println(Arrays.toString(getFrameLocation(t, -1)));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+ for (int i = 0; i < count; i++) {
+ System.out.println(Arrays.toString(getFrameLocation(t, i)));
+ }
+ try {
+ System.out.println(Arrays.toString(getFrameLocation(t, count)));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+
+ // Let the thread stop looping and die.
+ data.stop = true;
+ t.join();
+ }
+
+ public static native int getFrameCount(Thread thread);
+ public static native Object[] getFrameLocation(Thread thread, int depth);
+}
diff --git a/test/911-get-stack-trace/src/Main.java b/test/911-get-stack-trace/src/Main.java
index 722bee8..b199033 100644
--- a/test/911-get-stack-trace/src/Main.java
+++ b/test/911-get-stack-trace/src/Main.java
@@ -14,166 +14,32 @@
* limitations under the License.
*/
-import java.util.Arrays;
-import java.util.concurrent.CountDownLatch;
-
public class Main {
public static void main(String[] args) throws Exception {
System.loadLibrary(args[1]);
- doTest();
- doTestOtherThreadWait();
- doTestOtherThreadBusyLoop();
- }
+ SameThread.doTest();
- public static void doTest() throws Exception {
- System.out.println("###################");
- System.out.println("### Same thread ###");
- System.out.println("###################");
- System.out.println("From top");
- Recurse.foo(4, 0, 25, null);
- Recurse.foo(4, 1, 25, null);
- Recurse.foo(4, 0, 5, null);
- Recurse.foo(4, 2, 5, null);
-
- System.out.println("From bottom");
- Recurse.foo(4, -1, 25, null);
- Recurse.foo(4, -5, 5, null);
- Recurse.foo(4, -7, 5, null);
- }
-
- public static void doTestOtherThreadWait() throws Exception {
System.out.println();
- System.out.println("################################");
- System.out.println("### Other thread (suspended) ###");
- System.out.println("################################");
- final ControlData data = new ControlData();
- data.waitFor = new Object();
- Thread t = new Thread() {
- public void run() {
- Recurse.foo(4, 0, 0, data);
- }
- };
- t.start();
- data.reached.await();
- Thread.yield();
- Thread.sleep(500); // A little bit of time...
- System.out.println("From top");
- print(t, 0, 25);
- print(t, 1, 25);
- print(t, 0, 5);
- print(t, 2, 5);
+ OtherThread.doTestOtherThreadWait();
- System.out.println("From bottom");
- print(t, -1, 25);
- print(t, -5, 5);
- print(t, -7, 5);
-
- // Let the thread make progress and die.
- synchronized(data.waitFor) {
- data.waitFor.notifyAll();
- }
- t.join();
- }
-
- public static void doTestOtherThreadBusyLoop() throws Exception {
System.out.println();
- System.out.println("###########################");
- System.out.println("### Other thread (live) ###");
- System.out.println("###########################");
- final ControlData data = new ControlData();
- Thread t = new Thread() {
- public void run() {
- Recurse.foo(4, 0, 0, data);
- }
- };
- t.start();
- data.reached.await();
- Thread.yield();
- Thread.sleep(500); // A little bit of time...
- System.out.println("From top");
- print(t, 0, 25);
- print(t, 1, 25);
- print(t, 0, 5);
- print(t, 2, 5);
+ OtherThread.doTestOtherThreadBusyLoop();
- System.out.println("From bottom");
- print(t, -1, 25);
- print(t, -5, 5);
- print(t, -7, 5);
+ System.out.println();
- // Let the thread stop looping and die.
- data.stop = true;
- t.join();
+ AllTraces.doTest();
+
+ System.out.println();
+
+ ThreadListTraces.doTest();
+
+ System.out.println();
+
+ Frames.doTest();
+
+ System.out.println("Done");
}
-
- public static void print(String[][] stack) {
- System.out.println("---------");
- for (String[] stackElement : stack) {
- for (String part : stackElement) {
- System.out.print(' ');
- System.out.print(part);
- }
- System.out.println();
- }
- }
-
- public static void print(Thread t, int start, int max) {
- print(getStackTrace(t, start, max));
- }
-
- // Wrap generated stack traces into a class to separate them nicely.
- public static class Recurse {
-
- public static int foo(int x, int start, int max, ControlData data) {
- bar(x, start, max, data);
- return 0;
- }
-
- private static long bar(int x, int start, int max, ControlData data) {
- baz(x, start, max, data);
- return 0;
- }
-
- private static Object baz(int x, int start, int max, ControlData data) {
- if (x == 0) {
- printOrWait(start, max, data);
- } else {
- foo(x - 1, start, max, data);
- }
- return null;
- }
-
- private static void printOrWait(int start, int max, ControlData data) {
- if (data == null) {
- print(Thread.currentThread(), start, max);
- } else {
- if (data.waitFor != null) {
- synchronized (data.waitFor) {
- data.reached.countDown();
- try {
- data.waitFor.wait(); // Use wait() as it doesn't have a "hidden" Java call-graph.
- } catch (Throwable t) {
- throw new RuntimeException(t);
- }
- }
- } else {
- data.reached.countDown();
- while (!data.stop) {
- // Busy-loop.
- }
- }
- }
- }
- }
-
- public static class ControlData {
- CountDownLatch reached = new CountDownLatch(1);
- Object waitFor = null;
- volatile boolean stop = false;
- }
-
- public static native String[][] getStackTrace(Thread thread, int start, int max);
}
diff --git a/test/911-get-stack-trace/src/OtherThread.java b/test/911-get-stack-trace/src/OtherThread.java
new file mode 100644
index 0000000..0748433
--- /dev/null
+++ b/test/911-get-stack-trace/src/OtherThread.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class OtherThread {
+ public static void doTestOtherThreadWait() throws Exception {
+ System.out.println("################################");
+ System.out.println("### Other thread (suspended) ###");
+ System.out.println("################################");
+ final ControlData data = new ControlData();
+ data.waitFor = new Object();
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ System.out.println("From top");
+ PrintThread.print(t, 0, 25);
+ PrintThread.print(t, 1, 25);
+ PrintThread.print(t, 0, 5);
+ PrintThread.print(t, 2, 5);
+
+ System.out.println("From bottom");
+ PrintThread.print(t, -1, 25);
+ PrintThread.print(t, -5, 5);
+ PrintThread.print(t, -7, 5);
+
+ // Let the thread make progress and die.
+ synchronized(data.waitFor) {
+ data.waitFor.notifyAll();
+ }
+ t.join();
+ }
+
+ public static void doTestOtherThreadBusyLoop() throws Exception {
+ System.out.println("###########################");
+ System.out.println("### Other thread (live) ###");
+ System.out.println("###########################");
+ final ControlData data = new ControlData();
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ System.out.println("From top");
+ PrintThread.print(t, 0, 25);
+ PrintThread.print(t, 1, 25);
+ PrintThread.print(t, 0, 5);
+ PrintThread.print(t, 2, 5);
+
+ System.out.println("From bottom");
+ PrintThread.print(t, -1, 25);
+ PrintThread.print(t, -5, 5);
+ PrintThread.print(t, -7, 5);
+
+ // Let the thread stop looping and die.
+ data.stop = true;
+ t.join();
+ }
+}
diff --git a/test/911-get-stack-trace/src/PrintThread.java b/test/911-get-stack-trace/src/PrintThread.java
new file mode 100644
index 0000000..97815cc
--- /dev/null
+++ b/test/911-get-stack-trace/src/PrintThread.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+public class PrintThread {
+ public static void print(String[][] stack) {
+ System.out.println("---------");
+ for (String[] stackElement : stack) {
+ for (String part : stackElement) {
+ System.out.print(' ');
+ System.out.print(part);
+ }
+ System.out.println();
+ }
+ }
+
+ public static void print(Thread t, int start, int max) {
+ print(getStackTrace(t, start, max));
+ }
+
+ public static void printAll(Object[][] stacks) {
+ List<String> stringified = new ArrayList<String>(stacks.length);
+
+ for (Object[] stackInfo : stacks) {
+ Thread t = (Thread)stackInfo[0];
+ String name = (t != null) ? t.getName() : "null";
+ String stackSerialization;
+ if (name.contains("Daemon")) {
+ // Do not print daemon stacks, as they're non-deterministic.
+ stackSerialization = "<not printed>";
+ } else {
+ StringBuilder sb = new StringBuilder();
+ for (String[] stackElement : (String[][])stackInfo[1]) {
+ for (String part : stackElement) {
+ sb.append(' ');
+ sb.append(part);
+ }
+ sb.append('\n');
+ }
+ stackSerialization = sb.toString();
+ }
+ stringified.add(name + "\n" + stackSerialization);
+ }
+
+ Collections.sort(stringified);
+
+ for (String s : stringified) {
+ System.out.println("---------");
+ System.out.println(s);
+ }
+ }
+
+ public static native String[][] getStackTrace(Thread thread, int start, int max);
+}
\ No newline at end of file
diff --git a/test/911-get-stack-trace/src/Recurse.java b/test/911-get-stack-trace/src/Recurse.java
new file mode 100644
index 0000000..439fbaa
--- /dev/null
+++ b/test/911-get-stack-trace/src/Recurse.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Recurse {
+ public static int foo(int x, int start, int max, ControlData data) {
+ bar(x, start, max, data);
+ return 0;
+ }
+
+ private static long bar(int x, int start, int max, ControlData data) {
+ baz(x, start, max, data);
+ return 0;
+ }
+
+ private static Object baz(int x, int start, int max, ControlData data) {
+ if (x == 0) {
+ printOrWait(start, max, data);
+ } else {
+ foo(x - 1, start, max, data);
+ }
+ return null;
+ }
+
+ private static void printOrWait(int start, int max, ControlData data) {
+ if (data == null) {
+ PrintThread.print(Thread.currentThread(), start, max);
+ } else {
+ if (data.waitFor != null) {
+ synchronized (data.waitFor) {
+ data.reached.countDown();
+ try {
+ data.waitFor.wait(); // Use wait() as it doesn't have a "hidden" Java call-graph.
+ } catch (Throwable t) {
+ throw new RuntimeException(t);
+ }
+ }
+ } else {
+ data.reached.countDown();
+ while (!data.stop) {
+ // Busy-loop.
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/test/911-get-stack-trace/src/SameThread.java b/test/911-get-stack-trace/src/SameThread.java
new file mode 100644
index 0000000..f1e19e3
--- /dev/null
+++ b/test/911-get-stack-trace/src/SameThread.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class SameThread {
+ public static void doTest() throws Exception {
+ System.out.println("###################");
+ System.out.println("### Same thread ###");
+ System.out.println("###################");
+ System.out.println("From top");
+ Recurse.foo(4, 0, 25, null);
+ Recurse.foo(4, 1, 25, null);
+ Recurse.foo(4, 0, 5, null);
+ Recurse.foo(4, 2, 5, null);
+
+ System.out.println("From bottom");
+ Recurse.foo(4, -1, 25, null);
+ Recurse.foo(4, -5, 5, null);
+ Recurse.foo(4, -7, 5, null);
+ }
+}
diff --git a/test/911-get-stack-trace/src/ThreadListTraces.java b/test/911-get-stack-trace/src/ThreadListTraces.java
new file mode 100644
index 0000000..f66557f
--- /dev/null
+++ b/test/911-get-stack-trace/src/ThreadListTraces.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class ThreadListTraces {
+ public static void doTest() throws Exception {
+ System.out.println("########################################");
+ System.out.println("### Other select threads (suspended) ###");
+ System.out.println("########################################");
+
+ final int N = 10;
+
+ final ControlData data = new ControlData(N);
+ data.waitFor = new Object();
+
+ Thread threads[] = new Thread[N];
+
+ Thread list[] = new Thread[N/2 + 1];
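+    // Holds every other started thread, plus the current thread added as the last entry.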
+
+ for (int i = 0; i < N; i++) {
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ threads[i] = t;
+ if (i % 2 == 0) {
+ list[i/2] = t;
+ }
+ }
+ list[list.length - 1] = Thread.currentThread();
+
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ printList(list, 0);
+
+ printList(list, 5);
+
+ printList(list, 25);
+
+ // Let the thread make progress and die.
+ synchronized(data.waitFor) {
+ data.waitFor.notifyAll();
+ }
+ for (int i = 0; i < N; i++) {
+ threads[i].join();
+ }
+ }
+
+ public static void printList(Thread[] threads, int max) {
+ PrintThread.printAll(getThreadListStackTraces(threads, max));
+ }
+
+ // Similar to getAllStackTraces, but restricted to the given threads.
+ public static native Object[][] getThreadListStackTraces(Thread threads[], int max);
+}
diff --git a/test/911-get-stack-trace/stack_trace.cc b/test/911-get-stack-trace/stack_trace.cc
index b3e8bc3..d162e8a 100644
--- a/test/911-get-stack-trace/stack_trace.cc
+++ b/test/911-get-stack-trace/stack_trace.cc
@@ -14,14 +14,12 @@
* limitations under the License.
*/
-#include "stack_trace.h"
-
#include <inttypes.h>
#include <memory>
#include <stdio.h>
#include "android-base/stringprintf.h"
#include "base/logging.h"
#include "base/macros.h"
#include "jni.h"
@@ -52,33 +51,16 @@
return line_number;
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace(
- JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread, jint start, jint max) {
- std::unique_ptr<jvmtiFrameInfo[]> frames(new jvmtiFrameInfo[max]);
-
- jint count;
- {
- jvmtiError result = jvmti_env->GetStackTrace(thread, start, max, frames.get(), &count);
- if (result != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(result, &err);
- printf("Failure running GetStackTrace: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
- return nullptr;
- }
- }
-
+static jobjectArray TranslateJvmtiFrameInfoArray(JNIEnv* env,
+ jvmtiFrameInfo* frames,
+ jint count) {
auto callback = [&](jint method_index) -> jobjectArray {
char* name;
char* sig;
char* gen;
{
jvmtiError result2 = jvmti_env->GetMethodName(frames[method_index].method, &name, &sig, &gen);
- if (result2 != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(result2, &err);
- printf("Failure running GetMethodName: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, result2)) {
return nullptr;
}
}
@@ -142,16 +124,133 @@
return CreateObjectArray(env, count, "[Ljava/lang/String;", callback);
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
+extern "C" JNIEXPORT jobjectArray JNICALL Java_PrintThread_getStackTrace(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread, jint start, jint max) {
+ std::unique_ptr<jvmtiFrameInfo[]> frames(new jvmtiFrameInfo[max]);
+
+ jint count;
+ {
+ jvmtiError result = jvmti_env->GetStackTrace(thread, start, max, frames.get(), &count);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
}
- SetAllCapabilities(jvmti_env);
- return 0;
+
+ return TranslateJvmtiFrameInfoArray(env, frames.get(), count);
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_AllTraces_getAllStackTraces(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jint max) {
+ jint thread_count;
+ jvmtiStackInfo* stack_infos;
+ {
+ jvmtiError result = jvmti_env->GetAllStackTraces(max, &stack_infos, &thread_count);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+ }
+
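+  // For each thread, build a two-element Object[]: index 0 is the thread, index 1 its translated frames.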
+ auto callback = [&](jint thread_index) -> jobject {
+ auto inner_callback = [&](jint index) -> jobject {
+ if (index == 0) {
+ return stack_infos[thread_index].thread;
+ } else {
+ return TranslateJvmtiFrameInfoArray(env,
+ stack_infos[thread_index].frame_buffer,
+ stack_infos[thread_index].frame_count);
+ }
+ };
+ return CreateObjectArray(env, 2, "java/lang/Object", inner_callback);
+ };
+ jobjectArray ret = CreateObjectArray(env, thread_count, "[Ljava/lang/Object;", callback);
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(stack_infos));
+ return ret;
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_ThreadListTraces_getThreadListStackTraces(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobjectArray jthreads, jint max) {
+ jint thread_count = env->GetArrayLength(jthreads);
+ std::unique_ptr<jthread[]> threads(new jthread[thread_count]);
+ for (jint i = 0; i != thread_count; ++i) {
+ threads[i] = env->GetObjectArrayElement(jthreads, i);
+ }
+
+ jvmtiStackInfo* stack_infos;
+ {
+ jvmtiError result = jvmti_env->GetThreadListStackTraces(thread_count,
+ threads.get(),
+ max,
+ &stack_infos);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+ }
+
+ auto callback = [&](jint thread_index) -> jobject {
+ auto inner_callback = [&](jint index) -> jobject {
+ if (index == 0) {
+ return stack_infos[thread_index].thread;
+ } else {
+ return TranslateJvmtiFrameInfoArray(env,
+ stack_infos[thread_index].frame_buffer,
+ stack_infos[thread_index].frame_count);
+ }
+ };
+ return CreateObjectArray(env, 2, "java/lang/Object", inner_callback);
+ };
+ jobjectArray ret = CreateObjectArray(env, thread_count, "[Ljava/lang/Object;", callback);
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(stack_infos));
+ return ret;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Frames_getFrameCount(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread) {
+ jint count;
+ jvmtiError result = jvmti_env->GetFrameCount(thread, &count);
+ if (JvmtiErrorToException(env, result)) {
+ return -1;
+ }
+ return count;
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Frames_getFrameLocation(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread, jint depth) {
+ jmethodID method;
+ jlocation location;
+
+ jvmtiError result = jvmti_env->GetFrameLocation(thread, depth, &method, &location);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+
+ auto callback = [&](jint index) -> jobject {
+ switch (index) {
+ case 0:
+ {
+ jclass decl_class;
+ jvmtiError class_result = jvmti_env->GetMethodDeclaringClass(method, &decl_class);
+ if (JvmtiErrorToException(env, class_result)) {
+ return nullptr;
+ }
+ jint modifiers;
+ jvmtiError mod_result = jvmti_env->GetMethodModifiers(method, &modifiers);
+ if (JvmtiErrorToException(env, mod_result)) {
+ return nullptr;
+ }
+ constexpr jint kStatic = 0x8;
+ return env->ToReflectedMethod(decl_class,
+ method,
+ (modifiers & kStatic) != 0 ? JNI_TRUE : JNI_FALSE);
+ }
+ case 1:
+ return env->NewStringUTF(
+ android::base::StringPrintf("%x", static_cast<uint32_t>(location)).c_str());
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ };
+ jobjectArray ret = CreateObjectArray(env, 2, "java/lang/Object", callback);
+ return ret;
}
} // namespace Test911GetStackTrace
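Reviewer note: the new natives above are bound purely by JNI name, so the Java-side declarations, inferred here from the C++ signatures (the actual PrintThread/AllTraces/Frames sources are presumably added elsewhere in this change), would look roughly like:

    public class Frames {
      // Wraps jvmti GetFrameCount.
      public static native int getFrameCount(Thread thread);
      // Wraps jvmti GetFrameLocation; returns { reflected Method/Constructor, hex location String }.
      public static native Object[] getFrameLocation(Thread thread, int depth);
    }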
diff --git a/test/911-get-stack-trace/stack_trace.h b/test/911-get-stack-trace/stack_trace.h
deleted file mode 100644
index eba2a91..0000000
--- a/test/911-get-stack-trace/stack_trace.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_911_GET_STACK_TRACE_STACK_TRACE_H_
-#define ART_TEST_911_GET_STACK_TRACE_STACK_TRACE_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test911GetStackTrace {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test911GetStackTrace
-} // namespace art
-
-#endif // ART_TEST_911_GET_STACK_TRACE_STACK_TRACE_H_
diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc
index 38a4f0e..a22d1d7 100644
--- a/test/912-classes/classes.cc
+++ b/test/912-classes/classes.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "classes.h"
-
#include <stdio.h>
#include "base/macros.h"
@@ -224,16 +222,23 @@
return classloader;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getClassLoaderClasses(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jobject jclassloader) {
+ jint count = 0;
+ jclass* classes = nullptr;
+ jvmtiError result = jvmti_env->GetClassLoaderClasses(jclassloader, &count, &classes);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
}
- SetAllCapabilities(jvmti_env);
- return 0;
+
+ auto callback = [&](jint i) {
+ return classes[i];
+ };
+ jobjectArray ret = CreateObjectArray(env, count, "java/lang/Class", callback);
+ if (classes != nullptr) {
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(classes));
+ }
+ return ret;
}
} // namespace Test912Classes
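Reviewer note: GetClassLoaderClasses reports the classes for which the given loader has been recorded as an initiating loader, not only the ones it defined, which is why delegated classes such as java.lang.Object show up against the child loaders in the expected output that follows. A minimal Java-side use, mirroring Main.testClassLoaderClasses later in this patch (printInitiatedClasses is a hypothetical helper sitting next to Main's private native getClassLoaderClasses):

    static void printInitiatedClasses(ClassLoader loader) {
      Class<?>[] initiated = getClassLoaderClasses(loader);   // initiating-loader view
      java.util.Arrays.sort(initiated, (a, b) -> a.getName().compareTo(b.getName()));
      System.out.println(java.util.Arrays.toString(initiated));
    }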
diff --git a/test/912-classes/expected.txt b/test/912-classes/expected.txt
index 44c861a..a95a465 100644
--- a/test/912-classes/expected.txt
+++ b/test/912-classes/expected.txt
@@ -43,3 +43,19 @@
class [Ljava.lang.String; null
interface Main$InfA dalvik.system.PathClassLoader
class $Proxy0 dalvik.system.PathClassLoader
+
+boot <- src <- src-ex (A,B)
+912-classes-ex.jar+ -> 912-classes.jar+ ->
+[class A, class B, class java.lang.Object]
+912-classes.jar+ ->
+[class B, class java.lang.Object]
+
+boot <- src (B) <- src-ex (A, List)
+912-classes-ex.jar+ -> 912-classes.jar+ ->
+[class A, class java.lang.Object, interface java.util.List]
+912-classes.jar+ ->
+[class B, class java.lang.Object]
+
+boot <- src+src-ex (A,B)
+912-classes.jar+ ->
+[class A, class B, class java.lang.Object]
diff --git a/test/923-monitors/monitors.h b/test/912-classes/src-ex/A.java
similarity index 66%
rename from test/923-monitors/monitors.h
rename to test/912-classes/src-ex/A.java
index 14cd5cd..64acb2f 100644
--- a/test/923-monitors/monitors.h
+++ b/test/912-classes/src-ex/A.java
@@ -14,17 +14,5 @@
* limitations under the License.
*/
-#ifndef ART_TEST_923_MONITORS_MONITORS_H_
-#define ART_TEST_923_MONITORS_MONITORS_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test923Monitors {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test923Monitors
-} // namespace art
-
-#endif // ART_TEST_923_MONITORS_MONITORS_H_
+public class A {
+}
\ No newline at end of file
diff --git a/test/923-monitors/monitors.h b/test/912-classes/src/B.java
similarity index 66%
copy from test/923-monitors/monitors.h
copy to test/912-classes/src/B.java
index 14cd5cd..f1458c3 100644
--- a/test/923-monitors/monitors.h
+++ b/test/912-classes/src/B.java
@@ -14,17 +14,5 @@
* limitations under the License.
*/
-#ifndef ART_TEST_923_MONITORS_MONITORS_H_
-#define ART_TEST_923_MONITORS_MONITORS_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test923Monitors {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test923Monitors
-} // namespace art
-
-#endif // ART_TEST_923_MONITORS_MONITORS_H_
+public class B {
+}
\ No newline at end of file
diff --git a/test/912-classes/src/Main.java b/test/912-classes/src/Main.java
index e627d42..ea3c49c 100644
--- a/test/912-classes/src/Main.java
+++ b/test/912-classes/src/Main.java
@@ -14,8 +14,10 @@
* limitations under the License.
*/
+import java.lang.reflect.Constructor;
import java.lang.reflect.Proxy;
import java.util.Arrays;
+import java.util.Comparator;
public class Main {
public static void main(String[] args) throws Exception {
@@ -76,6 +78,8 @@
testClassLoader(String[].class);
testClassLoader(InfA.class);
testClassLoader(getProxyClass());
+
+ testClassLoaderClasses();
}
private static Class<?> proxyClass = null;
@@ -151,6 +155,95 @@
}
}
+ private static void testClassLoaderClasses() throws Exception {
+ ClassLoader boot = ClassLoader.getSystemClassLoader().getParent();
+ while (boot.getParent() != null) {
+ boot = boot.getParent();
+ }
+
+ System.out.println();
+ System.out.println("boot <- src <- src-ex (A,B)");
+ ClassLoader cl1 = create(create(boot, DEX1), DEX2);
+ Class.forName("B", false, cl1);
+ Class.forName("A", false, cl1);
+ printClassLoaderClasses(cl1);
+
+ System.out.println();
+ System.out.println("boot <- src (B) <- src-ex (A, List)");
+ ClassLoader cl2 = create(create(boot, DEX1), DEX2);
+ Class.forName("A", false, cl2);
+ Class.forName("java.util.List", false, cl2);
+ Class.forName("B", false, cl2.getParent());
+ printClassLoaderClasses(cl2);
+
+ System.out.println();
+ System.out.println("boot <- src+src-ex (A,B)");
+ ClassLoader cl3 = create(boot, DEX1, DEX2);
+ Class.forName("B", false, cl3);
+ Class.forName("A", false, cl3);
+ printClassLoaderClasses(cl3);
+
+ // Check that the boot classloader dumps something non-empty.
+ Class<?>[] bootClasses = getClassLoaderClasses(boot);
+ if (bootClasses.length == 0) {
+ throw new RuntimeException("No classes initiated by boot classloader.");
+ }
+ // Check that at least java.util.List is loaded.
+ boolean foundList = false;
+ for (Class<?> c : bootClasses) {
+ if (c == java.util.List.class) {
+ foundList = true;
+ break;
+ }
+ }
+ if (!foundList) {
+ System.out.println(Arrays.toString(bootClasses));
+ throw new RuntimeException("Could not find class java.util.List.");
+ }
+ }
+
+ private static void printClassLoaderClasses(ClassLoader cl) {
+ for (;;) {
+ if (cl == null || !cl.getClass().getName().startsWith("dalvik.system")) {
+ break;
+ }
+
+ ClassLoader saved = cl;
+ for (;;) {
+ if (cl == null || !cl.getClass().getName().startsWith("dalvik.system")) {
+ break;
+ }
+ String s = cl.toString();
+ int index1 = s.indexOf("zip file");
+ int index2 = s.indexOf(']', index1);
+ if (index2 < 0) {
+ throw new RuntimeException("Unexpected classloader " + s);
+ }
+ String zip_file = s.substring(index1, index2);
+ int index3 = zip_file.indexOf('"');
+ int index4 = zip_file.indexOf('"', index3 + 1);
+ if (index4 < 0) {
+ throw new RuntimeException("Unexpected classloader " + s);
+ }
+ String paths = zip_file.substring(index3 + 1, index4);
+ String pathArray[] = paths.split(":");
+ for (String path : pathArray) {
+ int index5 = path.lastIndexOf('/');
+ System.out.print(path.substring(index5 + 1));
+ System.out.print('+');
+ }
+ System.out.print(" -> ");
+ cl = cl.getParent();
+ }
+ System.out.println();
+ Class<?> classes[] = getClassLoaderClasses(saved);
+ Arrays.sort(classes, new ClassNameComparator());
+ System.out.println(Arrays.toString(classes));
+
+ cl = saved.getParent();
+ }
+ }
+
private static native boolean isModifiableClass(Class<?> c);
private static native String[] getClassSignature(Class<?> c);
@@ -161,12 +254,14 @@
private static native Object[] getClassFields(Class<?> c);
private static native Object[] getClassMethods(Class<?> c);
- private static native Class[] getImplementedInterfaces(Class<?> c);
+ private static native Class<?>[] getImplementedInterfaces(Class<?> c);
private static native int getClassStatus(Class<?> c);
private static native Object getClassLoader(Class<?> c);
+ private static native Class<?>[] getClassLoaderClasses(ClassLoader cl);
+
private static class TestForNonInit {
public static double dummy = Math.random(); // So it can't be compile-time initialized.
}
@@ -188,4 +283,23 @@
}
public abstract static class ClassC implements InfA, InfC {
}
+
+ private static final String DEX1 = System.getenv("DEX_LOCATION") + "/912-classes.jar";
+ private static final String DEX2 = System.getenv("DEX_LOCATION") + "/912-classes-ex.jar";
+
+ private static ClassLoader create(ClassLoader parent, String... elements) throws Exception {
+ // Note: We use a PathClassLoader, as we do not care about code performance. We only load
+ // the classes, and they're empty.
+ Class<?> pathClassLoaderClass = Class.forName("dalvik.system.PathClassLoader");
+ Constructor<?> pathClassLoaderInit = pathClassLoaderClass.getConstructor(String.class,
+ ClassLoader.class);
+ String path = String.join(":", elements);
+ return (ClassLoader) pathClassLoaderInit.newInstance(path, parent);
+ }
+
+ private static class ClassNameComparator implements Comparator<Class<?>> {
+ public int compare(Class<?> c1, Class<?> c2) {
+ return c1.getName().compareTo(c2.getName());
+ }
+ }
}
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index 0b232af..6759919 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "heaps.h"
-
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
@@ -495,17 +493,5 @@
return ret;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test913Heaps
} // namespace art
diff --git a/test/918-fields/fields.cc b/test/918-fields/fields.cc
index 4d2b34b..7d29912 100644
--- a/test/918-fields/fields.cc
+++ b/test/918-fields/fields.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "fields.h"
-
#include <stdio.h>
#include "base/macros.h"
@@ -132,17 +130,5 @@
return synth;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test918Fields
} // namespace art
diff --git a/test/918-fields/fields.h b/test/918-fields/fields.h
deleted file mode 100644
index 89bd161..0000000
--- a/test/918-fields/fields.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_918_FIELDS_FIELDS_H_
-#define ART_TEST_918_FIELDS_FIELDS_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test918Fields {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test918Fields
-} // namespace art
-
-#endif // ART_TEST_918_FIELDS_FIELDS_H_
diff --git a/test/920-objects/objects.cc b/test/920-objects/objects.cc
index 886dd0e..0553a9d 100644
--- a/test/920-objects/objects.cc
+++ b/test/920-objects/objects.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "objects.h"
-
#include <stdio.h>
#include "base/macros.h"
@@ -61,17 +59,5 @@
return hash;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test920Objects
} // namespace art
diff --git a/test/920-objects/objects.h b/test/920-objects/objects.h
deleted file mode 100644
index 5f21e7b..0000000
--- a/test/920-objects/objects.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_920_OBJECTS_OBJECTS_H_
-#define ART_TEST_920_OBJECTS_OBJECTS_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test920Objects {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test920Objects
-} // namespace art
-
-#endif // ART_TEST_920_OBJECTS_OBJECTS_H_
diff --git a/test/921-hello-failure/expected.txt b/test/921-hello-failure/expected.txt
index e2665ef..1c1d4d9 100644
--- a/test/921-hello-failure/expected.txt
+++ b/test/921-hello-failure/expected.txt
@@ -13,3 +13,11 @@
hello2 - ReorderInterface
Transformation error : java.lang.Exception(Failed to redefine class <LTransform2;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED)
hello2 - ReorderInterface
+hello - MultiRedef
+hello2 - MultiRedef
+Transformation error : java.lang.Exception(Failed to redefine classes <LTransform2;, LTransform;> due to JVMTI_ERROR_NAMES_DONT_MATCH)
+hello - MultiRedef
+hello2 - MultiRedef
+Transformation error : java.lang.Exception(Failed to redefine classes <LTransform;, LTransform2;> due to JVMTI_ERROR_NAMES_DONT_MATCH)
+hello - MultiRedef
+hello2 - MultiRedef
diff --git a/test/922-properties/properties.h b/test/921-hello-failure/src/CommonClassDefinition.java
similarity index 63%
rename from test/922-properties/properties.h
rename to test/921-hello-failure/src/CommonClassDefinition.java
index 84feb10..62602a0 100644
--- a/test/922-properties/properties.h
+++ b/test/921-hello-failure/src/CommonClassDefinition.java
@@ -14,17 +14,14 @@
* limitations under the License.
*/
-#ifndef ART_TEST_922_PROPERTIES_PROPERTIES_H_
-#define ART_TEST_922_PROPERTIES_PROPERTIES_H_
+class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
-#include <jni.h>
-
-namespace art {
-namespace Test922Properties {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test922Properties
-} // namespace art
-
-#endif // ART_TEST_922_PROPERTIES_PROPERTIES_H_
+ CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+}
diff --git a/test/921-hello-failure/src/Main.java b/test/921-hello-failure/src/Main.java
index 69c48e2..1fe2599 100644
--- a/test/921-hello-failure/src/Main.java
+++ b/test/921-hello-failure/src/Main.java
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+import java.util.ArrayList;
public class Main {
public static void main(String[] args) {
@@ -23,10 +24,30 @@
NewInterface.doTest(new Transform2());
MissingInterface.doTest(new Transform2());
ReorderInterface.doTest(new Transform2());
+ MultiRedef.doTest(new Transform(), new Transform2());
}
// Transforms the class. This throws an exception if something goes wrong.
public static native void doCommonClassRedefinition(Class<?> target,
byte[] classfile,
byte[] dexfile) throws Exception;
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) throws Exception {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles) throws Exception;
}
diff --git a/test/921-hello-failure/src/MultiRedef.java b/test/921-hello-failure/src/MultiRedef.java
new file mode 100644
index 0000000..c64342c
--- /dev/null
+++ b/test/921-hello-failure/src/MultiRedef.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class MultiRedef {
+
+ // class NotTransform {
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static CommonClassDefinition INVALID_DEFINITION_T1 = new CommonClassDefinition(
+ Transform.class,
+ Base64.getDecoder().decode(
+ "yv66vgAAADQAFQoABgAPBwAQCAARCgACABIHABMHABQBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+ "TGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3VyY2VG" +
+ "aWxlAQARTm90VHJhbnNmb3JtLmphdmEMAAcACAEAD2phdmEvbGFuZy9FcnJvcgEAFVNob3VsZCBu" +
+ "b3QgYmUgY2FsbGVkIQwABwAMAQAMTm90VHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAAgAAUA" +
+ "BgAAAAAAAgAAAAcACAABAAkAAAAdAAEAAQAAAAUqtwABsQAAAAEACgAAAAYAAQAAAAEAAQALAAwA" +
+ "AQAJAAAAIgADAAIAAAAKuwACWRIDtwAEvwAAAAEACgAAAAYAAQAAAAMAAQANAAAAAgAO"),
+ Base64.getDecoder().decode(
+ "ZGV4CjAzNQDLV95i5xnv6iUi6uIeDoY5jP5Xe9NP1AiYAgAAcAAAAHhWNBIAAAAAAAAAAAQCAAAL" +
+ "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACQAQAACAEAAEoB" +
+ "AABSAQAAYgEAAHUBAACJAQAAnQEAALABAADHAQAAygEAAM4BAADiAQAAAQAAAAIAAAADAAAABAAA" +
+ "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAAAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+ "AAAAAAAAAAAAAAAAAgAAAAAAAAAFAAAAAAAAAPQBAAAAAAAAAQABAAEAAADpAQAABAAAAHAQAwAA" +
+ "AA4ABAACAAIAAADuAQAACQAAACIAAQAbAQYAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgAOTE5v" +
+ "dFRyYW5zZm9ybTsAEUxqYXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZh" +
+ "L2xhbmcvU3RyaW5nOwARTm90VHJhbnNmb3JtLmphdmEAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAB" +
+ "VgACVkwAEmVtaXR0ZXI6IGphY2stNC4yMAAFc2F5SGkAAQAHDgADAQAHDgAAAAEBAICABIgCAQGg" +
+ "AgAADAAAAAAAAAABAAAAAAAAAAEAAAALAAAAcAAAAAIAAAAFAAAAnAAAAAMAAAACAAAAsAAAAAUA" +
+ "AAAEAAAAyAAAAAYAAAABAAAA6AAAAAEgAAACAAAACAEAAAEQAAABAAAARAEAAAIgAAALAAAASgEA" +
+ "AAMgAAACAAAA6QEAAAAgAAABAAAA9AEAAAAQAAABAAAABAIAAA=="));
+
+ // Valid redefinition of Transform2
+ // class Transform2 implements Iface1, Iface2 {
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static CommonClassDefinition VALID_DEFINITION_T2 = new CommonClassDefinition(
+ Transform2.class,
+ Base64.getDecoder().decode(
+ "yv66vgAAADQAGQoABgARBwASCAATCgACABQHABUHABYHABcHABgBAAY8aW5pdD4BAAMoKVYBAARD" +
+ "b2RlAQAPTGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApT" +
+ "b3VyY2VGaWxlAQAPVHJhbnNmb3JtMi5qYXZhDAAJAAoBAA9qYXZhL2xhbmcvRXJyb3IBABVTaG91" +
+ "bGQgbm90IGJlIGNhbGxlZCEMAAkADgEAClRyYW5zZm9ybTIBABBqYXZhL2xhbmcvT2JqZWN0AQAG" +
+ "SWZhY2UxAQAGSWZhY2UyACAABQAGAAIABwAIAAAAAgAAAAkACgABAAsAAAAdAAEAAQAAAAUqtwAB" +
+ "sQAAAAEADAAAAAYAAQAAAAEAAQANAA4AAQALAAAAIgADAAIAAAAKuwACWRIDtwAEvwAAAAEADAAA" +
+ "AAYAAQAAAAMAAQAPAAAAAgAQ"),
+ Base64.getDecoder().decode(
+ "ZGV4CjAzNQDSWls05CPkX+gbTGMVRvx9dc9vozzVbu7AAgAAcAAAAHhWNBIAAAAAAAAAACwCAAAN" +
+ "AAAAcAAAAAcAAACkAAAAAgAAAMAAAAAAAAAAAAAAAAQAAADYAAAAAQAAAPgAAACoAQAAGAEAAGIB" +
+ "AABqAQAAdAEAAH4BAACMAQAAnwEAALMBAADHAQAA3gEAAO8BAADyAQAA9gEAAAoCAAABAAAAAgAA" +
+ "AAMAAAAEAAAABQAAAAYAAAAJAAAACQAAAAYAAAAAAAAACgAAAAYAAABcAQAAAgAAAAAAAAACAAEA" +
+ "DAAAAAMAAQAAAAAABAAAAAAAAAACAAAAAAAAAAQAAABUAQAACAAAAAAAAAAcAgAAAAAAAAEAAQAB" +
+ "AAAAEQIAAAQAAABwEAMAAAAOAAQAAgACAAAAFgIAAAkAAAAiAAMAGwEHAAAAcCACABAAJwAAAAIA" +
+ "AAAAAAEAAQAAAAUABjxpbml0PgAITElmYWNlMTsACExJZmFjZTI7AAxMVHJhbnNmb3JtMjsAEUxq" +
+ "YXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwAV" +
+ "U2hvdWxkIG5vdCBiZSBjYWxsZWQhAA9UcmFuc2Zvcm0yLmphdmEAAVYAAlZMABJlbWl0dGVyOiBq" +
+ "YWNrLTQuMjAABXNheUhpAAEABw4AAwEABw4AAAABAQCAgASYAgEBsAIAAAwAAAAAAAAAAQAAAAAA" +
+ "AAABAAAADQAAAHAAAAACAAAABwAAAKQAAAADAAAAAgAAAMAAAAAFAAAABAAAANgAAAAGAAAAAQAA" +
+ "APgAAAABIAAAAgAAABgBAAABEAAAAgAAAFQBAAACIAAADQAAAGIBAAADIAAAAgAAABECAAAAIAAA" +
+ "AQAAABwCAAAAEAAAAQAAACwCAAA="));
+
+ public static void doTest(Transform t1, Transform2 t2) {
+ t1.sayHi("MultiRedef");
+ t2.sayHi("MultiRedef");
+ try {
+ Main.doMultiClassRedefinition(VALID_DEFINITION_T2, INVALID_DEFINITION_T1);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t1.sayHi("MultiRedef");
+ t2.sayHi("MultiRedef");
+ try {
+ Main.doMultiClassRedefinition(INVALID_DEFINITION_T1, VALID_DEFINITION_T2);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t1.sayHi("MultiRedef");
+ t2.sayHi("MultiRedef");
+ }
+}
diff --git a/test/922-properties/properties.cc b/test/922-properties/properties.cc
index b1e7fce..cb732c7 100644
--- a/test/922-properties/properties.cc
+++ b/test/922-properties/properties.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "properties.h"
-
#include <stdio.h>
#include "base/macros.h"
@@ -91,17 +89,5 @@
}
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test922Properties
} // namespace art
diff --git a/test/923-monitors/monitors.cc b/test/923-monitors/monitors.cc
index 2aa36cb..4baa530 100644
--- a/test/923-monitors/monitors.cc
+++ b/test/923-monitors/monitors.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "monitors.h"
-
#include <stdio.h>
#include "base/macros.h"
@@ -84,17 +82,5 @@
JvmtiErrorToException(env, result);
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test923Monitors
} // namespace art
diff --git a/test/954-invoke-polymorphic-verifier/run b/test/924-threads/build
similarity index 86%
rename from test/954-invoke-polymorphic-verifier/run
rename to test/924-threads/build
index a9f1822..898e2e5 100755
--- a/test/954-invoke-polymorphic-verifier/run
+++ b/test/924-threads/build
@@ -14,7 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# make us exit on a failure
-set -e
-
-./default-run "$@" --experimental method-handles
+./default-build "$@" --experimental agents
diff --git a/test/924-threads/expected.txt b/test/924-threads/expected.txt
new file mode 100644
index 0000000..32e3368
--- /dev/null
+++ b/test/924-threads/expected.txt
@@ -0,0 +1,31 @@
+currentThread OK
+main
+5
+false
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
+main
+5
+false
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
+Daemon Thread
+5
+true
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
+Daemon Thread
+5
+true
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
+5
+5
+0 = NEW
+191 = ALIVE|WAITING_INDEFINITELY|WAITING|IN_OBJECT_WAIT
+1a1 = ALIVE|WAITING_WITH_TIMEOUT|WAITING|IN_OBJECT_WAIT
+401 = ALIVE|BLOCKED_ON_MONITOR_ENTER
+e1 = ALIVE|WAITING_WITH_TIMEOUT|SLEEPING|WAITING
+5 = ALIVE|RUNNABLE
+2 = TERMINATED
+[Thread[FinalizerDaemon,5,system], Thread[FinalizerWatchdogDaemon,5,system], Thread[HeapTaskDaemon,5,system], Thread[ReferenceQueueDaemon,5,system], Thread[Signal Catcher,5,system], Thread[main,5,main]]
diff --git a/test/924-threads/info.txt b/test/924-threads/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/924-threads/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/954-invoke-polymorphic-verifier/run b/test/924-threads/run
similarity index 83%
copy from test/954-invoke-polymorphic-verifier/run
copy to test/924-threads/run
index a9f1822..4379349 100755
--- a/test/954-invoke-polymorphic-verifier/run
+++ b/test/924-threads/run
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# make us exit on a failure
-set -e
-
-./default-run "$@" --experimental method-handles
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --jvmti
diff --git a/test/924-threads/src/Main.java b/test/924-threads/src/Main.java
new file mode 100644
index 0000000..492a7ac
--- /dev/null
+++ b/test/924-threads/src/Main.java
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.concurrent.CountDownLatch;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[1]);
+
+ doTest();
+ }
+
+ private static void doTest() throws Exception {
+ Thread t1 = Thread.currentThread();
+ Thread t2 = getCurrentThread();
+
+ if (t1 != t2) {
+ throw new RuntimeException("Expected " + t1 + " but got " + t2);
+ }
+ System.out.println("currentThread OK");
+
+ printThreadInfo(t1);
+ printThreadInfo(null);
+
+ Thread t3 = new Thread("Daemon Thread");
+ t3.setDaemon(true);
+ // Do not start this thread, yet.
+ printThreadInfo(t3);
+ // Start, and wait for it to die.
+ t3.start();
+ t3.join();
+ Thread.sleep(500); // Wait a little bit.
+ // Thread has died, check that we can still get info.
+ printThreadInfo(t3);
+
+ doStateTests();
+
+ doAllThreadsTests();
+ }
+
+ private static class Holder {
+ volatile boolean flag = false;
+ }
+
+ private static void doStateTests() throws Exception {
+ System.out.println(Integer.toHexString(getThreadState(null)));
+ System.out.println(Integer.toHexString(getThreadState(Thread.currentThread())));
+
+ final CountDownLatch cdl1 = new CountDownLatch(1);
+ final CountDownLatch cdl2 = new CountDownLatch(1);
+ final CountDownLatch cdl3_1 = new CountDownLatch(1);
+ final CountDownLatch cdl3_2 = new CountDownLatch(1);
+ final CountDownLatch cdl4 = new CountDownLatch(1);
+ final CountDownLatch cdl5 = new CountDownLatch(1);
+ final Holder h = new Holder();
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ cdl1.countDown();
+ synchronized(cdl1) {
+ cdl1.wait();
+ }
+
+ cdl2.countDown();
+ synchronized(cdl2) {
+ cdl2.wait(1000); // Wait a second.
+ }
+
+ cdl3_1.await();
+ cdl3_2.countDown();
+ synchronized(cdl3_2) {
+ // Nothing, just wanted to block on cdl3_2.
+ }
+
+ cdl4.countDown();
+ Thread.sleep(1000);
+
+ cdl5.countDown();
+ while (!h.flag) {
+ // Busy-loop.
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+
+ Thread t = new Thread(r);
+ printThreadState(t);
+ t.start();
+
+ // Waiting.
+ cdl1.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ synchronized(cdl1) {
+ cdl1.notifyAll();
+ }
+
+ // Timed waiting.
+ cdl2.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ synchronized(cdl2) {
+ cdl2.notifyAll();
+ }
+
+ // Blocked on monitor.
+ synchronized(cdl3_2) {
+ cdl3_1.countDown();
+ cdl3_2.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ }
+
+ // Sleeping.
+ cdl4.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+
+ // Running.
+ cdl5.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ h.flag = true;
+
+ // Dying.
+ t.join();
+ Thread.yield();
+ Thread.sleep(100);
+
+ printThreadState(t);
+ }
+
+ private static void doAllThreadsTests() {
+ Thread[] threads = getAllThreads();
+ Arrays.sort(threads, THREAD_COMP);
+ System.out.println(Arrays.toString(threads));
+ }
+
+ private final static Comparator<Thread> THREAD_COMP = new Comparator<Thread>() {
+ public int compare(Thread o1, Thread o2) {
+ return o1.getName().compareTo(o2.getName());
+ }
+ };
+
+ private final static Map<Integer, String> STATE_NAMES = new HashMap<Integer, String>();
+ private final static List<Integer> STATE_KEYS = new ArrayList<Integer>();
+ static {
+ STATE_NAMES.put(0x1, "ALIVE");
+ STATE_NAMES.put(0x2, "TERMINATED");
+ STATE_NAMES.put(0x4, "RUNNABLE");
+ STATE_NAMES.put(0x400, "BLOCKED_ON_MONITOR_ENTER");
+ STATE_NAMES.put(0x80, "WAITING");
+ STATE_NAMES.put(0x10, "WAITING_INDEFINITELY");
+ STATE_NAMES.put(0x20, "WAITING_WITH_TIMEOUT");
+ STATE_NAMES.put(0x40, "SLEEPING");
+ STATE_NAMES.put(0x100, "IN_OBJECT_WAIT");
+ STATE_NAMES.put(0x200, "PARKED");
+ STATE_NAMES.put(0x100000, "SUSPENDED");
+ STATE_NAMES.put(0x200000, "INTERRUPTED");
+ STATE_NAMES.put(0x400000, "IN_NATIVE");
+ STATE_KEYS.addAll(STATE_NAMES.keySet());
+ Collections.sort(STATE_KEYS);
+ }
+
+ private static void printThreadState(Thread t) {
+ int state = getThreadState(t);
+
+ StringBuilder sb = new StringBuilder();
+
+ for (Integer i : STATE_KEYS) {
+ if ((state & i) != 0) {
+ if (sb.length() > 0) {
+ sb.append('|');
+ }
+ sb.append(STATE_NAMES.get(i));
+ }
+ }
+
+ if (sb.length() == 0) {
+ sb.append("NEW");
+ }
+
+ System.out.println(Integer.toHexString(state) + " = " + sb.toString());
+ }
+
+ private static void printThreadInfo(Thread t) {
+ Object[] threadInfo = getThreadInfo(t);
+ if (threadInfo == null || threadInfo.length != 5) {
+ System.out.println(Arrays.toString(threadInfo));
+ throw new RuntimeException("threadInfo length wrong");
+ }
+
+ System.out.println(threadInfo[0]); // Name
+ System.out.println(threadInfo[1]); // Priority
+ System.out.println(threadInfo[2]); // Daemon
+ System.out.println(threadInfo[3]); // Threadgroup
+ System.out.println(threadInfo[4] == null ? "null" : threadInfo[4].getClass()); // Context CL.
+ }
+
+ private static native Thread getCurrentThread();
+ private static native Object[] getThreadInfo(Thread t);
+ private static native int getThreadState(Thread t);
+ private static native Thread[] getAllThreads();
+}
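Reviewer note: the hex values printed by printThreadState decompose into the STATE_NAMES bits above. For example, 0x191 = 0x100 (IN_OBJECT_WAIT) | 0x80 (WAITING) | 0x10 (WAITING_INDEFINITELY) | 0x1 (ALIVE), rendered in ascending-key order as ALIVE|WAITING_INDEFINITELY|WAITING|IN_OBJECT_WAIT, which matches the "191 = ..." line in expected.txt; likewise 0x401 = 0x400 | 0x1 and 0xe1 = 0x80 | 0x40 | 0x20 | 0x1.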
diff --git a/test/924-threads/threads.cc b/test/924-threads/threads.cc
new file mode 100644
index 0000000..1487b7c
--- /dev/null
+++ b/test/924-threads/threads.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include "android-base/stringprintf.h"
+#include "base/macros.h"
+#include "base/logging.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test924Threads {
+
+// private static native Thread getCurrentThread();
+// private static native Object[] getThreadInfo(Thread t);
+
+extern "C" JNIEXPORT jthread JNICALL Java_Main_getCurrentThread(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
+ jthread thread = nullptr;
+ jvmtiError result = jvmti_env->GetCurrentThread(&thread);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+ return thread;
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getThreadInfo(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jthread thread) {
+ jvmtiThreadInfo info;
+ memset(&info, 0, sizeof(jvmtiThreadInfo));
+
+ jvmtiError result = jvmti_env->GetThreadInfo(thread, &info);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+
+ auto callback = [&](jint component_index) -> jobject {
+ switch (component_index) {
+ // The name.
+ case 0:
+ return (info.name == nullptr) ? nullptr : env->NewStringUTF(info.name);
+
+ // The priority. Use a string for simplicity of construction.
+ case 1:
+ return env->NewStringUTF(android::base::StringPrintf("%d", info.priority).c_str());
+
+ // Whether it's a daemon. Use a string for simplicity of construction.
+ case 2:
+ return env->NewStringUTF(info.is_daemon == JNI_TRUE ? "true" : "false");
+
+ // The thread group.
+ case 3:
+ return env->NewLocalRef(info.thread_group);
+
+ // The context classloader.
+ case 4:
+ return env->NewLocalRef(info.context_class_loader);
+ }
+ LOG(FATAL) << "Should not reach here";
+ UNREACHABLE();
+ };
+ jobjectArray ret = CreateObjectArray(env, 5, "java/lang/Object", callback);
+
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(info.name));
+ if (info.thread_group != nullptr) {
+ env->DeleteLocalRef(info.thread_group);
+ }
+ if (info.context_class_loader != nullptr) {
+ env->DeleteLocalRef(info.context_class_loader);
+ }
+
+ return ret;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_getThreadState(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jthread thread) {
+ jint state;
+ jvmtiError result = jvmti_env->GetThreadState(thread, &state);
+ if (JvmtiErrorToException(env, result)) {
+ return 0;
+ }
+ return state;
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getAllThreads(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
+ jint thread_count;
+ jthread* threads;
+
+ jvmtiError result = jvmti_env->GetAllThreads(&thread_count, &threads);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+
+ auto callback = [&](jint index) {
+ return threads[index];
+ };
+ jobjectArray ret = CreateObjectArray(env, thread_count, "java/lang/Thread", callback);
+
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(threads));
+
+ return ret;
+}
+
+} // namespace Test924Threads
+} // namespace art
diff --git a/test/954-invoke-polymorphic-verifier/run b/test/926-multi-obsolescence/build
similarity index 86%
copy from test/954-invoke-polymorphic-verifier/run
copy to test/926-multi-obsolescence/build
index a9f1822..898e2e5 100755
--- a/test/954-invoke-polymorphic-verifier/run
+++ b/test/926-multi-obsolescence/build
@@ -14,7 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# make us exit on a failure
-set -e
-
-./default-run "$@" --experimental method-handles
+./default-build "$@" --experimental agents
diff --git a/test/926-multi-obsolescence/expected.txt b/test/926-multi-obsolescence/expected.txt
new file mode 100644
index 0000000..0546490
--- /dev/null
+++ b/test/926-multi-obsolescence/expected.txt
@@ -0,0 +1,15 @@
+hello
+hello - 2
+Not doing anything here
+goodbye - 2
+goodbye
+hello
+hello - 2
+transforming calling functions
+goodbye - 2
+goodbye
+Hello - Transformed
+Hello 2 - Transformed
+Not doing anything here
+Goodbye 2 - Transformed
+Goodbye - Transformed
diff --git a/test/926-multi-obsolescence/info.txt b/test/926-multi-obsolescence/info.txt
new file mode 100644
index 0000000..1399b96
--- /dev/null
+++ b/test/926-multi-obsolescence/info.txt
@@ -0,0 +1,2 @@
+Tests that we can redefine multiple classes at once using the RedefineClasses
+function.
diff --git a/test/954-invoke-polymorphic-verifier/run b/test/926-multi-obsolescence/run
similarity index 83%
copy from test/954-invoke-polymorphic-verifier/run
copy to test/926-multi-obsolescence/run
index a9f1822..4379349 100755
--- a/test/954-invoke-polymorphic-verifier/run
+++ b/test/926-multi-obsolescence/run
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# make us exit on a failure
-set -e
-
-./default-run "$@" --experimental method-handles
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --jvmti
diff --git a/test/922-properties/properties.h b/test/926-multi-obsolescence/src/CommonClassDefinition.java
similarity index 63%
copy from test/922-properties/properties.h
copy to test/926-multi-obsolescence/src/CommonClassDefinition.java
index 84feb10..62602a0 100644
--- a/test/922-properties/properties.h
+++ b/test/926-multi-obsolescence/src/CommonClassDefinition.java
@@ -14,17 +14,14 @@
* limitations under the License.
*/
-#ifndef ART_TEST_922_PROPERTIES_PROPERTIES_H_
-#define ART_TEST_922_PROPERTIES_PROPERTIES_H_
+class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
-#include <jni.h>
-
-namespace art {
-namespace Test922Properties {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test922Properties
-} // namespace art
-
-#endif // ART_TEST_922_PROPERTIES_PROPERTIES_H_
+ CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+}
diff --git a/test/926-multi-obsolescence/src/Main.java b/test/926-multi-obsolescence/src/Main.java
new file mode 100644
index 0000000..8a6cf84
--- /dev/null
+++ b/test/926-multi-obsolescence/src/Main.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Base64;
+
+public class Main {
+ // class Transform {
+ // public void sayHi(Runnable r) {
+ // System.out.println("Hello - Transformed");
+ // r.run();
+ // System.out.println("Goodbye - Transformed");
+ // }
+ // }
+ private static CommonClassDefinition VALID_DEFINITION_T1 = new CommonClassDefinition(
+ Transform.class,
+ Base64.getDecoder().decode(
+ "yv66vgAAADQAJAoACAARCQASABMIABQKABUAFgsAFwAYCAAZBwAaBwAbAQAGPGluaXQ+AQADKClW" +
+ "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7" +
+ "KVYBAApTb3VyY2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMAAkACgcAHAwAHQAeAQATSGVsbG8gLSBU" +
+ "cmFuc2Zvcm1lZAcAHwwAIAAhBwAiDAAjAAoBABVHb29kYnllIC0gVHJhbnNmb3JtZWQBAAlUcmFu" +
+ "c2Zvcm0BABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZh" +
+ "L2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZh" +
+ "L2xhbmcvU3RyaW5nOylWAQASamF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAABwAIAAAAAAACAAAA" +
+ "CQAKAAEACwAAAB0AAQABAAAABSq3AAGxAAAAAQAMAAAABgABAAAAAQABAA0ADgABAAsAAAA7AAIA" +
+ "AgAAABeyAAISA7YABCu5AAUBALIAAhIGtgAEsQAAAAEADAAAABIABAAAAAMACAAEAA4ABQAWAAYA" +
+ "AQAPAAAAAgAQ"),
+ Base64.getDecoder().decode(
+ "ZGV4CjAzNQAYeAMMXgYWxoeSHAS9EWKCCtVRSAGpqZVQAwAAcAAAAHhWNBIAAAAAAAAAALACAAAR" +
+ "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAUAAAD8AAAAAQAAACQBAAAMAgAARAEAAKIB" +
+ "AACqAQAAwQEAANYBAADjAQAA+gEAAA4CAAAkAgAAOAIAAEwCAABcAgAAXwIAAGMCAAB3AgAAfAIA" +
+ "AIUCAACKAgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAoAAAAGAAAAAAAAAAsAAAAGAAAA" +
+ "lAEAAAsAAAAGAAAAnAEAAAUAAQANAAAAAAAAAAAAAAAAAAEAEAAAAAEAAgAOAAAAAgAAAAAAAAAD" +
+ "AAAADwAAAAAAAAAAAAAAAgAAAAAAAAAJAAAAAAAAAJ8CAAAAAAAAAQABAAEAAACRAgAABAAAAHAQ" +
+ "AwAAAA4ABAACAAIAAACWAgAAFAAAAGIAAAAbAQIAAABuIAIAEAByEAQAAwBiAAAAGwEBAAAAbiAC" +
+ "ABAADgABAAAAAwAAAAEAAAAEAAY8aW5pdD4AFUdvb2RieWUgLSBUcmFuc2Zvcm1lZAATSGVsbG8g" +
+ "LSBUcmFuc2Zvcm1lZAALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEv" +
+ "bGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABJM" +
+ "amF2YS9sYW5nL1N5c3RlbTsADlRyYW5zZm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00" +
+ "LjEzAANvdXQAB3ByaW50bG4AA3J1bgAFc2F5SGkAAQAHDgADAQAHDoc8hwAAAAEBAICABMQCAQHc" +
+ "AgAAAA0AAAAAAAAAAQAAAAAAAAABAAAAEQAAAHAAAAACAAAABwAAALQAAAADAAAAAwAAANAAAAAE" +
+ "AAAAAQAAAPQAAAAFAAAABQAAAPwAAAAGAAAAAQAAACQBAAABIAAAAgAAAEQBAAABEAAAAgAAAJQB" +
+ "AAACIAAAEQAAAKIBAAADIAAAAgAAAJECAAAAIAAAAQAAAJ8CAAAAEAAAAQAAALACAAA="));
+ // class Transform2 {
+ // public void sayHi(Runnable r) {
+ // System.out.println("Hello 2 - Transformed");
+ // r.run();
+ // System.out.println("Goodbye 2 - Transformed");
+ // }
+ // }
+ private static CommonClassDefinition VALID_DEFINITION_T2 = new CommonClassDefinition(
+ Transform2.class,
+ Base64.getDecoder().decode(
+ "yv66vgAAADQAJAoACAARCQASABMIABQKABUAFgsAFwAYCAAZBwAaBwAbAQAGPGluaXQ+AQADKClW" +
+ "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7" +
+ "KVYBAApTb3VyY2VGaWxlAQAPVHJhbnNmb3JtMi5qYXZhDAAJAAoHABwMAB0AHgEAFUhlbGxvIDIg" +
+ "LSBUcmFuc2Zvcm1lZAcAHwwAIAAhBwAiDAAjAAoBABdHb29kYnllIDIgLSBUcmFuc2Zvcm1lZAEA" +
+ "ClRyYW5zZm9ybTIBABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEA" +
+ "FUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAV" +
+ "KExqYXZhL2xhbmcvU3RyaW5nOylWAQASamF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAABwAIAAAA" +
+ "AAACAAAACQAKAAEACwAAAB0AAQABAAAABSq3AAGxAAAAAQAMAAAABgABAAAAAQABAA0ADgABAAsA" +
+ "AAA7AAIAAgAAABeyAAISA7YABCu5AAUBALIAAhIGtgAEsQAAAAEADAAAABIABAAAAAMACAAEAA4A" +
+ "BQAWAAYAAQAPAAAAAgAQ"),
+ Base64.getDecoder().decode(
+ "ZGV4CjAzNQCee5Z6+AuFcjnPjjn7QYgZmKSmFQCO4nxUAwAAcAAAAHhWNBIAAAAAAAAAALQCAAAR" +
+ "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAUAAAD8AAAAAQAAACQBAAAQAgAARAEAAKIB" +
+ "AACqAQAAwwEAANoBAADoAQAA/wEAABMCAAApAgAAPQIAAFECAABiAgAAZQIAAGkCAAB9AgAAggIA" +
+ "AIsCAACQAgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAoAAAAGAAAAAAAAAAsAAAAGAAAA" +
+ "lAEAAAsAAAAGAAAAnAEAAAUAAQANAAAAAAAAAAAAAAAAAAEAEAAAAAEAAgAOAAAAAgAAAAAAAAAD" +
+ "AAAADwAAAAAAAAAAAAAAAgAAAAAAAAAJAAAAAAAAAKUCAAAAAAAAAQABAAEAAACXAgAABAAAAHAQ" +
+ "AwAAAA4ABAACAAIAAACcAgAAFAAAAGIAAAAbAQIAAABuIAIAEAByEAQAAwBiAAAAGwEBAAAAbiAC" +
+ "ABAADgABAAAAAwAAAAEAAAAEAAY8aW5pdD4AF0dvb2RieWUgMiAtIFRyYW5zZm9ybWVkABVIZWxs" +
+ "byAyIC0gVHJhbnNmb3JtZWQADExUcmFuc2Zvcm0yOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJM" +
+ "amF2YS9sYW5nL09iamVjdDsAFExqYXZhL2xhbmcvUnVubmFibGU7ABJMamF2YS9sYW5nL1N0cmlu" +
+ "ZzsAEkxqYXZhL2xhbmcvU3lzdGVtOwAPVHJhbnNmb3JtMi5qYXZhAAFWAAJWTAASZW1pdHRlcjog" +
+ "amFjay00LjIwAANvdXQAB3ByaW50bG4AA3J1bgAFc2F5SGkAAQAHDgADAQAHDoc8hwAAAAEBAICA" +
+ "BMQCAQHcAgANAAAAAAAAAAEAAAAAAAAAAQAAABEAAABwAAAAAgAAAAcAAAC0AAAAAwAAAAMAAADQ" +
+ "AAAABAAAAAEAAAD0AAAABQAAAAUAAAD8AAAABgAAAAEAAAAkAQAAASAAAAIAAABEAQAAARAAAAIA" +
+ "AACUAQAAAiAAABEAAACiAQAAAyAAAAIAAACXAgAAACAAAAEAAAClAgAAABAAAAEAAAC0AgAA"));
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[1]);
+ doTest(new Transform(), new Transform2());
+ }
+
+ public static void doTest(final Transform t1, final Transform2 t2) {
+ t1.sayHi(() -> { t2.sayHi(() -> { System.out.println("Not doing anything here"); }); });
+ t1.sayHi(() -> {
+ t2.sayHi(() -> {
+ System.out.println("transforming calling functions");
+ doMultiClassRedefinition(VALID_DEFINITION_T1, VALID_DEFINITION_T2);
+ });
+ });
+ t1.sayHi(() -> { t2.sayHi(() -> { System.out.println("Not doing anything here"); }); });
+ }
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+}
diff --git a/test/926-multi-obsolescence/src/Transform.java b/test/926-multi-obsolescence/src/Transform.java
new file mode 100644
index 0000000..8cda6cd
--- /dev/null
+++ b/test/926-multi-obsolescence/src/Transform.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ public void sayHi(Runnable r) {
+ // Use lower 'h' to make sure the string will have a different string id
+ // than the transformation (the transformation code is the same except
+ // the actual printed String, which was making the test inaccurately pass
+ // in JIT mode when loading the string from the dex cache, as the string ids
+ // of the two different strings were the same).
+ // We know the string ids will be different because lexicographically:
+ // "Hello" < "LTransform;" < "hello".
+ System.out.println("hello");
+ r.run();
+ System.out.println("goodbye");
+ }
+}
diff --git a/test/913-heaps/heaps.h b/test/926-multi-obsolescence/src/Transform2.java
similarity index 68%
copy from test/913-heaps/heaps.h
copy to test/926-multi-obsolescence/src/Transform2.java
index bd828ac..4877f84 100644
--- a/test/913-heaps/heaps.h
+++ b/test/926-multi-obsolescence/src/Transform2.java
@@ -14,17 +14,10 @@
* limitations under the License.
*/
-#ifndef ART_TEST_913_HEAPS_HEAPS_H_
-#define ART_TEST_913_HEAPS_HEAPS_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test913Heaps {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test913Heaps
-} // namespace art
-
-#endif // ART_TEST_913_HEAPS_HEAPS_H_
+class Transform2 {
+ public void sayHi(Runnable r) {
+ System.out.println("hello - 2");
+ r.run();
+ System.out.println("goodbye - 2");
+ }
+}
diff --git a/test/954-invoke-polymorphic-verifier/run b/test/927-timers/build
similarity index 86%
copy from test/954-invoke-polymorphic-verifier/run
copy to test/927-timers/build
index a9f1822..898e2e5 100755
--- a/test/954-invoke-polymorphic-verifier/run
+++ b/test/927-timers/build
@@ -14,7 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# make us exit on a failure
-set -e
-
-./default-run "$@" --experimental method-handles
+./default-build "$@" --experimental agents
diff --git a/test/927-timers/expected.txt b/test/927-timers/expected.txt
new file mode 100644
index 0000000..a4ef442
--- /dev/null
+++ b/test/927-timers/expected.txt
@@ -0,0 +1,3 @@
+availableProcessors OK
+[-1, true, true, 32]
+Time OK
diff --git a/test/927-timers/info.txt b/test/927-timers/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/927-timers/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/954-invoke-polymorphic-verifier/run b/test/927-timers/run
similarity index 83%
copy from test/954-invoke-polymorphic-verifier/run
copy to test/927-timers/run
index a9f1822..4379349 100755
--- a/test/954-invoke-polymorphic-verifier/run
+++ b/test/927-timers/run
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# make us exit on a failure
-set -e
-
-./default-run "$@" --experimental method-handles
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --jvmti
diff --git a/test/927-timers/src/Main.java b/test/927-timers/src/Main.java
new file mode 100644
index 0000000..2f5c85c
--- /dev/null
+++ b/test/927-timers/src/Main.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[1]);
+
+ doTest();
+ }
+
+ private static void doTest() {
+ int all1 = Runtime.getRuntime().availableProcessors();
+ int all2 = getAvailableProcessors();
+ if (all1 != all2) {
+ throw new RuntimeException("Available processors doesn't match: " + all1 + " vs " + all2);
+ }
+ System.out.println("availableProcessors OK");
+
+ Object info[] = getTimerInfo();
+ System.out.println(Arrays.toString(info));
+
+ // getTime checks.
+ // Note: there isn't really much to check independent of the implementation. So we check
+ // a few details of the ART implementation. This may fail on other runtimes.
+ long time1 = getTime();
+ long time2 = getTime();
+
+ // Under normal circumstances, time1 <= time2.
+ if (time2 < time1) {
+ throw new RuntimeException("Time unexpectedly decreased: " + time1 + " vs " + time2);
+ }
+
+ long time3 = System.nanoTime();
+ long time4 = getTime();
+
+ final long MINUTE = 60L * 1000 * 1000 * 1000;
+ if (time4 < time3 || (time4 - time3 > MINUTE)) {
+ throw new RuntimeException("Time unexpectedly divergent: " + time3 + " vs " + time4);
+ }
+
+ System.out.println("Time OK");
+ }
+
+ private static native int getAvailableProcessors();
+ private static native Object[] getTimerInfo();
+ private static native long getTime();
+}
diff --git a/test/927-timers/timers.cc b/test/927-timers/timers.cc
new file mode 100644
index 0000000..58d5c27
--- /dev/null
+++ b/test/927-timers/timers.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#include "android-base/stringprintf.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test926Timers {
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_getAvailableProcessors(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
+ jint count;
+ jvmtiError result = jvmti_env->GetAvailableProcessors(&count);
+ if (JvmtiErrorToException(env, result)) {
+ return -1;
+ }
+ return count;
+}
+
+extern "C" JNIEXPORT jlong JNICALL Java_Main_getTime(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
+ jlong time;
+ jvmtiError result = jvmti_env->GetTime(&time);
+ if (JvmtiErrorToException(env, result)) {
+ return -1;
+ }
+ return time;
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getTimerInfo(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
+ jvmtiTimerInfo info;
+ jvmtiError result = jvmti_env->GetTimerInfo(&info);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+
+ auto callback = [&](jint index) -> jobject {
+ switch (index) {
+ // Max value.
+ case 0:
+ return env->NewStringUTF(android::base::StringPrintf("%" PRId64, info.max_value).c_str());
+
+ // Skip forward.
+ case 1:
+ return env->NewStringUTF(info.may_skip_forward == JNI_TRUE ? "true" : "false");
+ // Skip backward.
+ case 2:
+      return env->NewStringUTF(info.may_skip_backward == JNI_TRUE ? "true" : "false");
+
+ // The kind.
+ case 3:
+ return env->NewStringUTF(
+ android::base::StringPrintf("%d", static_cast<jint>(info.kind)).c_str());
+ }
+ LOG(FATAL) << "Should not reach here";
+ UNREACHABLE();
+ };
+ return CreateObjectArray(env, 4, "java/lang/Object", callback);
+}
+
+}  // namespace Test927Timers
+} // namespace art
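The getTimerInfo callback above packs the four jvmtiTimerInfo fields into an Object[] of strings in a fixed order: max value, may-skip-forward, may-skip-backward, timer kind. A minimal, hypothetical Java sketch of how a caller could decode that array (the class and method names below are illustrative, not part of this change):

    import java.util.Arrays;

    public class TimerInfoSketch {
      // Decodes the array produced by a native getTimerInfo() that packs
      // { max_value, may_skip_forward, may_skip_backward, kind } as strings.
      static void printTimerInfo(Object[] info) {
        if (info == null || info.length != 4) {
          throw new IllegalArgumentException("Expected 4 elements, got " + Arrays.toString(info));
        }
        long maxValue = Long.parseLong((String) info[0]);
        boolean maySkipForward = Boolean.parseBoolean((String) info[1]);
        boolean maySkipBackward = Boolean.parseBoolean((String) info[2]);
        int kind = Integer.parseInt((String) info[3]);
        System.out.println("max_value=" + maxValue + " may_skip_forward=" + maySkipForward
            + " may_skip_backward=" + maySkipBackward + " kind=" + kind);
      }

      public static void main(String[] args) {
        // Example values only; a real caller would pass the array returned by the agent.
        printTimerInfo(new Object[] { "9223372036854775807", "true", "false", "1" });
      }
    }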
diff --git a/test/954-invoke-polymorphic-verifier/run b/test/953-invoke-polymorphic-compiler/build
similarity index 81%
copy from test/954-invoke-polymorphic-verifier/run
copy to test/953-invoke-polymorphic-compiler/build
index a9f1822..a423ca6 100755
--- a/test/954-invoke-polymorphic-verifier/run
+++ b/test/953-invoke-polymorphic-compiler/build
@@ -17,4 +17,9 @@
# make us exit on a failure
set -e
-./default-run "$@" --experimental method-handles
+if [[ $@ != *"--jvm"* ]]; then
+ # Don't do anything with jvm.
+ export USE_JACK=true
+fi
+
+./default-build "$@" --experimental method-handles
diff --git a/test/953-invoke-polymorphic-compiler/expected.txt b/test/953-invoke-polymorphic-compiler/expected.txt
new file mode 100644
index 0000000..f47ee23
--- /dev/null
+++ b/test/953-invoke-polymorphic-compiler/expected.txt
@@ -0,0 +1,25 @@
+Running Main.Min2Print2([33, -4])
+Running Main.Min2Print2([-4, 33])
+Running Main.Min2Print3([33, -4, 17])
+Running Main.Min2Print3([-4, 17, 33])
+Running Main.Min2Print3([17, 33, -4])
+Running Main.Min2Print6([33, -4, 77, 88, 99, 111])
+Running Main.Min2Print6([-4, 77, 88, 99, 111, 33])
+Running Main.Min2Print6([77, 88, 99, 111, 33, -4])
+Running Main.Min2Print6([88, 99, 111, 33, -4, 77])
+Running Main.Min2Print6([99, 111, 33, -4, 77, 88])
+Running Main.Min2Print6([111, 33, -4, 77, 88, 99])
+Running Main.Min2Print26([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25])
+Running Main.Min2Print26([25, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24])
+Running Main.Min2Print26([24, 25, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
+BasicTest done.
+$opt$ReturnBooleanTest done.
+$opt$ReturnCharTest done.
+$opt$ReturnByteTest done.
+$opt$ReturnShortTest done.
+$opt$ReturnIntTest done.
+$opt$ReturnLongTest done.
+$opt$ReturnFloatTest done.
+$opt$ReturnDoubleTest done.
+$opt$ReturnStringTest done.
+ReturnValuesTest done.
diff --git a/test/953-invoke-polymorphic-compiler/info.txt b/test/953-invoke-polymorphic-compiler/info.txt
new file mode 100644
index 0000000..f1dbb61
--- /dev/null
+++ b/test/953-invoke-polymorphic-compiler/info.txt
@@ -0,0 +1,3 @@
+Tests for method handle invocations.
+
+NOTE: needs to run under ART or a Java 8 Language runtime and compiler.
diff --git a/test/953-invoke-polymorphic-compiler/src/Main.java b/test/953-invoke-polymorphic-compiler/src/Main.java
new file mode 100644
index 0000000..20a8fec
--- /dev/null
+++ b/test/953-invoke-polymorphic-compiler/src/Main.java
@@ -0,0 +1,374 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodHandles.Lookup;
+import java.lang.invoke.MethodType;
+import java.lang.invoke.WrongMethodTypeException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+public class Main {
+ public static void assertTrue(boolean value) {
+ if (!value) {
+ throw new AssertionError("assertTrue value: " + value);
+ }
+ }
+
+ public static void assertFalse(boolean value) {
+ if (value) {
+ throw new AssertionError("assertTrue value: " + value);
+ }
+ }
+
+ public static void assertEquals(int i1, int i2) {
+ if (i1 == i2) { return; }
+ throw new AssertionError("assertEquals i1: " + i1 + ", i2: " + i2);
+ }
+
+ public static void assertEquals(long i1, long i2) {
+ if (i1 == i2) { return; }
+ throw new AssertionError("assertEquals l1: " + i1 + ", l2: " + i2);
+ }
+
+ public static void assertEquals(Object o, Object p) {
+ if (o == p) { return; }
+ if (o != null && p != null && o.equals(p)) { return; }
+ throw new AssertionError("assertEquals: o1: " + o + ", o2: " + p);
+ }
+
+ public static void assertEquals(String s1, String s2) {
+ if (s1 == s2) {
+ return;
+ }
+
+ if (s1 != null && s2 != null && s1.equals(s2)) {
+ return;
+ }
+
+ throw new AssertionError("assertEquals s1: " + s1 + ", s2: " + s2);
+ }
+
+ public static void fail() {
+ System.err.println("fail");
+ Thread.dumpStack();
+ }
+
+ public static void fail(String message) {
+ System.err.println("fail: " + message);
+ Thread.dumpStack();
+ }
+
+ public static int Min2Print2(int a, int b) {
+ int[] values = new int[] { a, b };
+ System.err.println("Running Main.Min2Print2(" + Arrays.toString(values) + ")");
+ return a > b ? a : b;
+ }
+
+ public static int Min2Print3(int a, int b, int c) {
+ int[] values = new int[] { a, b, c };
+ System.err.println("Running Main.Min2Print3(" + Arrays.toString(values) + ")");
+ return a > b ? a : b;
+ }
+
+ public static int Min2Print6(int a, int b, int c, int d, int e, int f) {
+ int[] values = new int[] { a, b, c, d, e, f };
+ System.err.println("Running Main.Min2Print6(" + Arrays.toString(values) + ")");
+ return a > b ? a : b;
+ }
+
+ public static int Min2Print26(int a, int b, int c, int d,
+ int e, int f, int g, int h,
+ int i, int j, int k, int l,
+ int m, int n, int o, int p,
+ int q, int r, int s, int t,
+ int u, int v, int w, int x,
+ int y, int z) {
+ int[] values = new int[] { a, b, c, d, e, f, g, h, i, j, k, l, m,
+ n, o, p, q, r, s, t, u, v, w, x, y, z };
+ System.err.println("Running Main.Min2Print26(" + Arrays.toString(values) + ")");
+ return a > b ? a : b;
+ }
+
+ public static void $opt$BasicTest() throws Throwable {
+ MethodHandle mh;
+ mh = MethodHandles.lookup().findStatic(
+ Main.class, "Min2Print2", MethodType.methodType(int.class, int.class, int.class));
+ assertEquals((int) mh.invokeExact(33, -4), 33);
+ assertEquals((int) mh.invokeExact(-4, 33), 33);
+
+ mh = MethodHandles.lookup().findStatic(
+ Main.class, "Min2Print3",
+ MethodType.methodType(int.class, int.class, int.class, int.class));
+ assertEquals((int) mh.invokeExact(33, -4, 17), 33);
+ assertEquals((int) mh.invokeExact(-4, 17, 33), 17);
+ assertEquals((int) mh.invokeExact(17, 33, -4), 33);
+
+ mh = MethodHandles.lookup().findStatic(
+ Main.class, "Min2Print6",
+ MethodType.methodType(
+ int.class, int.class, int.class, int.class, int.class, int.class, int.class));
+ assertEquals((int) mh.invokeExact(33, -4, 77, 88, 99, 111), 33);
+ try {
+ // Too few arguments
+ assertEquals((int) mh.invokeExact(33, -4, 77, 88), 33);
+ fail("No WMTE for too few arguments");
+ } catch (WrongMethodTypeException e) {}
+ try {
+ // Too many arguments
+ assertEquals((int) mh.invokeExact(33, -4, 77, 88, 89, 90, 91), 33);
+ fail("No WMTE for too many arguments");
+ } catch (WrongMethodTypeException e) {}
+ assertEquals((int) mh.invokeExact(-4, 77, 88, 99, 111, 33), 77);
+ assertEquals((int) mh.invokeExact(77, 88, 99, 111, 33, -4), 88);
+ assertEquals((int) mh.invokeExact(88, 99, 111, 33, -4, 77), 99);
+ assertEquals((int) mh.invokeExact(99, 111, 33, -4, 77, 88), 111);
+ assertEquals((int) mh.invokeExact(111, 33, -4, 77, 88, 99), 111);
+
+ // A preposterous number of arguments.
+ mh = MethodHandles.lookup().findStatic(
+ Main.class, "Min2Print26",
+ MethodType.methodType(
+ // Return-type
+ int.class,
+ // Arguments
+ int.class, int.class, int.class, int.class, int.class, int.class, int.class, int.class,
+ int.class, int.class, int.class, int.class, int.class, int.class, int.class, int.class,
+ int.class, int.class, int.class, int.class, int.class, int.class, int.class, int.class,
+ int.class, int.class));
+ assertEquals(1, (int) mh.invokeExact(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25));
+ assertEquals(25, (int) mh.invokeExact(25, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24));
+ assertEquals(25, (int) mh.invokeExact(24, 25, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23));
+
+ try {
+ // Wrong argument type
+ mh.invokeExact("a");
+ fail("No WMTE for wrong arguments");
+ } catch (WrongMethodTypeException wmte) {}
+
+ try {
+ // Invoke on null handle.
+ MethodHandle mh0 = null;
+ mh0.invokeExact("bad");
+ fail("No NPE for you");
+ } catch (NullPointerException npe) {}
+
+ System.err.println("BasicTest done.");
+ }
+
+ private static boolean And(boolean lhs, boolean rhs) {
+ return lhs & rhs;
+ }
+
+ private static boolean Xor(boolean lhs, boolean rhs) {
+ return lhs ^ rhs;
+ }
+
+ private static String Multiply(String value, int n) {
+ String result = "";
+ for (int i = 0; i < n; ++i) {
+ result = value + result;
+ }
+ return result;
+ }
+
+ private static byte Multiply(byte value, byte n) {
+ return (byte)(value * n);
+ }
+
+ private static short Multiply(short value, short n) {
+ return (short)(value * n);
+ }
+
+ private static int Multiply(int value, int n) {
+ return value * n;
+ }
+
+ private static long Multiply(long value, long n) {
+ return value * n;
+ }
+
+ private static float Multiply(float value, float n) {
+ return value * n;
+ }
+
+ private static double Multiply(double value, double n) {
+ return value * n;
+ }
+
+ private static char Next(char c) {
+ return (char)(c + 1);
+ }
+
+ public static void $opt$ReturnBooleanTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh =
+ lookup.findStatic(Main.class, "And",
+ MethodType.methodType(boolean.class, boolean.class, boolean.class));
+ assertEquals(true, (boolean) mh.invokeExact(true, true));
+ assertEquals(false, (boolean) mh.invokeExact(true, false));
+ assertEquals(false, (boolean) mh.invokeExact(false, true));
+ assertEquals(false, (boolean) mh.invokeExact(false, false));
+ assertEquals(true, (boolean) mh.invoke(true, true));
+ assertEquals(false, (boolean) mh.invoke(true, false));
+ assertEquals(false, (boolean) mh.invoke(false, true));
+ assertEquals(false, (boolean) mh.invoke(false, false));
+
+ mh = lookup.findStatic(Main.class, "Xor",
+ MethodType.methodType(boolean.class, boolean.class, boolean.class));
+ assertEquals(false, (boolean) mh.invokeExact(true, true));
+ assertEquals(true, (boolean) mh.invokeExact(true, false));
+ assertEquals(true, (boolean) mh.invokeExact(false, true));
+ assertEquals(false, (boolean) mh.invokeExact(false, false));
+ assertEquals(false, (boolean) mh.invoke(true, true));
+ assertEquals(true, (boolean) mh.invoke(true, false));
+ assertEquals(true, (boolean) mh.invoke(false, true));
+ assertEquals(false, (boolean) mh.invoke(false, false));
+
+ System.err.println("$opt$ReturnBooleanTest done.");
+ }
+
+ public static void $opt$ReturnCharTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Next",
+ MethodType.methodType(char.class, char.class));
+ assertEquals('B', (char) mh.invokeExact('A'));
+ assertEquals((char) -55, (char) mh.invokeExact((char) -56));
+ System.err.println("$opt$ReturnCharTest done.");
+ }
+
+ public static void $opt$ReturnByteTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(byte.class, byte.class, byte.class));
+ assertEquals((byte) 30, (byte) mh.invokeExact((byte) 10, (byte) 3));
+ assertEquals((byte) -90, (byte) mh.invoke((byte) -10, (byte) 9));
+ System.err.println("$opt$ReturnByteTest done.");
+ }
+
+ public static void $opt$ReturnShortTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(short.class, short.class, short.class));
+ assertEquals((short) 3000, (short) mh.invokeExact((short) 1000, (short) 3));
+ assertEquals((short) -3000, (short) mh.invoke((short) -1000, (short) 3));
+ System.err.println("$opt$ReturnShortTest done.");
+ }
+
+ public static void $opt$ReturnIntTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(int.class, int.class, int.class));
+ assertEquals(3_000_000, (int) mh.invokeExact(1_000_000, 3));
+ assertEquals(-3_000_000, (int) mh.invoke(-1_000, 3_000));
+ System.err.println("$opt$ReturnIntTest done.");
+ }
+
+ public static void $opt$ReturnLongTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(long.class, long.class, long.class));
+ assertEquals(4_294_967_295_000L, (long) mh.invokeExact(1000L, 4_294_967_295L));
+ assertEquals(-4_294_967_295_000L, (long) mh.invoke(-1000L, 4_294_967_295L));
+ System.err.println("$opt$ReturnLongTest done.");
+ }
+
+ public static void $opt$ReturnFloatTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(float.class, float.class, float.class));
+ assertEquals(3.0F, (float) mh.invokeExact(1000.0F, 3e-3F));
+ assertEquals(-3.0F, (float) mh.invoke(-1000.0F, 3e-3F));
+ System.err.println("$opt$ReturnFloatTest done.");
+ }
+
+ public static void $opt$ReturnDoubleTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(double.class, double.class, double.class));
+ assertEquals(3033000.0, (double) mh.invokeExact(1000.0, 3.033e3));
+ assertEquals(-3033000.0, (double) mh.invoke(-1000.0, 3.033e3));
+ System.err.println("$opt$ReturnDoubleTest done.");
+ }
+
+ public static void $opt$ReturnStringTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(String.class, String.class, int.class));
+ assertEquals("100010001000", (String) mh.invokeExact("1000", 3));
+ assertEquals("100010001000", (String) mh.invoke("1000", 3));
+ System.err.println("$opt$ReturnStringTest done.");
+ }
+
+ public static void ReturnValuesTest() throws Throwable {
+ $opt$ReturnBooleanTest();
+ $opt$ReturnCharTest();
+ $opt$ReturnByteTest();
+ $opt$ReturnShortTest();
+ $opt$ReturnIntTest();
+ $opt$ReturnLongTest();
+ $opt$ReturnFloatTest();
+ $opt$ReturnDoubleTest();
+ $opt$ReturnStringTest();
+ System.err.println("ReturnValuesTest done.");
+ }
+
+ static class ValueHolder {
+ public boolean m_z;
+ public static boolean s_z;
+ }
+
+ public static void $opt$AccessorsTest() throws Throwable {
+ ValueHolder valueHolder = new ValueHolder();
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+
+ MethodHandle setMember = lookup.findSetter(ValueHolder.class, "m_z", boolean.class);
+ MethodHandle getMember = lookup.findGetter(ValueHolder.class, "m_z", boolean.class);
+ MethodHandle setStatic = lookup.findStaticSetter(ValueHolder.class, "s_z", boolean.class);
+ MethodHandle getStatic = lookup.findStaticGetter(ValueHolder.class, "s_z", boolean.class);
+
+ boolean [] values = { false, true, false, true, false };
+ for (boolean value : values) {
+ assertEquals((boolean) getStatic.invoke(), ValueHolder.s_z);
+ setStatic.invoke(value);
+ ValueHolder.s_z = value;
+ assertEquals(ValueHolder.s_z, value);
+ assertEquals((boolean) getStatic.invoke(), value);
+
+ assertEquals((boolean) getMember.invoke(valueHolder), valueHolder.m_z);
+ setMember.invoke(valueHolder, value);
+ valueHolder.m_z = value;
+ assertEquals(valueHolder.m_z, value);
+ assertEquals((boolean) getMember.invoke(valueHolder), value);
+ }
+ }
+
+ public static void main(String[] args) throws Throwable {
+ $opt$BasicTest();
+ ReturnValuesTest();
+ $opt$AccessorsTest();
+ }
+}
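The tests above lean on the difference between MethodHandle.invokeExact and MethodHandle.invoke: invokeExact requires the call site's erased signature to match the handle's type exactly, while invoke inserts asType() conversions such as boxing and casting. A standalone sketch of that distinction, independent of the test harness (the class name is illustrative):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;
    import java.lang.invoke.WrongMethodTypeException;

    public class ExactVersusInvoke {
      static int twice(int x) { return 2 * x; }

      public static void main(String[] args) throws Throwable {
        MethodHandle mh = MethodHandles.lookup().findStatic(
            ExactVersusInvoke.class, "twice", MethodType.methodType(int.class, int.class));

        int a = (int) mh.invokeExact(21);     // exact match: (int)int
        Integer b = (Integer) mh.invoke(21);  // invoke() boxes the int result to Integer
        System.out.println(a + " " + b);

        try {
          // The call site asks for (int)Integer, which does not exactly match (int)int.
          Integer c = (Integer) mh.invokeExact(21);
          System.out.println(c);
        } catch (WrongMethodTypeException expected) {
          System.out.println("invokeExact rejected the mismatched call site");
        }
      }
    }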
diff --git a/test/955-methodhandles-smali/run b/test/955-methodhandles-smali/run
deleted file mode 100755
index a9f1822..0000000
--- a/test/955-methodhandles-smali/run
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# make us exit on a failure
-set -e
-
-./default-run "$@" --experimental method-handles
diff --git a/test/956-methodhandles/run b/test/956-methodhandles/run
deleted file mode 100755
index a9f1822..0000000
--- a/test/956-methodhandles/run
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# make us exit on a failure
-set -e
-
-./default-run "$@" --experimental method-handles
diff --git a/test/956-methodhandles/src/Main.java b/test/956-methodhandles/src/Main.java
index 17b56b4..f8daba6 100644
--- a/test/956-methodhandles/src/Main.java
+++ b/test/956-methodhandles/src/Main.java
@@ -76,6 +76,7 @@
testStringConstructors();
testReturnValueConversions();
testVariableArity();
+ testVariableArity_MethodHandles_bind();
}
public static void testfindSpecial_invokeSuperBehaviour() throws Throwable {
@@ -1466,4 +1467,23 @@
fail();
} catch (WrongMethodTypeException e) {}
}
+
+  // The same tests as above, except that we use MethodHandles.bind instead of
+ // MethodHandle.bindTo.
+ public static void testVariableArity_MethodHandles_bind() throws Throwable {
+ VariableArityTester vat = new VariableArityTester();
+ MethodHandle mh = MethodHandles.lookup().bind(vat, "update",
+ MethodType.methodType(String.class, boolean[].class));
+ assertTrue(mh.isVarargsCollector());
+
+ assertEquals("[]", mh.invoke());
+ assertEquals("[true, false, true]", mh.invoke(true, false, true));
+ assertEquals("[true, false, true]", mh.invoke(new boolean[] { true, false, true}));
+ assertEquals("[false, true]", mh.invoke(Boolean.valueOf(false), Boolean.valueOf(true)));
+
+ try {
+ mh.invoke(true, true, 0);
+ fail();
+ } catch (WrongMethodTypeException e) {}
+ }
}
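The new testVariableArity_MethodHandles_bind exercises Lookup.bind, the receiver-bound counterpart of findVirtual followed by bindTo; the assertion on isVarargsCollector() checks that bind keeps the variable-arity flag of the underlying method. A minimal standalone sketch of the two spellings, using String.concat purely as an illustrative target (concat is not varargs, so only the equivalence of the two lookups is shown):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class BindVersusBindTo {
      public static void main(String[] args) throws Throwable {
        MethodHandles.Lookup lookup = MethodHandles.lookup();
        String receiver = "art";
        MethodType type = MethodType.methodType(String.class, String.class);

        // Lookup.bind resolves a virtual method on the receiver's class and binds the receiver.
        MethodHandle viaBind = lookup.bind(receiver, "concat", type);
        // Equivalent two-step spelling: resolve, then bind the receiver.
        MethodHandle viaBindTo = lookup.findVirtual(String.class, "concat", type).bindTo(receiver);

        System.out.println((String) viaBind.invoke("-test"));    // art-test
        System.out.println((String) viaBindTo.invoke("-test"));  // art-test
      }
    }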
diff --git a/test/957-methodhandle-transforms/run b/test/957-methodhandle-transforms/run
deleted file mode 100755
index a9f1822..0000000
--- a/test/957-methodhandle-transforms/run
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# make us exit on a failure
-set -e
-
-./default-run "$@" --experimental method-handles
diff --git a/test/957-methodhandle-transforms/src/Main.java b/test/957-methodhandle-transforms/src/Main.java
index 5806509..eebf55f 100644
--- a/test/957-methodhandle-transforms/src/Main.java
+++ b/test/957-methodhandle-transforms/src/Main.java
@@ -33,6 +33,7 @@
testBindTo();
testFilterReturnValue();
testPermuteArguments();
+ testInvokers();
}
public static void testThrowException() throws Throwable {
@@ -40,17 +41,17 @@
IllegalArgumentException.class);
if (handle.type().returnType() != String.class) {
- System.out.println("Unexpected return type for handle: " + handle +
+ fail("Unexpected return type for handle: " + handle +
" [ " + handle.type() + "]");
}
final IllegalArgumentException iae = new IllegalArgumentException("boo!");
try {
handle.invoke(iae);
- System.out.println("Expected an exception of type: java.lang.IllegalArgumentException");
+ fail("Expected an exception of type: java.lang.IllegalArgumentException");
} catch (IllegalArgumentException expected) {
if (expected != iae) {
- System.out.println("Wrong exception: expected " + iae + " but was " + expected);
+ fail("Wrong exception: expected " + iae + " but was " + expected);
}
}
}
@@ -262,7 +263,7 @@
array[0] = 42;
int value = (int) getter.invoke(array, 0);
if (value != 42) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
try {
@@ -284,7 +285,7 @@
array[0] = 42;
long value = (long) getter.invoke(array, 0);
if (value != 42l) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -294,7 +295,7 @@
array[0] = 42;
short value = (short) getter.invoke(array, 0);
if (value != 42l) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -304,7 +305,7 @@
array[0] = 42;
char value = (char) getter.invoke(array, 0);
if (value != 42l) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -314,7 +315,7 @@
array[0] = (byte) 0x8;
byte value = (byte) getter.invoke(array, 0);
if (value != (byte) 0x8) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -324,7 +325,7 @@
array[0] = true;
boolean value = (boolean) getter.invoke(array, 0);
if (!value) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -334,7 +335,7 @@
array[0] = 42.0f;
float value = (float) getter.invoke(array, 0);
if (value != 42.0f) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -344,7 +345,7 @@
array[0] = 42.0;
double value = (double) getter.invoke(array, 0);
if (value != 42.0) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -372,10 +373,10 @@
setter.invoke(array, 1, 43);
if (array[0] != 42) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
if (array[1] != 43) {
- System.out.println("Unexpected value: " + array[1]);
+ fail("Unexpected value: " + array[1]);
}
try {
@@ -396,7 +397,7 @@
long[] array = new long[1];
setter.invoke(array, 0, 42l);
if (array[0] != 42l) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -405,7 +406,7 @@
short[] array = new short[1];
setter.invoke(array, 0, (short) 42);
if (array[0] != 42l) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -414,7 +415,7 @@
char[] array = new char[1];
setter.invoke(array, 0, (char) 42);
if (array[0] != 42) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -423,7 +424,7 @@
byte[] array = new byte[1];
setter.invoke(array, 0, (byte) 0x8);
if (array[0] != (byte) 0x8) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -432,7 +433,7 @@
boolean[] array = new boolean[1];
setter.invoke(array, 0, true);
if (!array[0]) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -441,7 +442,7 @@
float[] array = new float[1];
setter.invoke(array, 0, 42.0f);
if (array[0] != 42.0f) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -450,7 +451,7 @@
double[] array = new double[1];
setter.invoke(array, 0, 42.0);
if (array[0] != 42.0) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -471,7 +472,7 @@
MethodHandle identity = MethodHandles.identity(boolean.class);
boolean value = (boolean) identity.invoke(false);
if (value) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -479,7 +480,7 @@
MethodHandle identity = MethodHandles.identity(byte.class);
byte value = (byte) identity.invoke((byte) 0x8);
if (value != (byte) 0x8) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -487,7 +488,7 @@
MethodHandle identity = MethodHandles.identity(char.class);
char value = (char) identity.invoke((char) -56);
if (value != (char) -56) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -495,7 +496,7 @@
MethodHandle identity = MethodHandles.identity(short.class);
short value = (short) identity.invoke((short) -59);
if (value != (short) -59) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + Short.toString(value));
}
}
@@ -503,7 +504,7 @@
MethodHandle identity = MethodHandles.identity(int.class);
int value = (int) identity.invoke(52);
if (value != 52) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -511,7 +512,7 @@
MethodHandle identity = MethodHandles.identity(long.class);
long value = (long) identity.invoke(-76l);
if (value != (long) -76) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -519,7 +520,7 @@
MethodHandle identity = MethodHandles.identity(float.class);
float value = (float) identity.invoke(56.0f);
if (value != (float) 56.0f) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -527,7 +528,7 @@
MethodHandle identity = MethodHandles.identity(double.class);
double value = (double) identity.invoke((double) 72.0);
if (value != (double) 72.0) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -544,28 +545,28 @@
MethodHandle constant = MethodHandles.constant(int.class, 56);
int value = (int) constant.invoke();
if (value != 56) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
// short constant values are converted to int.
constant = MethodHandles.constant(int.class, (short) 52);
value = (int) constant.invoke();
if (value != 52) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
// char constant values are converted to int.
constant = MethodHandles.constant(int.class, (char) 'b');
value = (int) constant.invoke();
if (value != (int) 'b') {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
// byte constant values are converted to int.
constant = MethodHandles.constant(int.class, (byte) 0x1);
value = (int) constant.invoke();
if (value != 1) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
// boolean, float, double and long primitive constants are not convertible
@@ -600,13 +601,13 @@
MethodHandle constant = MethodHandles.constant(long.class, 56l);
long value = (long) constant.invoke();
if (value != 56l) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
constant = MethodHandles.constant(long.class, (int) 56);
value = (long) constant.invoke();
if (value != 56l) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -615,7 +616,7 @@
MethodHandle constant = MethodHandles.constant(byte.class, (byte) 0x12);
byte value = (byte) constant.invoke();
if (value != (byte) 0x12) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -624,7 +625,7 @@
MethodHandle constant = MethodHandles.constant(boolean.class, true);
boolean value = (boolean) constant.invoke();
if (!value) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -633,7 +634,7 @@
MethodHandle constant = MethodHandles.constant(char.class, 'f');
char value = (char) constant.invoke();
if (value != 'f') {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -642,7 +643,7 @@
MethodHandle constant = MethodHandles.constant(short.class, (short) 123);
short value = (short) constant.invoke();
if (value != (short) 123) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -651,7 +652,7 @@
MethodHandle constant = MethodHandles.constant(float.class, 56.0f);
float value = (float) constant.invoke();
if (value != 56.0f) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -660,7 +661,7 @@
MethodHandle constant = MethodHandles.constant(double.class, 256.0);
double value = (double) constant.invoke();
if (value != 256.0) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -678,13 +679,13 @@
char value = (char) stringCharAt.invoke("foo", 0);
if (value != 'f') {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
MethodHandle bound = stringCharAt.bindTo("foo");
value = (char) bound.invoke(0);
if (value != 'f') {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
try {
@@ -706,7 +707,7 @@
bound = integerParseInt.bindTo("78452");
int intValue = (int) bound.invoke();
if (intValue != 78452) {
- System.out.println("Unexpected value: " + intValue);
+ fail("Unexpected value: " + intValue);
}
}
@@ -745,11 +746,11 @@
boolean value = (boolean) adapter.invoke((int) 42);
if (!value) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
value = (boolean) adapter.invoke((int) 43);
if (value) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -764,7 +765,7 @@
int value = (int) adapter.invoke("56");
if (value != 57) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -779,7 +780,7 @@
int value = (int) adapter.invoke();
if (value != 42) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
}
@@ -791,7 +792,7 @@
return;
}
- System.out.println("Unexpected arguments: " + a + ", " + b + ", " + c
+ fail("Unexpected arguments: " + a + ", " + b + ", " + c
+ ", " + d + ", " + e + ", " + f + ", " + g + ", " + h);
}
@@ -800,7 +801,7 @@
return;
}
- System.out.println("Unexpected arguments: " + a + ", " + b);
+ fail("Unexpected arguments: " + a + ", " + b);
}
public static void testPermuteArguments() throws Throwable {
@@ -888,11 +889,48 @@
}
}
+ private static Object returnBar() {
+ return "bar";
+ }
+
+ public static void testInvokers() throws Throwable {
+ final MethodType targetType = MethodType.methodType(String.class, String.class);
+ final MethodHandle target = MethodHandles.lookup().findVirtual(
+ String.class, "concat", targetType);
+
+ MethodHandle invoker = MethodHandles.invoker(target.type());
+ assertEquals("barbar", (String) invoker.invoke(target, "bar", "bar"));
+ assertEquals("barbar", (String) invoker.invoke(target, (Object) returnBar(), "bar"));
+ try {
+ String foo = (String) invoker.invoke(target, "bar", "bar", 24);
+ fail();
+ } catch (WrongMethodTypeException expected) {
+ }
+
+ MethodHandle exactInvoker = MethodHandles.exactInvoker(target.type());
+ assertEquals("barbar", (String) exactInvoker.invoke(target, "bar", "bar"));
+ try {
+ String foo = (String) exactInvoker.invoke(target, (Object) returnBar(), "bar");
+ fail();
+ } catch (WrongMethodTypeException expected) {
+ }
+ try {
+ String foo = (String) exactInvoker.invoke(target, "bar", "bar", 24);
+ fail();
+ } catch (WrongMethodTypeException expected) {
+ }
+ }
+
public static void fail() {
System.out.println("FAIL");
Thread.dumpStack();
}
+ public static void fail(String message) {
+ System.out.println("fail: " + message);
+ Thread.dumpStack();
+ }
+
public static void assertEquals(String s1, String s2) {
if (s1 == s2) {
return;
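testInvokers above contrasts MethodHandles.invoker with MethodHandles.exactInvoker. Roughly, both produce a handle that invokes a target handle passed as the leading argument; the generic invoker adapts the target with asType(), while the exact invoker requires the target's type to match the requested type exactly. A standalone sketch under that reading (String.concat is used only as an illustrative target):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;
    import java.lang.invoke.WrongMethodTypeException;

    public class InvokerSketch {
      public static void main(String[] args) throws Throwable {
        MethodType concatType = MethodType.methodType(String.class, String.class);
        MethodHandle concat =
            MethodHandles.lookup().findVirtual(String.class, "concat", concatType);
        // A convertible but not identical target type: (Object, String)String.
        MethodHandle looseConcat = concat.asType(
            MethodType.methodType(String.class, Object.class, String.class));

        MethodHandle invoker = MethodHandles.invoker(concatType);
        MethodHandle exactInvoker = MethodHandles.exactInvoker(concatType);

        // The generic invoker adapts the target with asType(), so the loose target works.
        System.out.println((String) invoker.invoke(looseConcat, "foo", "bar"));  // foobar

        // The exact invoker calls the target via invokeExact, so its type must match exactly.
        System.out.println((String) exactInvoker.invoke(concat, "foo", "bar"));  // foobar
        try {
          String s = (String) exactInvoker.invoke(looseConcat, "foo", "bar");
          System.out.println(s);
        } catch (WrongMethodTypeException expected) {
          System.out.println("exactInvoker requires an exact target type");
        }
      }
    }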
diff --git a/test/958-methodhandle-emulated-stackframe/run b/test/958-methodhandle-emulated-stackframe/run
deleted file mode 100755
index a9f1822..0000000
--- a/test/958-methodhandle-emulated-stackframe/run
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# make us exit on a failure
-set -e
-
-./default-run "$@" --experimental method-handles
diff --git a/test/959-invoke-polymorphic-accessors/run b/test/959-invoke-polymorphic-accessors/run
deleted file mode 100644
index a9f1822..0000000
--- a/test/959-invoke-polymorphic-accessors/run
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# make us exit on a failure
-set -e
-
-./default-run "$@" --experimental method-handles
diff --git a/test/Android.bp b/test/Android.bp
index a223c3a..1ea1252 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -264,6 +264,8 @@
"920-objects/objects.cc",
"922-properties/properties.cc",
"923-monitors/monitors.cc",
+ "924-threads/threads.cc",
+ "927-timers/timers.cc",
],
shared_libs: [
"libbase",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index fd3a897..55cef97 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -228,9 +228,15 @@
# Disable 153-reference-stress temporarily until a fix arrives. b/33389022.
# Disable 080-oom-fragmentation due to flakes. b/33795328
+# Disable 497-inlining-and-class-loader and 542-unresolved-access-check until
+# they are rewritten. These tests use a broken class loader that tries to
+# register a dex file that's already registered with a different loader.
+# b/34193123
ART_TEST_RUN_TEST_SKIP += \
153-reference-stress \
- 080-oom-fragmentation
+ 080-oom-fragmentation \
+ 497-inlining-and-class-loader \
+ 542-unresolved-access-check
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
@@ -296,6 +302,9 @@
921-hello-failure \
922-properties \
923-monitors \
+ 924-threads \
+ 926-multi-obsolescence \
+ 927-timers \
ifneq (,$(filter target,$(TARGET_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -564,6 +573,7 @@
915-obsolete-2 \
917-fields-transformation \
919-obsolete-fields \
+ 926-multi-obsolescence \
ifneq (,$(filter jit,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -721,6 +731,16 @@
TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS :=
+# Tests that check semantics for a non-debuggable app.
+TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS := \
+ 909-attach-agent \
+
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),debuggable,$(TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+
+TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS :=
+
# Tests incompatible with bisection bug search. Sorted by incompatibility reason.
# 000 through 595 do not compile anything. 089 tests a build failure. 018 through 137
# run dalvikvm more than once. 115 and 088 assume they are always compiled.
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 7451cf9..1b6fc70 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -169,10 +169,9 @@
}
jit::JitCodeCache* code_cache = jit->GetCodeCache();
- OatQuickMethodHeader* header = nullptr;
while (true) {
- header = OatQuickMethodHeader::FromEntryPoint(method->GetEntryPointFromQuickCompiledCode());
- if (code_cache->ContainsPc(header->GetCode())) {
+ const void* pc = method->GetEntryPointFromQuickCompiledCode();
+ if (code_cache->ContainsPc(pc)) {
break;
} else {
// Sleep to yield to the compiler thread.
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 8245947..5f1071f 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -62,6 +62,7 @@
TEST_VDEX="n"
TEST_IS_NDEBUG="n"
APP_IMAGE="y"
+VDEX_FILTER=""
while true; do
if [ "x$1" = "x--quiet" ]; then
@@ -256,6 +257,11 @@
elif [ "x$1" = "x--vdex" ]; then
TEST_VDEX="y"
shift
+ elif [ "x$1" = "x--vdex-filter" ]; then
+ shift
+ option="$1"
+ VDEX_FILTER="--compiler-filter=$option"
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
exit 1
@@ -322,6 +328,28 @@
DEBUGGER_OPTS="-agentlib:jdwp=transport=dt_socket,address=$PORT,server=y,suspend=y"
fi
+if [ "$IS_JVMTI_TEST" = "y" ]; then
+ plugin=libopenjdkjvmtid.so
+ agent=libtiagentd.so
+ lib=tiagentd
+ if [[ "$TEST_IS_NDEBUG" = "y" ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+ fi
+
+ ARGS="${ARGS} ${lib}"
+ if [[ "$USE_JVM" = "y" ]]; then
+ FLAGS="${FLAGS} -agentpath:${ANDROID_HOST_OUT}/nativetest64/${agent}=${TEST_NAME},jvm"
+ else
+ FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},art"
+ FLAGS="${FLAGS} -Xplugin:${plugin}"
+ FLAGS="${FLAGS} -Xfully-deoptable"
+ # Always make the compilation be debuggable.
+ COMPILE_FLAGS="${COMPILE_FLAGS} --debuggable"
+ fi
+fi
+
if [ "$USE_JVM" = "y" ]; then
export LD_LIBRARY_PATH=${ANDROID_HOST_OUT}/lib64
# Xmx is necessary since we don't pass down the ART flags to JVM.
@@ -387,28 +415,6 @@
fi
fi
-if [ "$IS_JVMTI_TEST" = "y" ]; then
- plugin=libopenjdkjvmtid.so
- agent=libtiagentd.so
- lib=tiagentd
- if [[ "$TEST_IS_NDEBUG" = "y" ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
- fi
-
- ARGS="${ARGS} ${lib}"
- if [[ "$USE_JVM" = "y" ]]; then
- FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},jvm"
- else
- FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},art"
- FLAGS="${FLAGS} -Xplugin:${plugin}"
- FLAGS="${FLAGS} -Xfully-deoptable"
- # Always make the compilation be debuggable.
- COMPILE_FLAGS="${COMPILE_FLAGS} --debuggable"
- fi
-fi
-
JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
if [ "$RELOCATE" = "y" ]; then
@@ -514,7 +520,7 @@
dex2oat_cmdline="timeout -k 1m -s SIGRTMIN+2 1m ${dex2oat_cmdline}"
fi
if [ "$TEST_VDEX" = "y" ]; then
- vdex_cmdline="${dex2oat_cmdline} --input-vdex=$DEX_LOCATION/oat/$ISA/$TEST_NAME.vdex"
+ vdex_cmdline="${dex2oat_cmdline} ${VDEX_FILTER} --input-vdex=$DEX_LOCATION/oat/$ISA/$TEST_NAME.vdex"
fi
fi
diff --git a/test/run-test b/test/run-test
index ea9622a..a913e78 100755
--- a/test/run-test
+++ b/test/run-test
@@ -354,6 +354,11 @@
elif [ "x$1" = "x--vdex" ]; then
run_args="${run_args} --vdex"
shift
+ elif [ "x$1" = "x--vdex-filter" ]; then
+ shift
+ filter=$1
+ run_args="${run_args} --vdex-filter $filter"
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
usage="yes"
@@ -489,7 +494,7 @@
fi
elif [ "$runtime" = "jvm" ]; then
# TODO: Detect whether the host is 32-bit or 64-bit.
- run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib64"
+ run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib64:${ANDROID_HOST_OUT}/nativetest64"
fi
if [ "$have_image" = "no" ]; then
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
index 6f98f10..2c6d3ed 100644
--- a/test/ti-agent/common_helper.cc
+++ b/test/ti-agent/common_helper.cc
@@ -62,48 +62,64 @@
namespace common_redefine {
-static void throwRedefinitionError(jvmtiEnv* jvmti, JNIEnv* env, jclass target, jvmtiError res) {
+static void throwRedefinitionError(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jint num_targets,
+ jclass* target,
+ jvmtiError res) {
std::stringstream err;
- char* signature = nullptr;
- char* generic = nullptr;
- jvmti->GetClassSignature(target, &signature, &generic);
char* error = nullptr;
jvmti->GetErrorName(res, &error);
- err << "Failed to redefine class <" << signature << "> due to " << error;
+ err << "Failed to redefine class";
+ if (num_targets > 1) {
+ err << "es";
+ }
+ err << " <";
+ for (jint i = 0; i < num_targets; i++) {
+ char* signature = nullptr;
+ char* generic = nullptr;
+ jvmti->GetClassSignature(target[i], &signature, &generic);
+ if (i != 0) {
+ err << ", ";
+ }
+ err << signature;
+ jvmti->Deallocate(reinterpret_cast<unsigned char*>(signature));
+ jvmti->Deallocate(reinterpret_cast<unsigned char*>(generic));
+ }
+ err << "> due to " << error;
std::string message = err.str();
- jvmti->Deallocate(reinterpret_cast<unsigned char*>(signature));
- jvmti->Deallocate(reinterpret_cast<unsigned char*>(generic));
jvmti->Deallocate(reinterpret_cast<unsigned char*>(error));
env->ThrowNew(env->FindClass("java/lang/Exception"), message.c_str());
}
-using RedefineDirectFunction = jvmtiError (*)(jvmtiEnv*, jclass, jint, const unsigned char*);
-static void DoClassTransformation(jvmtiEnv* jvmti_env,
- JNIEnv* env,
- jclass target,
- jbyteArray class_file_bytes,
- jbyteArray dex_file_bytes) {
- jbyteArray desired_array = IsJVM() ? class_file_bytes : dex_file_bytes;
- jint len = static_cast<jint>(env->GetArrayLength(desired_array));
- const unsigned char* redef_bytes = reinterpret_cast<const unsigned char*>(
- env->GetByteArrayElements(desired_array, nullptr));
- jvmtiError res;
- if (IsJVM()) {
- jvmtiClassDefinition def;
- def.klass = target;
- def.class_byte_count = static_cast<jint>(len);
- def.class_bytes = redef_bytes;
- res = jvmti_env->RedefineClasses(1, &def);
- } else {
- RedefineDirectFunction f =
- reinterpret_cast<RedefineDirectFunction>(jvmti_env->functions->reserved3);
- res = f(jvmti_env, target, len, redef_bytes);
+static void DoMultiClassRedefine(jvmtiEnv* jvmti_env,
+ JNIEnv* env,
+ jint num_redefines,
+ jclass* targets,
+ jbyteArray* class_file_bytes,
+ jbyteArray* dex_file_bytes) {
+ std::vector<jvmtiClassDefinition> defs;
+ for (jint i = 0; i < num_redefines; i++) {
+ jbyteArray desired_array = IsJVM() ? class_file_bytes[i] : dex_file_bytes[i];
+ jint len = static_cast<jint>(env->GetArrayLength(desired_array));
+ const unsigned char* redef_bytes = reinterpret_cast<const unsigned char*>(
+ env->GetByteArrayElements(desired_array, nullptr));
+ defs.push_back({targets[i], static_cast<jint>(len), redef_bytes});
}
+ jvmtiError res = jvmti_env->RedefineClasses(num_redefines, defs.data());
if (res != JVMTI_ERROR_NONE) {
- throwRedefinitionError(jvmti_env, env, target, res);
+ throwRedefinitionError(jvmti_env, env, num_redefines, targets, res);
}
}
+static void DoClassRedefine(jvmtiEnv* jvmti_env,
+ JNIEnv* env,
+ jclass target,
+ jbyteArray class_file_bytes,
+ jbyteArray dex_file_bytes) {
+ return DoMultiClassRedefine(jvmti_env, env, 1, &target, &class_file_bytes, &dex_file_bytes);
+}
+
// Magic JNI export that classes can use for redefining classes.
// To use classes should declare this as a native function with signature (Ljava/lang/Class;[B[B)V
extern "C" JNIEXPORT void JNICALL Java_Main_doCommonClassRedefinition(JNIEnv* env,
@@ -111,7 +127,38 @@
jclass target,
jbyteArray class_file_bytes,
jbyteArray dex_file_bytes) {
- DoClassTransformation(jvmti_env, env, target, class_file_bytes, dex_file_bytes);
+ DoClassRedefine(jvmti_env, env, target, class_file_bytes, dex_file_bytes);
+}
+
+// Magic JNI export that classes can use for redefining classes.
+// To use, classes should declare this as a native function with signature
+// ([Ljava/lang/Class;[[B[[B)V
+extern "C" JNIEXPORT void JNICALL Java_Main_doCommonMultiClassRedefinition(
+ JNIEnv* env,
+ jclass,
+ jobjectArray targets,
+ jobjectArray class_file_bytes,
+ jobjectArray dex_file_bytes) {
+ std::vector<jclass> classes;
+ std::vector<jbyteArray> class_files;
+ std::vector<jbyteArray> dex_files;
+ jint len = env->GetArrayLength(targets);
+ if (len != env->GetArrayLength(class_file_bytes) || len != env->GetArrayLength(dex_file_bytes)) {
+ env->ThrowNew(env->FindClass("java/lang/IllegalArgumentException"),
+ "the three array arguments passed to this function have different lengths!");
+ return;
+ }
+ for (jint i = 0; i < len; i++) {
+ classes.push_back(static_cast<jclass>(env->GetObjectArrayElement(targets, i)));
+ dex_files.push_back(static_cast<jbyteArray>(env->GetObjectArrayElement(dex_file_bytes, i)));
+ class_files.push_back(static_cast<jbyteArray>(env->GetObjectArrayElement(class_file_bytes, i)));
+ }
+ return DoMultiClassRedefine(jvmti_env,
+ env,
+ len,
+ classes.data(),
+ class_files.data(),
+ dex_files.data());
}
// Don't do anything
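The new Java_Main_doCommonMultiClassRedefinition expects parallel arrays of target classes, class-file bytes, and dex-file bytes, and throws IllegalArgumentException when the array lengths differ. A hypothetical sketch of the Java side a test could declare against it (the class name and helper are illustrative; for the Java_Main_... JNI symbol above to bind, a real test declares the native method in its Main class):

    public class MultiRedefineSketch {
      // Provided by the ti-agent at runtime; JNI-level signature ([Ljava/lang/Class;[[B[[B)V.
      // Not implemented here.
      public static native void doCommonMultiClassRedefinition(
          Class<?>[] targets, byte[][] classFileBytes, byte[][] dexFileBytes);

      // Redefines two classes in one RedefineClasses call. The three arrays must have
      // equal lengths, or the native side throws IllegalArgumentException up front.
      public static void redefineBoth(Class<?> first, byte[] firstClass, byte[] firstDex,
                                      Class<?> second, byte[] secondClass, byte[] secondDex) {
        doCommonMultiClassRedefinition(
            new Class<?>[] { first, second },
            new byte[][] { firstClass, secondClass },
            new byte[][] { firstDex, secondDex });
      }
    }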
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index 33e1321..521e672 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -26,21 +26,7 @@
#include "common_helper.h"
#include "901-hello-ti-agent/basics.h"
-#include "903-hello-tagging/tagging.h"
-#include "904-object-allocation/tracking.h"
-#include "905-object-free/tracking_free.h"
-#include "906-iterate-heap/iterate_heap.h"
-#include "907-get-loaded-classes/get_loaded_classes.h"
-#include "908-gc-start-finish/gc_callbacks.h"
#include "909-attach-agent/attach.h"
-#include "910-methods/methods.h"
-#include "911-get-stack-trace/stack_trace.h"
-#include "912-classes/classes.h"
-#include "913-heaps/heaps.h"
-#include "918-fields/fields.h"
-#include "920-objects/objects.h"
-#include "922-properties/properties.h"
-#include "923-monitors/monitors.h"
namespace art {
@@ -55,31 +41,31 @@
OnAttach attach;
};
-// A list of all the agents we have for testing.
+// A trivial OnLoad implementation that only initializes the global jvmti_env.
+static jint MinimalOnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ SetAllCapabilities(jvmti_env);
+ return 0;
+}
+
+// A list of all the non-standard agents we have for testing. All other agents will use
+// MinimalOnLoad.
AgentLib agents[] = {
{ "901-hello-ti-agent", Test901HelloTi::OnLoad, nullptr },
{ "902-hello-transformation", common_redefine::OnLoad, nullptr },
- { "903-hello-tagging", Test903HelloTagging::OnLoad, nullptr },
- { "904-object-allocation", Test904ObjectAllocation::OnLoad, nullptr },
- { "905-object-free", Test905ObjectFree::OnLoad, nullptr },
- { "906-iterate-heap", Test906IterateHeap::OnLoad, nullptr },
- { "907-get-loaded-classes", Test907GetLoadedClasses::OnLoad, nullptr },
- { "908-gc-start-finish", Test908GcStartFinish::OnLoad, nullptr },
{ "909-attach-agent", nullptr, Test909AttachAgent::OnAttach },
- { "910-methods", Test910Methods::OnLoad, nullptr },
- { "911-get-stack-trace", Test911GetStackTrace::OnLoad, nullptr },
- { "912-classes", Test912Classes::OnLoad, nullptr },
- { "913-heaps", Test913Heaps::OnLoad, nullptr },
{ "914-hello-obsolescence", common_redefine::OnLoad, nullptr },
{ "915-obsolete-2", common_redefine::OnLoad, nullptr },
{ "916-obsolete-jit", common_redefine::OnLoad, nullptr },
{ "917-fields-transformation", common_redefine::OnLoad, nullptr },
- { "918-fields", Test918Fields::OnLoad, nullptr },
{ "919-obsolete-fields", common_redefine::OnLoad, nullptr },
- { "920-objects", Test920Objects::OnLoad, nullptr },
{ "921-hello-failure", common_redefine::OnLoad, nullptr },
- { "922-properties", Test922Properties::OnLoad, nullptr },
- { "923-monitors", Test923Monitors::OnLoad, nullptr },
+ { "926-multi-obsolescence", common_redefine::OnLoad, nullptr },
};
static AgentLib* FindAgent(char* name) {
@@ -120,18 +106,21 @@
printf("Unable to find agent name in options: %s\n", options);
return -1;
}
- AgentLib* lib = FindAgent(name_option);
- if (lib == nullptr) {
- printf("Unable to find agent named: %s, add it to the list in test/ti-agent/common_load.cc\n",
- name_option);
- return -2;
- }
- if (lib->load == nullptr) {
- printf("agent: %s does not include an OnLoad method.\n", name_option);
- return -3;
- }
+
SetIsJVM(remaining_options);
- return lib->load(vm, remaining_options, reserved);
+
+ AgentLib* lib = FindAgent(name_option);
+ OnLoad fn = nullptr;
+ if (lib == nullptr) {
+ fn = &MinimalOnLoad;
+ } else {
+ if (lib->load == nullptr) {
+ printf("agent: %s does not include an OnLoad method.\n", name_option);
+ return -3;
+ }
+ fn = lib->load;
+ }
+ return fn(vm, remaining_options, reserved);
}
extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM* vm, char* options, void* reserved) {