Merge "Add ClassPreDefine hook."
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index d8b780a..c87075f 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -87,7 +87,7 @@
ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested
ART_GTEST_atomic_method_ref_map_test_DEX_DEPS := Interfaces
-ART_GTEST_class_linker_test_DEX_DEPS := ErroneousA ErroneousB Interfaces MethodTypes MultiDex MyClass Nested Statics StaticsFromCode
+ART_GTEST_class_linker_test_DEX_DEPS := AllFields ErroneousA ErroneousB Interfaces MethodTypes MultiDex MyClass Nested Statics StaticsFromCode
ART_GTEST_class_table_test_DEX_DEPS := XandY
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex
ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 3db7306..18a9165 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -53,7 +53,8 @@
// Write line table for given set of methods.
// Returns the number of bytes written.
size_t WriteCompilationUnit(ElfCompilationUnit& compilation_unit) {
- const bool is64bit = Is64BitInstructionSet(builder_->GetIsa());
+ const InstructionSet isa = builder_->GetIsa();
+ const bool is64bit = Is64BitInstructionSet(isa);
const Elf_Addr base_address = compilation_unit.is_code_address_text_relative
? builder_->GetText()->GetAddress()
: 0;
@@ -66,7 +67,7 @@
std::unordered_map<std::string, size_t> directories_map;
int code_factor_bits_ = 0;
int dwarf_isa = -1;
- switch (builder_->GetIsa()) {
+ switch (isa) {
case kArm: // arm actually means thumb2.
case kThumb2:
code_factor_bits_ = 1;  // 16-bit instructions
@@ -103,7 +104,7 @@
for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
StackMap stack_map = code_info.GetStackMapAt(s, encoding);
DCHECK(stack_map.IsValid());
- const uint32_t pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding);
+ const uint32_t pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding, isa);
const int32_t dex = stack_map.GetDexPc(encoding.stack_map_encoding);
pc2dex_map.push_back({pc, dex});
if (stack_map.HasDexRegisterMap(encoding.stack_map_encoding)) {
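
Note: the hoisted `isa` also feeds the `code_factor_bits_` switch above. DWARF line programs may advance addresses in multiples of a minimum instruction size, so fixed-width ISAs can shift pc deltas right before encoding them. A minimal sketch of that idea, with a hypothetical helper name (not this writer's real API):

    #include <cstdint>

    // Thumb2 instructions are 2-byte aligned (shift by 1); ARM64/MIPS are
    // 4-byte aligned (shift by 2); x86/x86-64 are byte-granular (shift by 0).
    uint32_t EncodeAddressAdvance(uint32_t pc_delta, int code_factor_bits) {
      return pc_delta >> code_factor_bits;  // smaller ULEB128 in .debug_line
    }
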
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index 9645643..bce5387 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -92,7 +92,8 @@
bool is64bitValue,
uint64_t compilation_unit_code_address,
uint32_t dex_pc_low,
- uint32_t dex_pc_high) {
+ uint32_t dex_pc_high,
+ InstructionSet isa) {
std::vector<VariableLocation> variable_locations;
// Get stack maps sorted by pc (they might not be sorted internally).
@@ -111,7 +112,7 @@
// The main reason for this is to save space by avoiding undefined gaps.
continue;
}
- const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding.stack_map_encoding);
+ const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding.stack_map_encoding, isa);
DCHECK_LE(pc_offset, method_info->code_size);
DCHECK_LE(compilation_unit_code_address, method_info->code_address);
const uint32_t low_pc = dchecked_integral_cast<uint32_t>(
@@ -196,7 +197,8 @@
is64bitValue,
compilation_unit_code_address,
dex_pc_low,
- dex_pc_high);
+ dex_pc_high,
+ isa);
// Write .debug_loc entries.
dwarf::Writer<> debug_loc(debug_loc_buffer);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index faf8b41..c03ffca 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -284,7 +284,7 @@
verification_results_(verification_results),
compiler_(Compiler::Create(this, compiler_kind)),
compiler_kind_(compiler_kind),
- instruction_set_(instruction_set == kArm ? kThumb2: instruction_set),
+ instruction_set_(instruction_set == kArm ? kThumb2 : instruction_set),
instruction_set_features_(instruction_set_features),
requires_constructor_barrier_lock_("constructor barrier lock"),
compiled_classes_lock_("compiled classes lock"),
@@ -1251,7 +1251,7 @@
}
}
- // java.lang.Reference visitor for VisitReferences.
+ // java.lang.ref.Reference visitor for VisitReferences.
void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const {}
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index f9e5cb9..eac46e5 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -61,7 +61,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- StackMapStream stack_maps(&allocator);
+ StackMapStream stack_maps(&allocator, kRuntimeISA);
stack_maps.BeginStackMapEntry(/* dex_pc */ 3u,
/* native_pc_offset */ 3u,
/* register_mask */ 0u,
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 4109345..459aca3 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1166,9 +1166,9 @@
// belongs.
oat_index = GetOatIndexForDexCache(dex_cache);
ImageInfo& image_info = GetImageInfo(oat_index);
- {
- // Note: This table is only accessed from the image writer, avoid locking to prevent lock
- // order violations from root visiting.
+ if (!compile_app_image_) {
+ // Note: Avoid locking to prevent lock order violations from root visiting;
+ // image_info.class_table_ is only accessed from the image writer.
image_info.class_table_->InsertWithoutLocks(as_klass);
}
for (LengthPrefixedArray<ArtField>* cur_fields : fields) {
@@ -1265,7 +1265,14 @@
// class loader.
mirror::ClassLoader* class_loader = obj->AsClassLoader();
if (class_loader->GetClassTable() != nullptr) {
+ DCHECK(compile_app_image_);
+ DCHECK(class_loaders_.empty());
class_loaders_.insert(class_loader);
+ ImageInfo& image_info = GetImageInfo(oat_index);
+ // Note: Avoid locking to prevent lock order violations from root visiting;
+      // image_info.class_table_ is only accessed from the image writer
+ // and class_loader->GetClassTable() is iterated but not modified.
+ image_info.class_table_->CopyWithoutLocks(*class_loader->GetClassTable());
}
}
AssignImageBinSlot(obj, oat_index);
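
Note: both `WithoutLocks` call sites above rely on the image writer being single-threaded with respect to these tables, so skipping the lock cannot race; it only avoids acquiring a lock while root visiting may already hold others. A rough sketch of the pattern (invented names, not the real ClassTable API):

    #include <mutex>
    #include <set>

    class SketchClassTable {
     public:
      // Runtime path: callers may race, so take the lock.
      void Insert(const void* klass) {
        std::lock_guard<std::mutex> guard(lock_);
        classes_.insert(klass);
      }
      // Image-writer-only path: single-threaded by construction, so skipping
      // the lock avoids lock-order violations during root visiting.
      void InsertWithoutLocks(const void* klass) { classes_.insert(klass); }
     private:
      std::mutex lock_;
      std::set<const void*> classes_;
    };
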
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 8ca0ffe..ba654f4 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -160,7 +160,7 @@
while (HasNext()) {
ManagedRegister in_reg = CurrentParamRegister();
if (!in_reg.IsNoRegister()) {
- int32_t size = IsParamALongOrDouble(itr_args_)? 8 : 4;
+ int32_t size = IsParamALongOrDouble(itr_args_) ? 8 : 4;
int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
ManagedRegisterSpill spill(in_reg, size, spill_offset);
entry_spills_.push_back(spill);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 70c2738..99427f0 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -839,8 +839,8 @@
// last emitted is different than the native pc of the stack map just emitted.
size_t number_of_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
if (number_of_stack_maps > 1) {
- DCHECK_NE(stack_map_stream_.GetStackMap(number_of_stack_maps - 1).native_pc_offset,
- stack_map_stream_.GetStackMap(number_of_stack_maps - 2).native_pc_offset);
+ DCHECK_NE(stack_map_stream_.GetStackMap(number_of_stack_maps - 1).native_pc_code_offset,
+ stack_map_stream_.GetStackMap(number_of_stack_maps - 2).native_pc_code_offset);
}
}
}
@@ -848,7 +848,8 @@
bool CodeGenerator::HasStackMapAtCurrentPc() {
uint32_t pc = GetAssembler()->CodeSize();
size_t count = stack_map_stream_.GetNumberOfStackMaps();
- return count > 0 && stack_map_stream_.GetStackMap(count - 1).native_pc_offset == pc;
+  if (count == 0) {
+    return false;
+  }
+  // Note: GetStackMap(count - 1) must not be evaluated when there are no stack maps.
+  CodeOffset native_pc_offset = stack_map_stream_.GetStackMap(count - 1).native_pc_code_offset;
+  return native_pc_offset.Uint32Value(GetInstructionSet()) == pc;
}
void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 38d532e..2d129af 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -608,7 +608,7 @@
number_of_register_pairs_(number_of_register_pairs),
core_callee_save_mask_(core_callee_save_mask),
fpu_callee_save_mask_(fpu_callee_save_mask),
- stack_map_stream_(graph->GetArena()),
+ stack_map_stream_(graph->GetArena(), graph->GetInstructionSet()),
block_order_(nullptr),
jit_string_roots_(StringReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 9c9c604..f5b6ebe 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1239,7 +1239,8 @@
// Adjust native pc offsets in stack maps.
for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
- uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
+ uint32_t old_position =
+ stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kThumb2);
uint32_t new_position = __ GetAdjustedPosition(old_position);
stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
}
@@ -3331,7 +3332,7 @@
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+ // Note: divmod will compute both the quotient and the remainder as the pair R0 and R1, but
// we only need the former.
locations->SetOut(Location::RegisterLocation(R0));
}
@@ -3458,7 +3459,7 @@
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+ // Note: divmod will compute both the quotient and the remainder as the pair R0 and R1, but
// we only need the latter.
locations->SetOut(Location::RegisterLocation(R1));
}
@@ -7150,18 +7151,7 @@
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
- HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
- // We disable pc-relative load when there is an irreducible loop, as the optimization
- // is incompatible with it.
- // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- if (GetGraph()->HasIrreducibleLoops() &&
- (dispatch_info.method_load_kind ==
- HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative)) {
- dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
- }
-
- return dispatch_info;
+ return desired_dispatch_info;
}
Register CodeGeneratorARM::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
@@ -7181,8 +7171,7 @@
// save one load. However, since this is just an intrinsic slow path we prefer this
// simple and more robust approach rather than trying to determine if that's the case.
SlowPathCode* slow_path = GetCurrentSlowPath();
- DCHECK(slow_path != nullptr); // For intrinsified invokes the call is emitted on the slow path.
- if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
+ if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
__ LoadFromOffset(kLoadWord, temp, SP, stack_offset);
return temp;
@@ -7190,7 +7179,8 @@
return location.AsRegister<Register>();
}
-void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+Location CodeGeneratorARM::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp) {
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
@@ -7239,6 +7229,11 @@
break;
}
}
+ return callee_method;
+}
+
+void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+ Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
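
Note: this split is the enabling refactoring for the new `ReferenceGetReferent` intrinsics further down, and it is repeated for ARM64 and ARM VIXL below. Schematically (anything beyond what the diff shows is an assumption):

    // Materializes the callee ArtMethod* and reports where it ended up, so an
    // intrinsic can load the method without also emitting the call.
    Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
                                                    Location temp);

    void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
      Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
      // ... then switch on invoke->GetCodePtrLocation() and emit the call ...
    }
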
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 52d1857..df2dbc7 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -456,6 +456,7 @@
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke) OVERRIDE;
+ Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 68d0b86..9762ee8 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3993,7 +3993,8 @@
return desired_dispatch_info;
}
-void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+Location CodeGeneratorARM64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp) {
// Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
@@ -4047,6 +4048,12 @@
break;
}
}
+ return callee_method;
+}
+
+void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+ // All registers are assumed to be correctly set up.
+ Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
@@ -4655,7 +4662,7 @@
}
void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
- codegen_->InvokeRuntime(instruction->IsEnter() ? kQuickLockObject: kQuickUnlockObject,
+ codegen_->InvokeRuntime(instruction->IsEnter() ? kQuickLockObject : kQuickUnlockObject,
instruction,
instruction->GetDexPc());
if (instruction->IsEnter()) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a9dca92..7d3c655 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -527,6 +527,7 @@
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke) OVERRIDE;
+ Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 592ee5a..ffaf18f 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3336,7 +3336,7 @@
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
- // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+ // Note: divmod will compute both the quotient and the remainder as the pair R0 and R1, but
// we only need the former.
locations->SetOut(LocationFrom(r0));
}
@@ -3450,7 +3450,7 @@
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
- // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+ // Note: divmod will compute both the quotient and the remainder as the pair R0 and R1, but
// we only need the latter.
locations->SetOut(LocationFrom(r1));
}
@@ -7233,18 +7233,7 @@
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
- HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
- // We disable pc-relative load when there is an irreducible loop, as the optimization
- // is incompatible with it.
- // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- if (GetGraph()->HasIrreducibleLoops() &&
- (dispatch_info.method_load_kind ==
- HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative)) {
- dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
- }
-
- return dispatch_info;
+ return desired_dispatch_info;
}
vixl32::Register CodeGeneratorARMVIXL::GetInvokeStaticOrDirectExtraParameter(
@@ -7273,7 +7262,7 @@
return RegisterFrom(location);
}
-void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
+Location CodeGeneratorARMVIXL::GenerateCalleeMethodStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp) {
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
@@ -7324,6 +7313,12 @@
break;
}
}
+ return callee_method;
+}
+
+void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp) {
+ Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index be65353..8ae3b7d 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -537,6 +537,7 @@
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke) OVERRIDE;
+ Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index a038382..76be74e 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -496,7 +496,8 @@
// Adjust native pc offsets in stack maps.
for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
- uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
+ uint32_t old_position =
+ stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 446dea6..192b4a5 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -450,7 +450,8 @@
// Adjust native pc offsets in stack maps.
for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
- uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
+ uint32_t old_position =
+ stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 853c91f..5c561f5 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1778,7 +1778,7 @@
cond = X86Condition(condition->GetCondition());
}
} else {
- // Must be a boolean condition, which needs to be compared to 0.
+ // Must be a Boolean condition, which needs to be compared to 0.
Register cond_reg = locations->InAt(2).AsRegister<Register>();
__ testl(cond_reg, cond_reg);
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 74c71cc..c4caf4b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1809,7 +1809,7 @@
cond = X86_64IntegerCondition(condition->GetCondition());
}
} else {
- // Must be a boolean condition, which needs to be compared to 0.
+ // Must be a Boolean condition, which needs to be compared to 0.
CpuRegister cond_reg = locations->InAt(2).AsRegister<CpuRegister>();
__ testl(cond_reg, cond_reg);
}
@@ -4210,7 +4210,7 @@
void CodeGeneratorX86_64::GenerateMemoryBarrier(MemBarrierKind kind) {
/*
- * According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need memory fence.
+   * According to the JSR-133 Cookbook, for x86-64 only StoreLoad/AnyAny barriers need a memory fence.
* All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86-64 memory model.
* For those cases, all we need to ensure is that there is a scheduling barrier in place.
*/
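
Note: the "nops" claim above is the standard JSR-133 Cookbook result for x86/x86-64: only StoreLoad (and hence AnyAny) needs an actual fence instruction; the others just need the compiler not to reorder. The same distinction in portable C++:

    #include <atomic>

    // Emits a real fence (mfence or a locked RMW) on x86-64.
    void AnyAnyBarrier() { std::atomic_thread_fence(std::memory_order_seq_cst); }

    // Compiler-only scheduling barrier; no instruction is emitted.
    void LoadAnyBarrier() { std::atomic_signal_fence(std::memory_order_seq_cst); }
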
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.cc b/compiler/optimizing/dex_cache_array_fixups_arm.cc
index 9ddcd56..cfcb276 100644
--- a/compiler/optimizing/dex_cache_array_fixups_arm.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.cc
@@ -65,7 +65,7 @@
if (invoke->HasPcRelativeDexCache() &&
!IsCallFreeIntrinsic<IntrinsicLocationsBuilderARMType>(invoke, codegen_)) {
HArmDexCacheArraysBase* base =
- GetOrCreateDexCacheArrayBase(invoke->GetDexFileForPcRelativeDexCache());
+ GetOrCreateDexCacheArrayBase(invoke, invoke->GetDexFileForPcRelativeDexCache());
// Update the element offset in base.
DexCacheArraysLayout layout(kArmPointerSize, &invoke->GetDexFileForPcRelativeDexCache());
base->UpdateElementOffset(layout.MethodOffset(invoke->GetDexMethodIndex()));
@@ -75,21 +75,28 @@
}
}
- HArmDexCacheArraysBase* GetOrCreateDexCacheArrayBase(const DexFile& dex_file) {
- // Ensure we only initialize the pointer once for each dex file.
- auto lb = dex_cache_array_bases_.lower_bound(&dex_file);
- if (lb != dex_cache_array_bases_.end() &&
- !dex_cache_array_bases_.key_comp()(&dex_file, lb->first)) {
- return lb->second;
- }
+ HArmDexCacheArraysBase* GetOrCreateDexCacheArrayBase(HInstruction* cursor,
+ const DexFile& dex_file) {
+ if (GetGraph()->HasIrreducibleLoops()) {
+ HArmDexCacheArraysBase* base = new (GetGraph()->GetArena()) HArmDexCacheArraysBase(dex_file);
+ cursor->GetBlock()->InsertInstructionBefore(base, cursor);
+ return base;
+ } else {
+ // Ensure we only initialize the pointer once for each dex file.
+ auto lb = dex_cache_array_bases_.lower_bound(&dex_file);
+ if (lb != dex_cache_array_bases_.end() &&
+ !dex_cache_array_bases_.key_comp()(&dex_file, lb->first)) {
+ return lb->second;
+ }
- // Insert the base at the start of the entry block, move it to a better
- // position later in MoveBaseIfNeeded().
- HArmDexCacheArraysBase* base = new (GetGraph()->GetArena()) HArmDexCacheArraysBase(dex_file);
- HBasicBlock* entry_block = GetGraph()->GetEntryBlock();
- entry_block->InsertInstructionBefore(base, entry_block->GetFirstInstruction());
- dex_cache_array_bases_.PutBefore(lb, &dex_file, base);
- return base;
+ // Insert the base at the start of the entry block, move it to a better
+ // position later in MoveBaseIfNeeded().
+ HArmDexCacheArraysBase* base = new (GetGraph()->GetArena()) HArmDexCacheArraysBase(dex_file);
+ HBasicBlock* entry_block = GetGraph()->GetEntryBlock();
+ entry_block->InsertInstructionBefore(base, entry_block->GetFirstInstruction());
+ dex_cache_array_bases_.PutBefore(lb, &dex_file, base);
+ return base;
+ }
}
CodeGeneratorARMType* codegen_;
@@ -100,11 +107,6 @@
};
void DexCacheArrayFixups::Run() {
- if (graph_->HasIrreducibleLoops()) {
- // Do not run this optimization, as irreducible loops do not work with an instruction
- // that can be live-in at the irreducible loop header.
- return;
- }
DexCacheArrayFixupsVisitor visitor(graph_, codegen_);
visitor.VisitInsertionOrder();
visitor.MoveBasesIfNeeded();
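
Note: previously this pass refused to run on graphs with irreducible loops, because a base hoisted into the entry block can become live-in at an irreducible loop header. The new code keeps the optimization and trades sharing for correctness: with irreducible loops it materializes a fresh base right before each user. The decision, reduced to a sketch (helper names invented):

    HArmDexCacheArraysBase* GetBase(HGraph* graph, HInstruction* cursor,
                                    const DexFile& dex_file) {
      if (graph->HasIrreducibleLoops()) {
        // One base per use site: never live across an irreducible header.
        return NewBaseBefore(cursor, dex_file);
      }
      // One shared base in the entry block, cached per dex file.
      return GetOrCreateEntryBlockBase(graph, dex_file);
    }
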
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index f5931a2..c93bc21 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -399,7 +399,7 @@
ArenaVector<ValueSet*> sets_;
// BitVector which serves as a fast-access map from block id to
- // visited/unvisited boolean.
+ // visited/unvisited Boolean.
ArenaBitVector visited_blocks_;
DISALLOW_COPY_AND_ASSIGN(GlobalValueNumberer);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index ef8d74d..cac385c 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1,4 +1,3 @@
-
/*
* Copyright (C) 2016 The Android Open Source Project
*
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 8f64fae..c262cf9 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -2592,6 +2592,58 @@
__ Lsr(out, out, 5);
}
+void IntrinsicLocationsBuilderARM::VisitReferenceGetReferent(HInvoke* invoke) {
+ if (kEmitCompilerReadBarrier) {
+ // Do not intrinsify this call with the read barrier configuration.
+ return;
+ }
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM::VisitReferenceGetReferent(HInvoke* invoke) {
+ DCHECK(!kEmitCompilerReadBarrier);
+ ArmAssembler* const assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+
+ SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load ArtMethod first.
+ HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
+ DCHECK(invoke_direct != nullptr);
+ Register temp = codegen_->GenerateCalleeMethodStaticOrDirectCall(
+ invoke_direct, locations->GetTemp(0)).AsRegister<Register>();
+
+ // Now get declaring class.
+ __ ldr(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
+
+ uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
+ uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
+ DCHECK_NE(slow_path_flag_offset, 0u);
+ DCHECK_NE(disable_flag_offset, 0u);
+ DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
+
+  // Check static flags that prevent using the intrinsic.
+ __ ldr(IP, Address(temp, disable_flag_offset));
+ __ ldr(temp, Address(temp, slow_path_flag_offset));
+ __ orr(IP, IP, ShifterOperand(temp));
+ __ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
+
+ // Fast path.
+ __ ldr(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ __ MaybeUnpoisonHeapReference(out);
+ __ Bind(slow_path->GetExitLabel());
+}
+
UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble)
UNIMPLEMENTED_INTRINSIC(ARM, MathMinFloatFloat)
UNIMPLEMENTED_INTRINSIC(ARM, MathMaxDoubleDouble)
@@ -2605,7 +2657,6 @@
UNIMPLEMENTED_INTRINSIC(ARM, MathRoundFloat) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARM, UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARM, SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(ARM, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(ARM, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM, IntegerLowestOneBit)
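
Note: the ARM, ARM64, and VIXL `VisitReferenceGetReferent` bodies all emit the same shape: load the callee `ArtMethod*`, chase it to the declaring class, OR two flag bytes, and bail to the slow path if either is set. A runtime-level model of the emitted check (field names are assumptions; the real code works on raw offsets):

    #include <cstdint>

    struct ReferenceClassFlags {
      uint8_t disable_intrinsic;   // assumed flag at disable_flag_offset
      uint8_t slow_path_enabled;   // assumed flag at slow_path_flag_offset
    };

    template <typename ObjRef>
    ObjRef GetReferent(const ReferenceClassFlags& flags, ObjRef referent_field,
                       ObjRef (*slow_path)()) {
      if ((flags.disable_intrinsic | flags.slow_path_enabled) != 0) {
        return slow_path();  // e.g. a GC phase must intercept getReferent()
      }
      return referent_field;  // fast path: one heap load (plus unpoisoning)
    }
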
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index d8a896e..bbf826c 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2773,7 +2773,65 @@
GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
}
-UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
+void IntrinsicLocationsBuilderARM64::VisitReferenceGetReferent(HInvoke* invoke) {
+ if (kEmitCompilerReadBarrier) {
+ // Do not intrinsify this call with the read barrier configuration.
+ return;
+ }
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitReferenceGetReferent(HInvoke* invoke) {
+ DCHECK(!kEmitCompilerReadBarrier);
+ MacroAssembler* masm = GetVIXLAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register obj = InputRegisterAt(invoke, 0);
+ Register out = OutputRegister(invoke);
+
+ SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load ArtMethod first.
+ HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
+ DCHECK(invoke_direct != nullptr);
+ Register temp0 = XRegisterFrom(codegen_->GenerateCalleeMethodStaticOrDirectCall(
+ invoke_direct, locations->GetTemp(0)));
+
+ // Now get declaring class.
+ __ Ldr(temp0.W(), MemOperand(temp0, ArtMethod::DeclaringClassOffset().Int32Value()));
+
+ uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
+ uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
+ DCHECK_NE(slow_path_flag_offset, 0u);
+ DCHECK_NE(disable_flag_offset, 0u);
+ DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
+
+  // Check static flags that prevent using the intrinsic.
+ if (slow_path_flag_offset == disable_flag_offset + 1) {
+ // Load two adjacent flags in one 64-bit load.
+ __ Ldr(temp0, MemOperand(temp0, disable_flag_offset));
+ } else {
+ UseScratchRegisterScope temps(masm);
+ Register temp1 = temps.AcquireW();
+ __ Ldr(temp1.W(), MemOperand(temp0, disable_flag_offset));
+ __ Ldr(temp0.W(), MemOperand(temp0, slow_path_flag_offset));
+ __ Orr(temp0, temp1, temp0);
+ }
+ __ Cbnz(temp0, slow_path->GetEntryLabel());
+
+ // Fast path.
+ __ Ldr(out, HeapOperand(obj, mirror::Reference::ReferentOffset().Int32Value()));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ codegen_->GetAssembler()->MaybeUnpoisonHeapReference(out);
+ __ Bind(slow_path->GetExitLabel());
+}
+
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
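
Note: the ARM64 variant adds one wrinkle over the ARM copy: when the two byte-sized flags are adjacent (`slow_path_flag_offset == disable_flag_offset + 1`), a single wider load replaces two loads plus an OR. The same micro-optimization in portable form (assuming the flags really are contiguous, as the offset check guarantees):

    #include <cstdint>
    #include <cstring>

    bool EitherFlagSet(const uint8_t* first_flag) {
      uint16_t both;
      std::memcpy(&both, first_flag, sizeof(both));  // one load, two flags
      return both != 0;
    }
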
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 85e84d8..68c2d2e 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2688,6 +2688,60 @@
__ Lsr(out, out, 5);
}
+void IntrinsicLocationsBuilderARMVIXL::VisitReferenceGetReferent(HInvoke* invoke) {
+ if (kEmitCompilerReadBarrier) {
+ // Do not intrinsify this call with the read barrier configuration.
+ return;
+ }
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitReferenceGetReferent(HInvoke* invoke) {
+ DCHECK(!kEmitCompilerReadBarrier);
+ ArmVIXLAssembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ vixl32::Register obj = InputRegisterAt(invoke, 0);
+ vixl32::Register out = OutputRegister(invoke);
+
+ SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load ArtMethod first.
+ HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
+ DCHECK(invoke_direct != nullptr);
+ vixl32::Register temp0 = RegisterFrom(codegen_->GenerateCalleeMethodStaticOrDirectCall(
+ invoke_direct, locations->GetTemp(0)));
+
+ // Now get declaring class.
+ __ Ldr(temp0, MemOperand(temp0, ArtMethod::DeclaringClassOffset().Int32Value()));
+
+ uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
+ uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
+ DCHECK_NE(slow_path_flag_offset, 0u);
+ DCHECK_NE(disable_flag_offset, 0u);
+ DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
+
+  // Check static flags that prevent using the intrinsic.
+ UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+ vixl32::Register temp1 = temps.Acquire();
+ __ Ldr(temp1, MemOperand(temp0, disable_flag_offset));
+ __ Ldr(temp0, MemOperand(temp0, slow_path_flag_offset));
+ __ Orr(temp0, temp1, temp0);
+ __ CompareAndBranchIfNonZero(temp0, slow_path->GetEntryLabel());
+
+ // Fast path.
+ __ Ldr(out, MemOperand(obj, mirror::Reference::ReferentOffset().Int32Value()));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ assembler->MaybeUnpoisonHeapReference(out);
+ __ Bind(slow_path->GetExitLabel());
+}
+
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinDoubleDouble)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinFloatFloat)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxDoubleDouble)
@@ -2701,7 +2755,6 @@
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, IntegerLowestOneBit)
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index f1ae549..6cf9b83 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1878,7 +1878,7 @@
// If we use 'value' directly, we would lose 'value'
// in the case that the store fails. Whether the
// store succeeds, or fails, it will load the
- // correct boolean value into the 'out' register.
+ // correct Boolean value into the 'out' register.
// This test isn't really necessary. We only support Primitive::kPrimInt,
// Primitive::kPrimNot, and we already verified that we're working on one
// of those two types. It's left here in case the code needs to support
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 3022e97..00a1fa1 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1477,7 +1477,7 @@
// If we use 'value' directly, we would lose 'value'
// in the case that the store fails. Whether the
// store succeeds, or fails, it will load the
- // correct boolean value into the 'out' register.
+ // correct Boolean value into the 'out' register.
if (type == Primitive::kPrimLong) {
__ Scd(out, TMP);
} else {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index a2980dc..f0ea9e2 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -565,7 +565,7 @@
ArtMethod* GetArtMethod() const { return art_method_; }
void SetArtMethod(ArtMethod* method) { art_method_ = method; }
- // Returns an instruction with the opposite boolean value from 'cond'.
+ // Returns an instruction with the opposite Boolean value from 'cond'.
// The instruction has been inserted into the graph, either as a constant, or
// before cursor.
HInstruction* InsertOppositeCondition(HInstruction* cond, HInstruction* cursor);
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 6087e36..a9a1e6f 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -31,7 +31,7 @@
DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
DCHECK_NE(dex_pc, static_cast<uint32_t>(-1)) << "invalid dex_pc";
current_entry_.dex_pc = dex_pc;
- current_entry_.native_pc_offset = native_pc_offset;
+ current_entry_.native_pc_code_offset = CodeOffset::FromOffset(native_pc_offset, instruction_set_);
current_entry_.register_mask = register_mask;
current_entry_.sp_mask = sp_mask;
current_entry_.num_dex_registers = num_dex_registers;
@@ -144,10 +144,10 @@
current_inline_info_ = InlineInfoEntry();
}
-uint32_t StackMapStream::ComputeMaxNativePcOffset() const {
- uint32_t max_native_pc_offset = 0u;
+CodeOffset StackMapStream::ComputeMaxNativePcCodeOffset() const {
+ CodeOffset max_native_pc_offset;
for (const StackMapEntry& entry : stack_maps_) {
- max_native_pc_offset = std::max(max_native_pc_offset, entry.native_pc_offset);
+ max_native_pc_offset = std::max(max_native_pc_offset, entry.native_pc_code_offset);
}
return max_native_pc_offset;
}
@@ -157,8 +157,9 @@
dex_register_maps_size_ = ComputeDexRegisterMapsSize();
ComputeInlineInfoEncoding(); // needs dex_register_maps_size_.
inline_info_size_ = inline_infos_.size() * inline_info_encoding_.GetEntrySize();
- uint32_t max_native_pc_offset = ComputeMaxNativePcOffset();
- size_t stack_map_size = stack_map_encoding_.SetFromSizes(max_native_pc_offset,
+ CodeOffset max_native_pc_offset = ComputeMaxNativePcCodeOffset();
+ // The stack map contains compressed native offsets.
+ size_t stack_map_size = stack_map_encoding_.SetFromSizes(max_native_pc_offset.CompressedValue(),
dex_pc_max_,
dex_register_maps_size_,
inline_info_size_,
@@ -319,7 +320,7 @@
StackMapEntry entry = stack_maps_[i];
stack_map.SetDexPc(stack_map_encoding_, entry.dex_pc);
- stack_map.SetNativePcOffset(stack_map_encoding_, entry.native_pc_offset);
+ stack_map.SetNativePcCodeOffset(stack_map_encoding_, entry.native_pc_code_offset);
stack_map.SetRegisterMask(stack_map_encoding_, entry.register_mask);
size_t number_of_stack_mask_bits = stack_map.GetNumberOfStackMaskBits(stack_map_encoding_);
if (entry.sp_mask != nullptr) {
@@ -546,7 +547,8 @@
StackMapEntry entry = stack_maps_[s];
// Check main stack map fields.
- DCHECK_EQ(stack_map.GetNativePcOffset(stack_map_encoding), entry.native_pc_offset);
+ DCHECK_EQ(stack_map.GetNativePcOffset(stack_map_encoding, instruction_set_),
+ entry.native_pc_code_offset.Uint32Value(instruction_set_));
DCHECK_EQ(stack_map.GetDexPc(stack_map_encoding), entry.dex_pc);
DCHECK_EQ(stack_map.GetRegisterMask(stack_map_encoding), entry.register_mask);
size_t num_stack_mask_bits = stack_map.GetNumberOfStackMaskBits(stack_map_encoding);
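
Note: this is the core of the change. Native PC offsets are multiples of the ISA's instruction alignment, so `CodeOffset` stores them pre-divided and multiplies the alignment back in on read; `SetFromSizes` then sizes the bit field from the smaller `CompressedValue()`. It is also why the test further down moves a native pc from 67 to 68: 67 is not a multiple of every ISA's instruction alignment. A self-contained sketch of the round trip (a simplified stand-in, not the real class):

    #include <cassert>
    #include <cstdint>

    class SketchCodeOffset {
     public:
      static SketchCodeOffset FromOffset(uint32_t offset, uint32_t alignment) {
        assert(offset % alignment == 0);  // native PCs are aligned
        return SketchCodeOffset(offset / alignment);
      }
      uint32_t Uint32Value(uint32_t alignment) const { return value_ * alignment; }
      uint32_t CompressedValue() const { return value_; }
     private:
      explicit SketchCodeOffset(uint32_t value) : value_(value) {}
      uint32_t value_;
    };

    // Thumb2 (2-byte alignment): offset 64 is stored as 32 and decodes back.
    // auto off = SketchCodeOffset::FromOffset(64, 2);
    // assert(off.CompressedValue() == 32 && off.Uint32Value(2) == 64);
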
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index d6f42b3..8fec472 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -59,8 +59,10 @@
*/
class StackMapStream : public ValueObject {
public:
- explicit StackMapStream(ArenaAllocator* allocator)
+ explicit StackMapStream(ArenaAllocator* allocator,
+ InstructionSet instruction_set)
: allocator_(allocator),
+ instruction_set_(instruction_set),
stack_maps_(allocator->Adapter(kArenaAllocStackMapStream)),
location_catalog_entries_(allocator->Adapter(kArenaAllocStackMapStream)),
location_catalog_entries_indices_(allocator->Adapter(kArenaAllocStackMapStream)),
@@ -95,7 +97,7 @@
// See runtime/stack_map.h to know what these fields contain.
struct StackMapEntry {
uint32_t dex_pc;
- uint32_t native_pc_offset;
+ CodeOffset native_pc_code_offset;
uint32_t register_mask;
BitVector* sp_mask;
uint32_t num_dex_registers;
@@ -141,11 +143,9 @@
}
void SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) {
- stack_maps_[i].native_pc_offset = native_pc_offset;
+ stack_maps_[i].native_pc_code_offset = CodeOffset::FromOffset(native_pc_offset, instruction_set_);
}
- uint32_t ComputeMaxNativePcOffset() const;
-
// Prepares the stream to fill in a memory region. Must be called before FillIn.
// Returns the size (in bytes) needed to store this stream.
size_t PrepareForFillIn();
@@ -158,6 +158,8 @@
size_t ComputeDexRegisterMapsSize() const;
void ComputeInlineInfoEncoding();
+ CodeOffset ComputeMaxNativePcCodeOffset() const;
+
// Returns the index of an entry with the same dex register map as the current_entry,
// or kNoSameDexMapFound if no such entry exists.
size_t FindEntryWithTheSameDexMap();
@@ -175,6 +177,7 @@
void CheckCodeInfo(MemoryRegion region) const;
ArenaAllocator* allocator_;
+ const InstructionSet instruction_set_;
ArenaVector<StackMapEntry> stack_maps_;
// A catalog of unique [location_kind, register_value] pairs (per method).
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 22810ea..f68695b 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -47,7 +47,7 @@
TEST(StackMapTest, Test1) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
ArenaBitVector sp_mask(&arena, 0, false);
size_t number_of_dex_registers = 2;
@@ -78,7 +78,7 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask));
@@ -128,7 +128,7 @@
TEST(StackMapTest, Test2) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
ArtMethod art_method;
ArenaBitVector sp_mask1(&arena, 0, true);
@@ -193,7 +193,7 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask1));
@@ -252,7 +252,7 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u, encoding)));
ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0xFFu, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask2));
@@ -306,7 +306,7 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(2u, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u, encoding)));
ASSERT_EQ(2u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0xABu, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask3));
@@ -360,7 +360,7 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(3u, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u, encoding)));
ASSERT_EQ(3u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0xCDu, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask4));
@@ -412,7 +412,7 @@
TEST(StackMapTest, TestNonLiveDexRegisters) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 2;
@@ -442,7 +442,7 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
@@ -491,7 +491,7 @@
TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 1024;
@@ -554,7 +554,7 @@
TEST(StackMapTest, TestShareDexRegisterMap) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 2;
@@ -612,7 +612,7 @@
TEST(StackMapTest, TestNoDexRegisterMap) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 0;
@@ -620,7 +620,7 @@
stream.EndStackMapEntry();
number_of_dex_registers = 1;
- stream.BeginStackMapEntry(1, 67, 0x4, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(1, 68, 0x4, &sp_mask, number_of_dex_registers, 0);
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
@@ -641,7 +641,7 @@
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
@@ -649,9 +649,9 @@
stack_map = code_info.GetStackMapAt(1, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1, encoding)));
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(67, encoding)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(68, encoding)));
ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(67u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(68u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0x4u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
@@ -661,7 +661,7 @@
TEST(StackMapTest, InlineTest) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
ArtMethod art_method;
ArenaBitVector sp_mask1(&arena, 0, true);
@@ -823,4 +823,20 @@
}
}
+TEST(StackMapTest, CodeOffsetTest) {
+ // Test minimum alignments, encoding, and decoding.
+ CodeOffset offset_thumb2 = CodeOffset::FromOffset(kThumb2InstructionAlignment, kThumb2);
+ CodeOffset offset_arm64 = CodeOffset::FromOffset(kArm64InstructionAlignment, kArm64);
+ CodeOffset offset_x86 = CodeOffset::FromOffset(kX86InstructionAlignment, kX86);
+ CodeOffset offset_x86_64 = CodeOffset::FromOffset(kX86_64InstructionAlignment, kX86_64);
+ CodeOffset offset_mips = CodeOffset::FromOffset(kMipsInstructionAlignment, kMips);
+ CodeOffset offset_mips64 = CodeOffset::FromOffset(kMips64InstructionAlignment, kMips64);
+ EXPECT_EQ(offset_thumb2.Uint32Value(kThumb2), kThumb2InstructionAlignment);
+ EXPECT_EQ(offset_arm64.Uint32Value(kArm64), kArm64InstructionAlignment);
+ EXPECT_EQ(offset_x86.Uint32Value(kX86), kX86InstructionAlignment);
+ EXPECT_EQ(offset_x86_64.Uint32Value(kX86_64), kX86_64InstructionAlignment);
+ EXPECT_EQ(offset_mips.Uint32Value(kMips), kMipsInstructionAlignment);
+ EXPECT_EQ(offset_mips64.Uint32Value(kMips64), kMips64InstructionAlignment);
+}
+
} // namespace art
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index f2cbebb..74b8f06 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -283,6 +283,38 @@
// FP Operations //
///////////////////
+TEST_F(AssemblerMIPS64Test, AddS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::AddS, "add.s ${reg1}, ${reg2}, ${reg3}"), "add.s");
+}
+
+TEST_F(AssemblerMIPS64Test, AddD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::AddD, "add.d ${reg1}, ${reg2}, ${reg3}"), "add.d");
+}
+
+TEST_F(AssemblerMIPS64Test, SubS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::SubS, "sub.s ${reg1}, ${reg2}, ${reg3}"), "sub.s");
+}
+
+TEST_F(AssemblerMIPS64Test, SubD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::SubD, "sub.d ${reg1}, ${reg2}, ${reg3}"), "sub.d");
+}
+
+TEST_F(AssemblerMIPS64Test, MulS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::MulS, "mul.s ${reg1}, ${reg2}, ${reg3}"), "mul.s");
+}
+
+TEST_F(AssemblerMIPS64Test, MulD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::MulD, "mul.d ${reg1}, ${reg2}, ${reg3}"), "mul.d");
+}
+
+TEST_F(AssemblerMIPS64Test, DivS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::DivS, "div.s ${reg1}, ${reg2}, ${reg3}"), "div.s");
+}
+
+TEST_F(AssemblerMIPS64Test, DivD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::DivD, "div.d ${reg1}, ${reg2}, ${reg3}"), "div.d");
+}
+
TEST_F(AssemblerMIPS64Test, SqrtS) {
DriverStr(RepeatFF(&mips64::Mips64Assembler::SqrtS, "sqrt.s ${reg1}, ${reg2}"), "sqrt.s");
}
@@ -567,6 +599,26 @@
DriverStr(RepeatRF(&mips64::Mips64Assembler::Dmtc1, "dmtc1 ${reg1}, ${reg2}"), "Dmtc1");
}
+TEST_F(AssemblerMIPS64Test, Lwc1) {
+ DriverStr(RepeatFRIb(&mips64::Mips64Assembler::Lwc1, -16, "lwc1 ${reg1}, {imm}(${reg2})"),
+ "lwc1");
+}
+
+TEST_F(AssemblerMIPS64Test, Ldc1) {
+ DriverStr(RepeatFRIb(&mips64::Mips64Assembler::Ldc1, -16, "ldc1 ${reg1}, {imm}(${reg2})"),
+ "ldc1");
+}
+
+TEST_F(AssemblerMIPS64Test, Swc1) {
+ DriverStr(RepeatFRIb(&mips64::Mips64Assembler::Swc1, -16, "swc1 ${reg1}, {imm}(${reg2})"),
+ "swc1");
+}
+
+TEST_F(AssemblerMIPS64Test, Sdc1) {
+ DriverStr(RepeatFRIb(&mips64::Mips64Assembler::Sdc1, -16, "sdc1 ${reg1}, {imm}(${reg2})"),
+ "sdc1");
+}
+
////////////////
// CALL / JMP //
////////////////
@@ -850,6 +902,16 @@
DriverStr(RepeatRIb(&mips64::Mips64Assembler::Ldpc, 18, code), "Ldpc");
}
+TEST_F(AssemblerMIPS64Test, Auipc) {
+ DriverStr(RepeatRIb(&mips64::Mips64Assembler::Auipc, 16, "auipc ${reg}, {imm}"), "Auipc");
+}
+
+TEST_F(AssemblerMIPS64Test, Addiupc) {
+ // The comment from the Lwpc() test applies to this Addiupc() test as well.
+ const char* code = ".set imm, {imm}\naddiupc ${reg}, (imm - ((imm & 0x40000) << 1)) << 2";
+ DriverStr(RepeatRIb(&mips64::Mips64Assembler::Addiupc, 19, code), "Addiupc");
+}
+
TEST_F(AssemblerMIPS64Test, LoadFarthestNearLabelAddress) {
mips64::Mips64Label label;
__ LoadLabelAddress(mips64::V0, &label);
@@ -1079,6 +1141,188 @@
EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (5 + kAdduCount) * 4);
}
+TEST_F(AssemblerMIPS64Test, Addu) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Addu, "addu ${reg1}, ${reg2}, ${reg3}"), "addu");
+}
+
+TEST_F(AssemblerMIPS64Test, Addiu) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Addiu, -16, "addiu ${reg1}, ${reg2}, {imm}"),
+ "addiu");
+}
+
+TEST_F(AssemblerMIPS64Test, Daddu) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Daddu, "daddu ${reg1}, ${reg2}, ${reg3}"), "daddu");
+}
+
+TEST_F(AssemblerMIPS64Test, Daddiu) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Daddiu, -16, "daddiu ${reg1}, ${reg2}, {imm}"),
+ "daddiu");
+}
+
+TEST_F(AssemblerMIPS64Test, Subu) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Subu, "subu ${reg1}, ${reg2}, ${reg3}"), "subu");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsubu) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dsubu, "dsubu ${reg1}, ${reg2}, ${reg3}"), "dsubu");
+}
+
+TEST_F(AssemblerMIPS64Test, MulR6) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::MulR6, "mul ${reg1}, ${reg2}, ${reg3}"), "mulR6");
+}
+
+TEST_F(AssemblerMIPS64Test, DivR6) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::DivR6, "div ${reg1}, ${reg2}, ${reg3}"), "divR6");
+}
+
+TEST_F(AssemblerMIPS64Test, ModR6) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::ModR6, "mod ${reg1}, ${reg2}, ${reg3}"), "modR6");
+}
+
+TEST_F(AssemblerMIPS64Test, DivuR6) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::DivuR6, "divu ${reg1}, ${reg2}, ${reg3}"),
+ "divuR6");
+}
+
+TEST_F(AssemblerMIPS64Test, ModuR6) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::ModuR6, "modu ${reg1}, ${reg2}, ${reg3}"),
+ "moduR6");
+}
+
+TEST_F(AssemblerMIPS64Test, Dmul) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dmul, "dmul ${reg1}, ${reg2}, ${reg3}"), "dmul");
+}
+
+TEST_F(AssemblerMIPS64Test, Ddiv) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Ddiv, "ddiv ${reg1}, ${reg2}, ${reg3}"), "ddiv");
+}
+
+TEST_F(AssemblerMIPS64Test, Dmod) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dmod, "dmod ${reg1}, ${reg2}, ${reg3}"), "dmod");
+}
+
+TEST_F(AssemblerMIPS64Test, Ddivu) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Ddivu, "ddivu ${reg1}, ${reg2}, ${reg3}"), "ddivu");
+}
+
+TEST_F(AssemblerMIPS64Test, Dmodu) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dmodu, "dmodu ${reg1}, ${reg2}, ${reg3}"), "dmodu");
+}
+
+TEST_F(AssemblerMIPS64Test, And) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::And, "and ${reg1}, ${reg2}, ${reg3}"), "and");
+}
+
+TEST_F(AssemblerMIPS64Test, Andi) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Andi, 16, "andi ${reg1}, ${reg2}, {imm}"), "andi");
+}
+
+TEST_F(AssemblerMIPS64Test, Or) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Or, "or ${reg1}, ${reg2}, ${reg3}"), "or");
+}
+
+TEST_F(AssemblerMIPS64Test, Ori) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Ori, 16, "ori ${reg1}, ${reg2}, {imm}"), "ori");
+}
+
+TEST_F(AssemblerMIPS64Test, Xor) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Xor, "xor ${reg1}, ${reg2}, ${reg3}"), "xor");
+}
+
+TEST_F(AssemblerMIPS64Test, Xori) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Xori, 16, "xori ${reg1}, ${reg2}, {imm}"), "xori");
+}
+
+TEST_F(AssemblerMIPS64Test, Nor) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Nor, "nor ${reg1}, ${reg2}, ${reg3}"), "nor");
+}
+
+TEST_F(AssemblerMIPS64Test, Lb) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lb, -16, "lb ${reg1}, {imm}(${reg2})"), "lb");
+}
+
+TEST_F(AssemblerMIPS64Test, Lh) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lh, -16, "lh ${reg1}, {imm}(${reg2})"), "lh");
+}
+
+TEST_F(AssemblerMIPS64Test, Lw) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lw, -16, "lw ${reg1}, {imm}(${reg2})"), "lw");
+}
+
+TEST_F(AssemblerMIPS64Test, Ld) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Ld, -16, "ld ${reg1}, {imm}(${reg2})"), "ld");
+}
+
+TEST_F(AssemblerMIPS64Test, Lbu) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lbu, -16, "lbu ${reg1}, {imm}(${reg2})"), "lbu");
+}
+
+TEST_F(AssemblerMIPS64Test, Lhu) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lhu, -16, "lhu ${reg1}, {imm}(${reg2})"), "lhu");
+}
+
+TEST_F(AssemblerMIPS64Test, Lwu) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lwu, -16, "lwu ${reg1}, {imm}(${reg2})"), "lwu");
+}
+
+TEST_F(AssemblerMIPS64Test, Lui) {
+ DriverStr(RepeatRIb(&mips64::Mips64Assembler::Lui, 16, "lui ${reg}, {imm}"), "lui");
+}
+
+TEST_F(AssemblerMIPS64Test, Dahi) {
+ DriverStr(RepeatRIb(&mips64::Mips64Assembler::Dahi, 16, "dahi ${reg}, ${reg}, {imm}"), "dahi");
+}
+
+TEST_F(AssemblerMIPS64Test, Dati) {
+ DriverStr(RepeatRIb(&mips64::Mips64Assembler::Dati, 16, "dati ${reg}, ${reg}, {imm}"), "dati");
+}
+
+TEST_F(AssemblerMIPS64Test, Sb) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sb, -16, "sb ${reg1}, {imm}(${reg2})"), "sb");
+}
+
+TEST_F(AssemblerMIPS64Test, Sh) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sh, -16, "sh ${reg1}, {imm}(${reg2})"), "sh");
+}
+
+TEST_F(AssemblerMIPS64Test, Sw) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sw, -16, "sw ${reg1}, {imm}(${reg2})"), "sw");
+}
+
+TEST_F(AssemblerMIPS64Test, Sd) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sd, -16, "sd ${reg1}, {imm}(${reg2})"), "sd");
+}
+
+TEST_F(AssemblerMIPS64Test, Slt) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Slt, "slt ${reg1}, ${reg2}, ${reg3}"), "slt");
+}
+
+TEST_F(AssemblerMIPS64Test, Sltu) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Sltu, "sltu ${reg1}, ${reg2}, ${reg3}"), "sltu");
+}
+
+TEST_F(AssemblerMIPS64Test, Slti) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Slti, -16, "slti ${reg1}, ${reg2}, {imm}"),
+ "slti");
+}
+
+TEST_F(AssemblerMIPS64Test, Sltiu) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sltiu, -16, "sltiu ${reg1}, ${reg2}, {imm}"),
+ "sltiu");
+}
+
+TEST_F(AssemblerMIPS64Test, Move) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Move, "or ${reg1}, ${reg2}, $zero"), "move");
+}
+
+TEST_F(AssemblerMIPS64Test, Clear) {
+ DriverStr(RepeatR(&mips64::Mips64Assembler::Clear, "or ${reg}, $zero, $zero"), "clear");
+}
+
+TEST_F(AssemblerMIPS64Test, Not) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Not, "nor ${reg1}, ${reg2}, $zero"), "not");
+}
+
TEST_F(AssemblerMIPS64Test, Bitswap) {
DriverStr(RepeatRR(&mips64::Mips64Assembler::Bitswap, "bitswap ${reg1}, ${reg2}"), "bitswap");
}
@@ -1230,6 +1474,18 @@
"dsra32");
}
+TEST_F(AssemblerMIPS64Test, Dsllv) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dsllv, "dsllv ${reg1}, ${reg2}, ${reg3}"), "dsllv");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsrlv) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dsrlv, "dsrlv ${reg1}, ${reg2}, ${reg3}"), "dsrlv");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsrav) {
+ DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dsrav, "dsrav ${reg1}, ${reg2}, ${reg3}"), "dsrav");
+}
+
TEST_F(AssemblerMIPS64Test, Sc) {
DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sc, -9, "sc ${reg1}, {imm}(${reg2})"), "sc");
}
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index a2d1190..e2ee940 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -741,7 +741,7 @@
uint32_t GetAccessFlags() const { return access_flags_; }
const TypeId* Superclass() const { return superclass_; }
const TypeIdVector* Interfaces()
- { return interfaces_ == nullptr ? nullptr: interfaces_->GetTypeList(); }
+ { return interfaces_ == nullptr ? nullptr : interfaces_->GetTypeList(); }
uint32_t InterfacesOffset() { return interfaces_ == nullptr ? 0 : interfaces_->GetOffset(); }
const StringId* SourceFile() const { return source_file_; }
AnnotationsDirectoryItem* Annotations() const { return annotations_; }
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 148ee88..69901c1 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -529,6 +529,12 @@
}
}
+ {
+ os << "OAT FILE STATS:\n";
+ VariableIndentationOutputStream vios(&os);
+ stats_.Dump(vios);
+ }
+
os << std::flush;
return success;
}
@@ -574,6 +580,116 @@
return nullptr;
}
+ struct Stats {
+ enum ByteKind {
+ kByteKindCode,
+ kByteKindQuickMethodHeader,
+ kByteKindCodeInfoLocationCatalog,
+ kByteKindCodeInfoDexRegisterMap,
+ kByteKindCodeInfoInlineInfo,
+ kByteKindCodeInfoEncoding,
+ kByteKindCodeInfoOther,
+ kByteKindStackMapNativePc,
+ kByteKindStackMapDexPc,
+ kByteKindStackMapDexRegisterMap,
+ kByteKindStackMapInlineInfo,
+ kByteKindStackMapRegisterMask,
+ kByteKindStackMapMask,
+ kByteKindStackMapOther,
+ kByteKindCount,
+ kByteKindStackMapFirst = kByteKindCodeInfoOther,
+ kByteKindStackMapLast = kByteKindStackMapOther,
+ };
+ int64_t bits[kByteKindCount] = {};
+ // Since code is deduplicated, `seen` tracks pointers that were already counted to avoid
+ // double counting deduplicated code and tables.
+ std::unordered_set<const void*> seen;
+
+ // Returns true if it was newly added.
+ bool AddBitsIfUnique(ByteKind kind, int64_t count, const void* address) {
+ if (seen.insert(address).second) {
+ // `second` is true when the address was not already in the set.
+ AddBits(kind, count);
+ return true;
+ }
+ return false;
+ }
+
+ void AddBits(ByteKind kind, int64_t count) {
+ bits[kind] += count;
+ }
+
+ void Dump(VariableIndentationOutputStream& os) {
+ const int64_t sum = std::accumulate(bits, bits + kByteKindCount, int64_t{0});
+ os.Stream() << "Dumping cumulative use of " << sum / kBitsPerByte << " accounted bytes\n";
+ if (sum > 0) {
+ const int64_t stack_map_bits = std::accumulate(bits + kByteKindStackMapFirst,
+ bits + kByteKindStackMapLast + 1,
+ int64_t{0});
+ Dump(os, "Code ", bits[kByteKindCode], sum);
+ Dump(os, "QuickMethodHeader ", bits[kByteKindQuickMethodHeader], sum);
+ Dump(os, "CodeInfoEncoding ", bits[kByteKindCodeInfoEncoding], sum);
+ Dump(os, "CodeInfoLocationCatalog ", bits[kByteKindCodeInfoLocationCatalog], sum);
+ Dump(os, "CodeInfoDexRegisterMap ", bits[kByteKindCodeInfoDexRegisterMap], sum);
+ Dump(os, "CodeInfoInlineInfo ", bits[kByteKindCodeInfoInlineInfo], sum);
+ Dump(os, "CodeInfoStackMap ", stack_map_bits, sum);
+ {
+ ScopedIndentation indent1(&os);
+ Dump(os,
+ "StackMapNativePc ",
+ bits[kByteKindStackMapNativePc],
+ stack_map_bits,
+ "stack map");
+ Dump(os,
+ "StackMapDexPcEncoding ",
+ bits[kByteKindStackMapDexPc],
+ stack_map_bits,
+ "stack map");
+ Dump(os,
+ "StackMapDexRegisterMap ",
+ bits[kByteKindStackMapDexRegisterMap],
+ stack_map_bits,
+ "stack map");
+ Dump(os,
+ "StackMapInlineInfo ",
+ bits[kByteKindStackMapInlineInfo],
+ stack_map_bits,
+ "stack map");
+ Dump(os,
+ "StackMapRegisterMaskEncoding ",
+ bits[kByteKindStackMapRegisterMask],
+ stack_map_bits,
+ "stack map");
+ Dump(os,
+ "StackMapMask ",
+ bits[kByteKindStackMapMask],
+ stack_map_bits,
+ "stack map");
+ Dump(os,
+ "StackMapOther ",
+ bits[kByteKindStackMapOther],
+ stack_map_bits,
+ "stack map");
+ }
+ }
+ os.Stream() << "\n" << std::flush;
+ }
+
+ private:
+ void Dump(VariableIndentationOutputStream& os,
+ const char* name,
+ int64_t size,
+ int64_t total,
+ const char* sum_of = "total") {
+ const double percent = (static_cast<double>(size) / static_cast<double>(total)) * 100;
+ os.Stream() << StringPrintf("%s = %8" PRId64 " (%2.0f%% of %s)\n",
+ name,
+ size / kBitsPerByte,
+ percent,
+ sum_of);
+ }
+ };
+
private:
void AddAllOffsets() {
// We don't know the length of the code for each method, but we need to know where to stop
@@ -1046,7 +1162,9 @@
vios->Stream() << "OatQuickMethodHeader ";
uint32_t method_header_offset = oat_method.GetOatQuickMethodHeaderOffset();
const OatQuickMethodHeader* method_header = oat_method.GetOatQuickMethodHeader();
-
+ stats_.AddBitsIfUnique(Stats::kByteKindQuickMethodHeader,
+ sizeof(*method_header) * kBitsPerByte,
+ method_header);
if (options_.absolute_addresses_) {
vios->Stream() << StringPrintf("%p ", method_header);
}
@@ -1118,6 +1236,7 @@
const void* code = oat_method.GetQuickCode();
uint32_t aligned_code_begin = AlignCodeOffset(code_offset);
uint64_t aligned_code_end = aligned_code_begin + code_size;
+ stats_.AddBitsIfUnique(Stats::kByteKindCode, code_size * kBitsPerByte, code);
if (options_.absolute_addresses_) {
vios->Stream() << StringPrintf("%p ", code);
@@ -1223,7 +1342,8 @@
code_info.Dump(vios,
oat_method.GetCodeOffset(),
code_item.registers_size_,
- options_.dump_code_info_stack_maps_);
+ options_.dump_code_info_stack_maps_,
+ instruction_set_);
}
void DumpVregLocations(std::ostream& os, const OatFile::OatMethod& oat_method,
@@ -1329,21 +1449,22 @@
// For identical native PCs, the order from the CodeInfo is preserved.
class StackMapsHelper {
public:
- explicit StackMapsHelper(const uint8_t* raw_code_info)
+ explicit StackMapsHelper(const uint8_t* raw_code_info, InstructionSet instruction_set)
: code_info_(raw_code_info),
encoding_(code_info_.ExtractEncoding()),
number_of_stack_maps_(code_info_.GetNumberOfStackMaps(encoding_)),
indexes_(),
- offset_(static_cast<size_t>(-1)),
- stack_map_index_(0u) {
+ offset_(static_cast<uint32_t>(-1)),
+ stack_map_index_(0u),
+ instruction_set_(instruction_set) {
if (number_of_stack_maps_ != 0u) {
// Check if native PCs are ordered.
bool ordered = true;
StackMap last = code_info_.GetStackMapAt(0u, encoding_);
for (size_t i = 1; i != number_of_stack_maps_; ++i) {
StackMap current = code_info_.GetStackMapAt(i, encoding_);
- if (last.GetNativePcOffset(encoding_.stack_map_encoding) >
- current.GetNativePcOffset(encoding_.stack_map_encoding)) {
+ if (last.GetNativePcOffset(encoding_.stack_map_encoding, instruction_set) >
+ current.GetNativePcOffset(encoding_.stack_map_encoding, instruction_set)) {
ordered = false;
break;
}
@@ -1359,14 +1480,17 @@
indexes_.end(),
[this](size_t lhs, size_t rhs) {
StackMap left = code_info_.GetStackMapAt(lhs, encoding_);
- uint32_t left_pc = left.GetNativePcOffset(encoding_.stack_map_encoding);
+ uint32_t left_pc = left.GetNativePcOffset(encoding_.stack_map_encoding,
+ instruction_set_);
StackMap right = code_info_.GetStackMapAt(rhs, encoding_);
- uint32_t right_pc = right.GetNativePcOffset(encoding_.stack_map_encoding);
+ uint32_t right_pc = right.GetNativePcOffset(encoding_.stack_map_encoding,
+ instruction_set_);
// If the PCs are the same, compare indexes to preserve the original order.
return (left_pc < right_pc) || (left_pc == right_pc && lhs < rhs);
});
}
- offset_ = GetStackMapAt(0).GetNativePcOffset(encoding_.stack_map_encoding);
+ offset_ = GetStackMapAt(0).GetNativePcOffset(encoding_.stack_map_encoding,
+ instruction_set_);
}
}
@@ -1378,7 +1502,7 @@
return encoding_;
}
- size_t GetOffset() const {
+ uint32_t GetOffset() const {
return offset_;
}
@@ -1389,8 +1513,9 @@
void Next() {
++stack_map_index_;
offset_ = (stack_map_index_ == number_of_stack_maps_)
- ? static_cast<size_t>(-1)
- : GetStackMapAt(stack_map_index_).GetNativePcOffset(encoding_.stack_map_encoding);
+ ? static_cast<uint32_t>(-1)
+ : GetStackMapAt(stack_map_index_).GetNativePcOffset(encoding_.stack_map_encoding,
+ instruction_set_);
}
private:
@@ -1406,8 +1531,9 @@
const CodeInfoEncoding encoding_;
const size_t number_of_stack_maps_;
dchecked_vector<size_t> indexes_; // Used if stack map native PCs are not ordered.
- size_t offset_;
+ uint32_t offset_;
size_t stack_map_index_;
+ const InstructionSet instruction_set_;
};
void DumpCode(VariableIndentationOutputStream* vios,
@@ -1423,7 +1549,61 @@
return;
} else if (!bad_input && IsMethodGeneratedByOptimizingCompiler(oat_method, code_item)) {
// The optimizing compiler outputs its CodeInfo data in the vmap table.
- StackMapsHelper helper(oat_method.GetVmapTable());
+ StackMapsHelper helper(oat_method.GetVmapTable(), instruction_set_);
+ {
+ CodeInfoEncoding encoding(helper.GetEncoding());
+ StackMapEncoding stack_map_encoding(encoding.stack_map_encoding);
+ const size_t num_stack_maps = encoding.number_of_stack_maps;
+ std::vector<uint8_t> size_vector;
+ encoding.Compress(&size_vector);
+ if (stats_.AddBitsIfUnique(Stats::kByteKindCodeInfoEncoding,
+ size_vector.size() * kBitsPerByte,
+ oat_method.GetVmapTable())) {
+ stats_.AddBits(
+ Stats::kByteKindStackMapNativePc,
+ stack_map_encoding.GetNativePcEncoding().BitSize() * num_stack_maps);
+ stats_.AddBits(
+ Stats::kByteKindStackMapDexPc,
+ stack_map_encoding.GetDexPcEncoding().BitSize() * num_stack_maps);
+ stats_.AddBits(
+ Stats::kByteKindStackMapDexRegisterMap,
+ stack_map_encoding.GetDexRegisterMapEncoding().BitSize() * num_stack_maps);
+ stats_.AddBits(
+ Stats::kByteKindStackMapInlineInfo,
+ stack_map_encoding.GetInlineInfoEncoding().BitSize() * num_stack_maps);
+ stats_.AddBits(
+ Stats::kByteKindStackMapRegisterMask,
+ stack_map_encoding.GetRegisterMaskEncoding().BitSize() * num_stack_maps);
+ const size_t stack_mask_bits = encoding.stack_map_size_in_bytes * kBitsPerByte -
+ stack_map_encoding.GetStackMaskBitOffset();
+ stats_.AddBits(
+ Stats::kByteKindStackMapMask,
+ stack_mask_bits * num_stack_maps);
+ const size_t stack_map_bits =
+ stack_map_encoding.GetStackMaskBitOffset() + stack_mask_bits;
+ stats_.AddBits(
+ Stats::kByteKindStackMapOther,
+ (encoding.stack_map_size_in_bytes * kBitsPerByte - stack_map_bits) * num_stack_maps);
+ const size_t stack_map_bytes = helper.GetCodeInfo().GetStackMapsSize(encoding);
+ const size_t location_catalog_bytes =
+ helper.GetCodeInfo().GetDexRegisterLocationCatalogSize(encoding);
+ stats_.AddBits(Stats::kByteKindCodeInfoLocationCatalog,
+ kBitsPerByte * location_catalog_bytes);
+ const size_t dex_register_bytes =
+ helper.GetCodeInfo().GetDexRegisterMapsSize(encoding, code_item->registers_size_);
+ stats_.AddBits(
+ Stats::kByteKindCodeInfoDexRegisterMap,
+ kBitsPerByte * dex_register_bytes);
+ const size_t inline_info_bytes =
+ encoding.non_header_size -
+ stack_map_bytes -
+ location_catalog_bytes -
+ dex_register_bytes;
+ stats_.AddBits(Stats::kByteKindCodeInfoInlineInfo,
+ inline_info_bytes * kBitsPerByte);
+ }
+ }
const uint8_t* quick_native_pc = reinterpret_cast<const uint8_t*>(quick_code);
size_t offset = 0;
while (offset < code_size) {
@@ -1436,7 +1616,8 @@
helper.GetCodeInfo(),
helper.GetEncoding(),
oat_method.GetCodeOffset(),
- code_item->registers_size_);
+ code_item->registers_size_,
+ instruction_set_);
do {
helper.Next();
// There may be multiple stack maps at a given PC. We display only the first one.
@@ -1460,6 +1641,7 @@
const InstructionSet instruction_set_;
std::set<uintptr_t> offsets_;
Disassembler* disassembler_;
+ Stats stats_;
};
class ImageDumper {
@@ -1776,7 +1958,7 @@
os << StringPrintf("%d (0x%x)\n", field->GetShort(obj), field->GetShort(obj));
break;
case Primitive::kPrimBoolean:
- os << StringPrintf("%s (0x%x)\n", field->GetBoolean(obj)? "true" : "false",
+ os << StringPrintf("%s (0x%x)\n", field->GetBoolean(obj) ? "true" : "false",
field->GetBoolean(obj));
break;
case Primitive::kPrimByte:
@@ -2132,7 +2314,6 @@
size_t managed_code_bytes;
size_t managed_code_bytes_ignoring_deduplication;
- size_t managed_to_native_code_bytes;
size_t native_to_managed_code_bytes;
size_t class_initializer_code_bytes;
size_t large_initializer_code_bytes;
@@ -2161,7 +2342,6 @@
alignment_bytes(0),
managed_code_bytes(0),
managed_code_bytes_ignoring_deduplication(0),
- managed_to_native_code_bytes(0),
native_to_managed_code_bytes(0),
class_initializer_code_bytes(0),
large_initializer_code_bytes(0),
@@ -2359,7 +2539,6 @@
os << StringPrintf("oat_file_bytes = %8zd\n"
"managed_code_bytes = %8zd (%2.0f%% of oat file bytes)\n"
- "managed_to_native_code_bytes = %8zd (%2.0f%% of oat file bytes)\n"
"native_to_managed_code_bytes = %8zd (%2.0f%% of oat file bytes)\n\n"
"class_initializer_code_bytes = %8zd (%2.0f%% of oat file bytes)\n"
"large_initializer_code_bytes = %8zd (%2.0f%% of oat file bytes)\n"
@@ -2367,8 +2546,6 @@
oat_file_bytes,
managed_code_bytes,
PercentOfOatBytes(managed_code_bytes),
- managed_to_native_code_bytes,
- PercentOfOatBytes(managed_to_native_code_bytes),
native_to_managed_code_bytes,
PercentOfOatBytes(native_to_managed_code_bytes),
class_initializer_code_bytes,
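For context, Stats::AddBitsIfUnique above charges a byte kind only the first time an address is seen, so deduplicated code and tables are counted once. A minimal self-contained sketch of the same dedup-aware accounting (hypothetical stand-alone type, not part of the patch):

#include <cstdint>
#include <unordered_set>

// Hypothetical analogue of Stats::AddBitsIfUnique: charge `count` bits only
// for the first occurrence of `address`.
struct DedupBitCounter {
  int64_t bits = 0;
  std::unordered_set<const void*> seen;

  bool AddBitsIfUnique(int64_t count, const void* address) {
    if (seen.insert(address).second) {  // true => address was newly inserted.
      bits += count;
      return true;
    }
    return false;
  }
};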
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index e77d03b..ba57d18 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -102,6 +102,7 @@
// Code and dex code do not show up if list only.
expected_prefixes.push_back("DEX CODE:");
expected_prefixes.push_back("CODE:");
+ expected_prefixes.push_back("CodeInfoEncoding");
}
if (mode == kModeArt) {
exec_argv.push_back("--image=" + core_art_location_);
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
index 4b23c77..35f1948 100644
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ b/runtime/arch/arm/quick_method_frame_info_arm.h
@@ -62,7 +62,7 @@
constexpr uint32_t ArmCalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills: 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills : 0) |
(type == Runtime::kSaveAllCalleeSaves ? kArmCalleeSaveFpAllSpills : 0) |
(type == Runtime::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
}
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index c598743..01bd177 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -33,7 +33,16 @@
const std::string& variant, std::string* error_msg) {
// Look for variants that need a fix for a53 erratum 835769.
static const char* arm64_variants_with_a53_835769_bug[] = {
- "default", "generic", "cortex-a53" // Pessimistically assume all generic ARM64s are A53s.
+ // Pessimistically assume all generic CPUs are cortex-a53.
+ "default",
+ "generic",
+ "cortex-a53",
+ "cortex-a53.a57",
+ "cortex-a53.a72",
+ // Pessimistically assume all "big" cortex CPUs are paired with a cortex-a53.
+ "cortex-a57",
+ "cortex-a72",
+ "cortex-a73",
};
bool needs_a53_835769_fix = FindVariantInArray(arm64_variants_with_a53_835769_bug,
arraysize(arm64_variants_with_a53_835769_bug),
@@ -42,7 +51,10 @@
if (!needs_a53_835769_fix) {
// Check to see if this is an expected variant.
static const char* arm64_known_variants[] = {
- "denver64", "kryo", "exynos-m1"
+ "cortex-a35",
+ "exynos-m1",
+ "denver64",
+ "kryo"
};
if (!FindVariantInArray(arm64_known_variants, arraysize(arm64_known_variants), variant)) {
std::ostringstream os;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index cefa499..91cb58f 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -30,6 +30,40 @@
EXPECT_TRUE(arm64_features->Equals(arm64_features.get()));
EXPECT_STREQ("a53", arm64_features->GetFeatureString().c_str());
EXPECT_EQ(arm64_features->AsBitmap(), 1U);
+
+ std::unique_ptr<const InstructionSetFeatures> cortex_a57_features(
+ InstructionSetFeatures::FromVariant(kArm64, "cortex-a57", &error_msg));
+ ASSERT_TRUE(cortex_a57_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(cortex_a57_features->GetInstructionSet(), kArm64);
+ EXPECT_TRUE(cortex_a57_features->Equals(cortex_a57_features.get()));
+ EXPECT_STREQ("a53", cortex_a57_features->GetFeatureString().c_str());
+ EXPECT_EQ(cortex_a57_features->AsBitmap(), 1U);
+
+ std::unique_ptr<const InstructionSetFeatures> cortex_a73_features(
+ InstructionSetFeatures::FromVariant(kArm64, "cortex-a73", &error_msg));
+ ASSERT_TRUE(cortex_a73_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(cortex_a73_features->GetInstructionSet(), kArm64);
+ EXPECT_TRUE(cortex_a73_features->Equals(cortex_a73_features.get()));
+ EXPECT_STREQ("a53", cortex_a73_features->GetFeatureString().c_str());
+ EXPECT_EQ(cortex_a73_features->AsBitmap(), 1U);
+
+ std::unique_ptr<const InstructionSetFeatures> cortex_a35_features(
+ InstructionSetFeatures::FromVariant(kArm64, "cortex-a35", &error_msg));
+ ASSERT_TRUE(cortex_a35_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(cortex_a35_features->GetInstructionSet(), kArm64);
+ EXPECT_TRUE(cortex_a35_features->Equals(cortex_a35_features.get()));
+ EXPECT_STREQ("-a53", cortex_a35_features->GetFeatureString().c_str());
+ EXPECT_EQ(cortex_a35_features->AsBitmap(), 0U);
+
+ std::unique_ptr<const InstructionSetFeatures> kryo_features(
+ InstructionSetFeatures::FromVariant(kArm64, "kryo", &error_msg));
+ ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(kryo_features->GetInstructionSet(), kArm64);
+ EXPECT_TRUE(kryo_features->Equals(kryo_features.get()));
+ EXPECT_TRUE(kryo_features->Equals(cortex_a35_features.get()));
+ EXPECT_FALSE(kryo_features->Equals(cortex_a57_features.get()));
+ EXPECT_STREQ("-a53", kryo_features->GetFeatureString().c_str());
+ EXPECT_EQ(kryo_features->AsBitmap(), 0U);
}
} // namespace art
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
index 36f283b..32d9d08 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -85,7 +85,7 @@
constexpr uint32_t Arm64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills: 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills : 0) |
(type == Runtime::kSaveAllCalleeSaves ? kArm64CalleeSaveFpAllSpills : 0) |
(type == Runtime::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
}
diff --git a/runtime/arch/code_offset.h b/runtime/arch/code_offset.h
new file mode 100644
index 0000000..ab04b1e
--- /dev/null
+++ b/runtime/arch/code_offset.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_CODE_OFFSET_H_
+#define ART_RUNTIME_ARCH_CODE_OFFSET_H_
+
+#include <iosfwd>
+
+#include "base/bit_utils.h"
+#include "base/logging.h"
+#include "instruction_set.h"
+
+namespace art {
+
+// CodeOffset is a holder for compressed code offsets. Since some architectures align all
+// instructions, code offsets can be stored divided by that alignment, which reduces stack
+// map sizes.
+class CodeOffset {
+ public:
+ ALWAYS_INLINE static CodeOffset FromOffset(uint32_t offset, InstructionSet isa = kRuntimeISA) {
+ return CodeOffset(offset / GetInstructionSetInstructionAlignment(isa));
+ }
+
+ ALWAYS_INLINE static CodeOffset FromCompressedOffset(uint32_t offset) {
+ return CodeOffset(offset);
+ }
+
+ ALWAYS_INLINE uint32_t Uint32Value(InstructionSet isa = kRuntimeISA) const {
+ uint32_t decoded = value_ * GetInstructionSetInstructionAlignment(isa);
+ DCHECK_GE(decoded, value_) << "Integer overflow";
+ return decoded;
+ }
+
+ // Return compressed internal value.
+ ALWAYS_INLINE uint32_t CompressedValue() const {
+ return value_;
+ }
+
+ ALWAYS_INLINE CodeOffset() = default;
+ ALWAYS_INLINE CodeOffset(const CodeOffset&) = default;
+ ALWAYS_INLINE CodeOffset& operator=(const CodeOffset&) = default;
+ ALWAYS_INLINE CodeOffset& operator=(CodeOffset&&) = default;
+
+ private:
+ ALWAYS_INLINE explicit CodeOffset(uint32_t value) : value_(value) {}
+
+ uint32_t value_ = 0u;
+};
+
+inline bool operator==(const CodeOffset& a, const CodeOffset& b) {
+ return a.CompressedValue() == b.CompressedValue();
+}
+
+inline bool operator!=(const CodeOffset& a, const CodeOffset& b) {
+ return !(a == b);
+}
+
+inline bool operator<(const CodeOffset& a, const CodeOffset& b) {
+ return a.CompressedValue() < b.CompressedValue();
+}
+
+inline bool operator<=(const CodeOffset& a, const CodeOffset& b) {
+ return a.CompressedValue() <= b.CompressedValue();
+}
+
+inline bool operator>(const CodeOffset& a, const CodeOffset& b) {
+ return a.CompressedValue() > b.CompressedValue();
+}
+
+inline bool operator>=(const CodeOffset& a, const CodeOffset& b) {
+ return a.CompressedValue() >= b.CompressedValue();
+}
+
+inline std::ostream& operator<<(std::ostream& os, const CodeOffset& offset) {
+ return os << offset.Uint32Value();
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_CODE_OFFSET_H_
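A short usage sketch of the new class (values picked purely for illustration): on Thumb2, instructions are 2-byte aligned, so an offset is stored halved and scaled back up on read.

// Assumes kThumb2InstructionAlignment == 2, per instruction_set.h below.
CodeOffset offset = CodeOffset::FromOffset(0x1234u, kThumb2);
uint32_t stored = offset.CompressedValue();      // 0x91A == 0x1234 / 2.
uint32_t decoded = offset.Uint32Value(kThumb2);  // 0x1234 == 0x91A * 2.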
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index 4a8bea4..99aea62 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -75,6 +75,14 @@
// X86 instruction alignment. This is the recommended alignment for maximum performance.
static constexpr size_t kX86Alignment = 16;
+// Different from code alignment, since code alignment applies only to the first instruction
+// of a method.
+static constexpr size_t kThumb2InstructionAlignment = 2;
+static constexpr size_t kArm64InstructionAlignment = 4;
+static constexpr size_t kX86InstructionAlignment = 1;
+static constexpr size_t kX86_64InstructionAlignment = 1;
+static constexpr size_t kMipsInstructionAlignment = 2;
+static constexpr size_t kMips64InstructionAlignment = 2;
+
const char* GetInstructionSetString(InstructionSet isa);
// Note: Returns kNone when the string cannot be parsed to a known value.
@@ -106,6 +114,17 @@
}
}
+ALWAYS_INLINE static inline constexpr size_t GetInstructionSetInstructionAlignment(
+ InstructionSet isa) {
+ return (isa == kThumb2 || isa == kArm) ? kThumb2InstructionAlignment :
+ (isa == kArm64) ? kArm64InstructionAlignment :
+ (isa == kX86) ? kX86InstructionAlignment :
+ (isa == kX86_64) ? kX86_64InstructionAlignment :
+ (isa == kMips) ? kMipsInstructionAlignment :
+ (isa == kMips64) ? kMips64InstructionAlignment :
+ 0; // Invalid case, but constexpr doesn't support asserts.
+}
+
static inline bool IsValidInstructionSet(InstructionSet isa) {
switch (isa) {
case kArm:
diff --git a/runtime/arch/instruction_set_test.cc b/runtime/arch/instruction_set_test.cc
index 5aae93a..b251b57 100644
--- a/runtime/arch/instruction_set_test.cc
+++ b/runtime/arch/instruction_set_test.cc
@@ -44,6 +44,15 @@
EXPECT_STREQ("none", GetInstructionSetString(kNone));
}
+TEST(InstructionSetTest, GetInstructionSetInstructionAlignment) {
+ EXPECT_EQ(GetInstructionSetInstructionAlignment(kThumb2), kThumb2InstructionAlignment);
+ EXPECT_EQ(GetInstructionSetInstructionAlignment(kArm64), kArm64InstructionAlignment);
+ EXPECT_EQ(GetInstructionSetInstructionAlignment(kX86), kX86InstructionAlignment);
+ EXPECT_EQ(GetInstructionSetInstructionAlignment(kX86_64), kX86_64InstructionAlignment);
+ EXPECT_EQ(GetInstructionSetInstructionAlignment(kMips), kMipsInstructionAlignment);
+ EXPECT_EQ(GetInstructionSetInstructionAlignment(kMips64), kMips64InstructionAlignment);
+}
+
TEST(InstructionSetTest, TestRoundTrip) {
EXPECT_EQ(kRuntimeISA, GetInstructionSetFromString(GetInstructionSetString(kRuntimeISA)));
}
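Since the alignment helper is constexpr, the expected space saving can also be checked at compile time; a small sketch (not part of the patch):

// Offsets are stored divided by the alignment, so a 64 KiB Thumb2 method needs
// only 15 bits per native PC entry: 0x10000 / 2 - 1 == 0x7FFF.
static_assert(GetInstructionSetInstructionAlignment(kThumb2) == 2, "thumb2 is 2-byte aligned");
static_assert(GetInstructionSetInstructionAlignment(kX86) == 1, "x86 offsets cannot be compressed");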
diff --git a/runtime/arch/mips64/quick_method_frame_info_mips64.h b/runtime/arch/mips64/quick_method_frame_info_mips64.h
index 397776e..d774473 100644
--- a/runtime/arch/mips64/quick_method_frame_info_mips64.h
+++ b/runtime/arch/mips64/quick_method_frame_info_mips64.h
@@ -78,7 +78,7 @@
constexpr uint32_t Mips64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kMips64CalleeSaveFpRefSpills |
- (type == Runtime::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills: 0) |
+ (type == Runtime::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills : 0) |
(type == Runtime::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
(type == Runtime::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index ddb3245..b8ed530 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1398,7 +1398,11 @@
class_loader_(class_loader) {}
bool operator()(ObjPtr<mirror::Class> klass) const REQUIRES_SHARED(Locks::mutator_lock_) {
- klass->SetClassLoader(class_loader_);
+ // Do not update class loader for boot image classes where the app image
+ // class loader is only the initiating loader but not the defining loader.
+ if (klass->GetClassLoader() != nullptr) {
+ klass->SetClassLoader(class_loader_);
+ }
return true;
}
@@ -2458,10 +2462,8 @@
return EnsureResolved(self, descriptor, klass);
}
// Class is not yet loaded.
- if (descriptor[0] == '[') {
- return CreateArrayClass(self, descriptor, hash, class_loader);
- } else if (class_loader.Get() == nullptr) {
- // The boot class loader, search the boot class path.
+ if (descriptor[0] != '[' && class_loader.Get() == nullptr) {
+ // Non-array class and the boot class loader, search the boot class path.
ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
if (pair.second != nullptr) {
return DefineClass(self,
@@ -2474,14 +2476,21 @@
// The boot class loader is searched ahead of the application class loader, failures are
// expected and will be wrapped in a ClassNotFoundException. Use the pre-allocated error to
// trigger the chaining with a proper stack trace.
- ObjPtr<mirror::Throwable> pre_allocated = Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+ ObjPtr<mirror::Throwable> pre_allocated =
+ Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
self->SetException(pre_allocated);
return nullptr;
}
+ }
+ ObjPtr<mirror::Class> result_ptr;
+ bool descriptor_equals;
+ if (descriptor[0] == '[') {
+ result_ptr = CreateArrayClass(self, descriptor, hash, class_loader);
+ DCHECK_EQ(result_ptr == nullptr, self->IsExceptionPending());
+ DCHECK(result_ptr == nullptr || result_ptr->DescriptorEquals(descriptor));
+ descriptor_equals = true;
} else {
ScopedObjectAccessUnchecked soa(self);
- ObjPtr<mirror::Class> result_ptr;
- bool descriptor_equals;
bool known_hierarchy =
FindClassInBaseDexClassLoader(soa, self, descriptor, hash, class_loader, &result_ptr);
if (result_ptr != nullptr) {
@@ -2525,16 +2534,7 @@
WellKnownClasses::java_lang_ClassLoader_loadClass,
class_name_object.get()));
}
- if (self->IsExceptionPending()) {
- // If the ClassLoader threw, pass that exception up.
- // However, to comply with the RI behavior, first check if another thread succeeded.
- result_ptr = LookupClass(self, descriptor, hash, class_loader.Get());
- if (result_ptr != nullptr && !result_ptr->IsErroneous()) {
- self->ClearException();
- return EnsureResolved(self, descriptor, result_ptr);
- }
- return nullptr;
- } else if (result.get() == nullptr) {
+ if (result.get() == nullptr && !self->IsExceptionPending()) {
// broken loader - throw NPE to be compatible with Dalvik
ThrowNullPointerException(StringPrintf("ClassLoader.loadClass returned null for %s",
class_name_string.c_str()).c_str());
@@ -2542,50 +2542,60 @@
}
result_ptr = soa.Decode<mirror::Class>(result.get());
// Check the name of the returned class.
- descriptor_equals = result_ptr->DescriptorEquals(descriptor);
+ descriptor_equals = (result_ptr != nullptr) && result_ptr->DescriptorEquals(descriptor);
}
-
- // Try to insert the class to the class table, checking for mismatch.
- ObjPtr<mirror::Class> old;
- {
- WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ClassTable* const class_table = InsertClassTableForClassLoader(class_loader.Get());
- old = class_table->Lookup(descriptor, hash);
- if (old == nullptr) {
- old = result_ptr; // For the comparison below, after releasing the lock.
- if (descriptor_equals) {
- class_table->InsertWithHash(result_ptr.Ptr(), hash);
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
- } // else throw below, after releasing the lock.
- }
- }
- if (UNLIKELY(old != result_ptr)) {
- // Return `old` (even if `!descriptor_equals`) to mimic the RI behavior for parallel
- // capable class loaders. (All class loaders are considered parallel capable on Android.)
- mirror::Class* loader_class = class_loader->GetClass();
- const char* loader_class_name =
- loader_class->GetDexFile().StringByTypeIdx(loader_class->GetDexTypeIndex());
- LOG(WARNING) << "Initiating class loader of type " << DescriptorToDot(loader_class_name)
- << " is not well-behaved; it returned a different Class for racing loadClass(\""
- << DescriptorToDot(descriptor) << "\").";
- return EnsureResolved(self, descriptor, old);
- }
- if (UNLIKELY(!descriptor_equals)) {
- std::string result_storage;
- const char* result_name = result_ptr->GetDescriptor(&result_storage);
- std::string loader_storage;
- const char* loader_class_name = class_loader->GetClass()->GetDescriptor(&loader_storage);
- ThrowNoClassDefFoundError(
- "Initiating class loader of type %s returned class %s instead of %s.",
- DescriptorToDot(loader_class_name).c_str(),
- DescriptorToDot(result_name).c_str(),
- DescriptorToDot(descriptor).c_str());
- return nullptr;
- }
- // success, return mirror::Class*
- return result_ptr.Ptr();
}
- UNREACHABLE();
+
+ if (self->IsExceptionPending()) {
+ // If the ClassLoader threw or array class allocation failed, pass that exception up.
+ // However, to comply with the RI behavior, first check if another thread succeeded.
+ result_ptr = LookupClass(self, descriptor, hash, class_loader.Get());
+ if (result_ptr != nullptr && !result_ptr->IsErroneous()) {
+ self->ClearException();
+ return EnsureResolved(self, descriptor, result_ptr);
+ }
+ return nullptr;
+ }
+
+ // Try to insert the class to the class table, checking for mismatch.
+ ObjPtr<mirror::Class> old;
+ {
+ WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ ClassTable* const class_table = InsertClassTableForClassLoader(class_loader.Get());
+ old = class_table->Lookup(descriptor, hash);
+ if (old == nullptr) {
+ old = result_ptr; // For the comparison below, after releasing the lock.
+ if (descriptor_equals) {
+ class_table->InsertWithHash(result_ptr.Ptr(), hash);
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
+ } // else throw below, after releasing the lock.
+ }
+ }
+ if (UNLIKELY(old != result_ptr)) {
+ // Return `old` (even if `!descriptor_equals`) to mimic the RI behavior for parallel
+ // capable class loaders. (All class loaders are considered parallel capable on Android.)
+ mirror::Class* loader_class = class_loader->GetClass();
+ const char* loader_class_name =
+ loader_class->GetDexFile().StringByTypeIdx(loader_class->GetDexTypeIndex());
+ LOG(WARNING) << "Initiating class loader of type " << DescriptorToDot(loader_class_name)
+ << " is not well-behaved; it returned a different Class for racing loadClass(\""
+ << DescriptorToDot(descriptor) << "\").";
+ return EnsureResolved(self, descriptor, old);
+ }
+ if (UNLIKELY(!descriptor_equals)) {
+ std::string result_storage;
+ const char* result_name = result_ptr->GetDescriptor(&result_storage);
+ std::string loader_storage;
+ const char* loader_class_name = class_loader->GetClass()->GetDescriptor(&loader_storage);
+ ThrowNoClassDefFoundError(
+ "Initiating class loader of type %s returned class %s instead of %s.",
+ DescriptorToDot(loader_class_name).c_str(),
+ DescriptorToDot(result_name).c_str(),
+ DescriptorToDot(descriptor).c_str());
+ return nullptr;
+ }
+ // success, return mirror::Class*
+ return result_ptr.Ptr();
}
mirror::Class* ClassLinker::DefineClass(Thread* self,
@@ -3507,7 +3517,8 @@
// class to the hash table --- necessary because of possible races with
// other threads.)
if (class_loader.Get() != component_type->GetClassLoader()) {
- ObjPtr<mirror::Class> new_class = LookupClass(self, descriptor, hash, component_type->GetClassLoader());
+ ObjPtr<mirror::Class> new_class =
+ LookupClass(self, descriptor, hash, component_type->GetClassLoader());
if (new_class != nullptr) {
return new_class.Ptr();
}
@@ -7725,7 +7736,7 @@
type = LookupClass(self, descriptor, hash, class_loader.Ptr());
}
}
- if (type != nullptr || type->IsResolved()) {
+ if (type != nullptr && type->IsResolved()) {
return type.Ptr();
}
return nullptr;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index d98daa5..7b6c0dc 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -487,7 +487,7 @@
// says AccessibleObject is 9 bytes but sizeof(AccessibleObject) is 12 bytes due to padding.
// The RoundUp is to get around this case.
static constexpr size_t kPackAlignment = 4;
- size_t expected_size = RoundUp(is_static ? klass->GetClassSize(): klass->GetObjectSize(),
+ size_t expected_size = RoundUp(is_static ? klass->GetClassSize() : klass->GetObjectSize(),
kPackAlignment);
if (sizeof(T) != expected_size) {
LOG(ERROR) << "Class size mismatch:"
@@ -906,6 +906,41 @@
klass);
}
+TEST_F(ClassLinkerTest, LookupResolvedTypeArray) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader>(LoadDex("AllFields"))));
+ // Get the AllFields class for the dex cache and dex file.
+ ObjPtr<mirror::Class> all_fields_klass
+ = class_linker_->FindClass(soa.Self(), "LAllFields;", class_loader);
+ ASSERT_OBJ_PTR_NE(all_fields_klass, ObjPtr<mirror::Class>(nullptr));
+ Handle<mirror::DexCache> dex_cache = hs.NewHandle(all_fields_klass->GetDexCache());
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ // Get the index of the array class we want to test.
+ const DexFile::TypeId* array_id = dex_file.FindTypeId("[Ljava/lang/Object;");
+ ASSERT_TRUE(array_id != nullptr);
+ dex::TypeIndex array_idx = dex_file.GetIndexForTypeId(*array_id);
+ // Check that the array class wasn't resolved yet.
+ EXPECT_OBJ_PTR_EQ(
+ class_linker_->LookupResolvedType(dex_file, array_idx, dex_cache.Get(), class_loader.Get()),
+ ObjPtr<mirror::Class>(nullptr));
+ // Resolve the array class we want to test.
+ ObjPtr<mirror::Class> array_klass
+ = class_linker_->FindClass(soa.Self(), "[Ljava/lang/Object;", class_loader);
+ ASSERT_OBJ_PTR_NE(array_klass, ObjPtr<mirror::Class>(nullptr));
+ // Test that LookupResolvedType() finds the array class.
+ EXPECT_OBJ_PTR_EQ(
+ class_linker_->LookupResolvedType(dex_file, array_idx, dex_cache.Get(), class_loader.Get()),
+ array_klass);
+ // Zero out the resolved type and make sure LookupResolvedType() still finds it.
+ dex_cache->SetResolvedType(array_idx, nullptr);
+ EXPECT_TRUE(dex_cache->GetResolvedType(array_idx) == nullptr);
+ EXPECT_OBJ_PTR_EQ(
+ class_linker_->LookupResolvedType(dex_file, array_idx, dex_cache.Get(), class_loader.Get()),
+ array_klass);
+}
+
TEST_F(ClassLinkerTest, LibCore) {
ScopedObjectAccess soa(Thread::Current());
ASSERT_TRUE(java_lang_dex_file_ != nullptr);
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index 0f985c6..ff846a7 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -129,6 +129,19 @@
classes_.back().InsertWithHash(TableSlot(klass, hash), hash);
}
+void ClassTable::CopyWithoutLocks(const ClassTable& source_table) {
+ if (kIsDebugBuild) {
+ for (ClassSet& class_set : classes_) {
+ CHECK(class_set.Empty());
+ }
+ }
+ for (const ClassSet& class_set : source_table.classes_) {
+ for (const TableSlot& slot : class_set) {
+ classes_.back().Insert(slot);
+ }
+ }
+}
+
void ClassTable::InsertWithoutLocks(ObjPtr<mirror::Class> klass) {
const uint32_t hash = TableSlot::HashDescriptor(klass);
classes_.back().InsertWithHash(TableSlot(klass, hash), hash);
diff --git a/runtime/class_table.h b/runtime/class_table.h
index f27d809..c8ec28e 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -240,6 +240,7 @@
}
private:
+ void CopyWithoutLocks(const ClassTable& source_table) NO_THREAD_SAFETY_ANALYSIS;
void InsertWithoutLocks(ObjPtr<mirror::Class> klass) NO_THREAD_SAFETY_ANALYSIS;
size_t CountDefiningLoaderClasses(ObjPtr<mirror::ClassLoader> defining_loader,
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 34d8284..268cca0 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -360,7 +360,7 @@
// If we are the zygote, the non moving space becomes the zygote space when we run
// PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
// rename the mem map later.
- const char* space_name = is_zygote ? kZygoteSpaceName: kNonMovingSpaceName;
+ const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
// Reserve the non moving mem map before the other two since it needs to be at a specific
// address.
non_moving_space_mem_map.reset(
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index b15544d..38b68cb 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -42,7 +42,7 @@
class Heap;
-// Used to process java.lang.References concurrently or paused.
+// Used to process java.lang.ref.Reference instances concurrently or paused.
class ReferenceProcessor {
public:
explicit ReferenceProcessor();
diff --git a/runtime/gc/scoped_gc_critical_section.h b/runtime/gc/scoped_gc_critical_section.h
index ec93bca..1271ff7 100644
--- a/runtime/gc/scoped_gc_critical_section.h
+++ b/runtime/gc/scoped_gc_critical_section.h
@@ -27,8 +27,8 @@
namespace gc {
-// Wait until the GC is finished and then prevent GC from starting until the destructor. Used
-// to prevent deadlocks in places where we call ClassLinker::VisitClass with all th threads
+// Wait until the GC is finished and then prevent the GC from starting until the destructor. Used
+// to prevent deadlocks in places where we call ClassLinker::VisitClass with all the threads
// suspended.
class ScopedGCCriticalSection {
public:
diff --git a/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S b/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
index 6cec363..28e831a 100644
--- a/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
+++ b/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
@@ -4,7 +4,7 @@
GET_VREG w2, w2 // w2<- fp[B], the object pointer
ubfx w0, wINST, #8, #4 // w0<- A
cbz w2, common_errNullObject // object was null
- GET_VREG_WIDE x0, w0 // x0-< fp[A]
+ GET_VREG_WIDE x0, w0 // x0<- fp[A]
FETCH_ADVANCE_INST 2 // advance rPC, load wINST
str x0, [x2, x3] // obj.field<- x0
GET_INST_OPCODE ip // extract opcode from wINST
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index 681790d..7d442c0 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -6593,7 +6593,7 @@
GET_VREG w2, w2 // w2<- fp[B], the object pointer
ubfx w0, wINST, #8, #4 // w0<- A
cbz w2, common_errNullObject // object was null
- GET_VREG_WIDE x0, w0 // x0-< fp[A]
+ GET_VREG_WIDE x0, w0 // x0<- fp[A]
FETCH_ADVANCE_INST 2 // advance rPC, load wINST
str x0, [x2, x3] // obj.field<- x0
GET_INST_OPCODE ip // extract opcode from wINST
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 7dd3d3d..feb6e08 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1443,7 +1443,7 @@
ObjPtr<mirror::Object> java_method_obj = shadow_frame->GetVRegReference(arg_offset);
ScopedLocalRef<jobject> java_method(env,
- java_method_obj == nullptr ? nullptr :env->AddLocalReference<jobject>(java_method_obj));
+ java_method_obj == nullptr ? nullptr : env->AddLocalReference<jobject>(java_method_obj));
ObjPtr<mirror::Object> java_receiver_obj = shadow_frame->GetVRegReference(arg_offset + 1);
ScopedLocalRef<jobject> java_receiver(env,
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 2bb8819..6deb03d 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -514,7 +514,7 @@
}
}
- native_pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding) +
+ native_pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA) +
osr_method->GetEntryPoint();
VLOG(jit) << "Jumping to "
<< method_name
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 9c09275..071b0e2 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1394,6 +1394,12 @@
}
}
+size_t MonitorList::Size() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, monitor_list_lock_);
+ return list_.size();
+}
+
class MonitorDeflateVisitor : public IsMarkedVisitor {
public:
MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
diff --git a/runtime/monitor.h b/runtime/monitor.h
index c3da563..1fa4682 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -331,6 +331,7 @@
void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
// Returns how many monitors were deflated.
size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
+ size_t Size() REQUIRES(!monitor_list_lock_);
typedef std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>> Monitors;
diff --git a/runtime/oat.h b/runtime/oat.h
index 953b445..29821a2 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '1', '0', '2', '\0' }; // Enabling CC
+ static constexpr uint8_t kOatVersion[] = { '1', '0', '3', '\0' }; // Native pc change
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 9c2378d..fd84426 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -80,7 +80,7 @@
: code_info.GetStackMapForDexPc(dex_pc, encoding);
if (stack_map.IsValid()) {
return reinterpret_cast<uintptr_t>(entry_point) +
- stack_map.GetNativePcOffset(encoding.stack_map_encoding);
+ stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA);
}
if (abort_on_failure) {
ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index a731c17..976a1e7 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -22,6 +22,7 @@
"OpenjdkJvmTi.cc",
"ti_class.cc",
"ti_class_definition.cc",
+ "ti_dump.cc",
"ti_field.cc",
"ti_heap.cc",
"ti_jni.cc",
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index fcedd4e..417d104 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -49,6 +49,7 @@
#include "thread-inl.h"
#include "thread_list.h"
#include "ti_class.h"
+#include "ti_dump.h"
#include "ti_field.h"
#include "ti_heap.h"
#include "ti_jni.h"
@@ -1309,6 +1310,9 @@
PhaseUtil::SetToOnLoad();
}
PhaseUtil::Register(&gEventHandler);
+ ThreadUtil::Register(&gEventHandler);
+ ClassUtil::Register(&gEventHandler);
+ DumpUtil::Register(&gEventHandler);
runtime->GetJavaVM()->AddEnvironmentHook(GetEnvHandler);
runtime->AddSystemWeakHolder(&gObjectTagTable);
@@ -1316,6 +1320,15 @@
return true;
}
+extern "C" bool ArtPlugin_Deinitialize() {
+ PhaseUtil::Unregister();
+ ThreadUtil::Unregister();
+ ClassUtil::Unregister();
+ DumpUtil::Unregister();
+
+ return true;
+}
+
// The actual struct holding all of the entrypoints into the jvmti interface.
const jvmtiInterface_1 gJvmtiInterface = {
nullptr, // reserved1
diff --git a/runtime/openjdkjvmti/events.cc b/runtime/openjdkjvmti/events.cc
index 7182055..d3f8001 100644
--- a/runtime/openjdkjvmti/events.cc
+++ b/runtime/openjdkjvmti/events.cc
@@ -212,7 +212,7 @@
thread.get(),
object.get(),
klass.get(),
- byte_count);
+ static_cast<jlong>(byte_count));
}
}
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index abcc849..450f75a 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -31,16 +31,119 @@
#include "ti_class.h"
+#include <mutex>
+#include <unordered_set>
+
#include "art_jvmti.h"
+#include "base/macros.h"
#include "class_table-inl.h"
#include "class_linker.h"
+#include "events-inl.h"
+#include "handle.h"
+#include "jni_env_ext-inl.h"
#include "jni_internal.h"
#include "runtime.h"
+#include "runtime_callbacks.h"
+#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
+#include "thread_list.h"
namespace openjdkjvmti {
+struct ClassCallback : public art::ClassLoadCallback {
+ void ClassLoad(art::Handle<art::mirror::Class> klass) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassLoad)) {
+ art::Thread* thread = art::Thread::Current();
+ ScopedLocalRef<jclass> jklass(thread->GetJniEnv(),
+ thread->GetJniEnv()->AddLocalReference<jclass>(klass.Get()));
+ ScopedLocalRef<jclass> jthread(
+ thread->GetJniEnv(), thread->GetJniEnv()->AddLocalReference<jclass>(thread->GetPeer()));
+ {
+ art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
+ event_handler->DispatchEvent(thread,
+ ArtJvmtiEvent::kClassLoad,
+ reinterpret_cast<JNIEnv*>(thread->GetJniEnv()),
+ jthread.get(),
+ jklass.get());
+ }
+ AddTempClass(thread, jklass.get());
+ }
+ }
+
+ void ClassPrepare(art::Handle<art::mirror::Class> temp_klass ATTRIBUTE_UNUSED,
+ art::Handle<art::mirror::Class> klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassPrepare)) {
+ art::Thread* thread = art::Thread::Current();
+ ScopedLocalRef<jclass> jklass(thread->GetJniEnv(),
+ thread->GetJniEnv()->AddLocalReference<jclass>(klass.Get()));
+ ScopedLocalRef<jclass> jthread(
+ thread->GetJniEnv(), thread->GetJniEnv()->AddLocalReference<jclass>(thread->GetPeer()));
+ art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
+ event_handler->DispatchEvent(thread,
+ ArtJvmtiEvent::kClassPrepare,
+ reinterpret_cast<JNIEnv*>(thread->GetJniEnv()),
+ jthread.get(),
+ jklass.get());
+ }
+ }
+
+ void AddTempClass(art::Thread* self, jclass klass) {
+ std::unique_lock<std::mutex> mu(temp_classes_lock);
+ temp_classes.push_back(reinterpret_cast<jclass>(self->GetJniEnv()->NewGlobalRef(klass)));
+ }
+
+ void HandleTempClass(art::Handle<art::mirror::Class> temp_klass,
+ art::Handle<art::mirror::Class> klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ std::unique_lock<std::mutex> mu(temp_classes_lock);
+ if (temp_classes.empty()) {
+ return;
+ }
+
+ art::Thread* self = art::Thread::Current();
+ for (auto it = temp_classes.begin(); it != temp_classes.end(); ++it) {
+ if (temp_klass.Get() == art::ObjPtr<art::mirror::Class>::DownCast(self->DecodeJObject(*it))) {
+ temp_classes.erase(it);
+ FixupTempClass(temp_klass, klass);
+ // Stop here: erase() invalidated `it`, and each temp class is added only once.
+ break;
+ }
+ }
+ }
+
+ void FixupTempClass(art::Handle<art::mirror::Class> temp_klass ATTRIBUTE_UNUSED,
+ art::Handle<art::mirror::Class> klass ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // TODO: Implement.
+ }
+
+ // All the temp classes we have handed out; references to these must be fixed up. For
+ // simplicity, we store the temp classes as JNI global references in a vector. Normally a
+ // ClassPrepare event follows closely, so the vector should stay small.
+ std::mutex temp_classes_lock;
+ std::vector<jclass> temp_classes;
+
+ EventHandler* event_handler = nullptr;
+};
+
+ClassCallback gClassCallback;
+
+void ClassUtil::Register(EventHandler* handler) {
+ gClassCallback.event_handler = handler;
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("Add load callback");
+ art::Runtime::Current()->GetRuntimeCallbacks()->AddClassLoadCallback(&gClassCallback);
+}
+
+void ClassUtil::Unregister() {
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("Remove thread callback");
+ art::Runtime* runtime = art::Runtime::Current();
+ runtime->GetRuntimeCallbacks()->RemoveClassLoadCallback(&gClassCallback);
+}
+
jvmtiError ClassUtil::GetClassFields(jvmtiEnv* env,
jclass jklass,
jint* field_count_ptr,
@@ -200,7 +303,9 @@
}
// TODO: Support generic signature.
- *generic_ptr = nullptr;
+ if (generic_ptr != nullptr) {
+ *generic_ptr = nullptr;
+ }
// Everything is fine, release the buffers.
sig_copy.release();
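The HandleTempClass loop above must not keep iterating after erase(), because vector::erase invalidates the iterator; a generic sketch of the safe single-removal pattern (hypothetical helper, not part of the patch):

#include <vector>

// Hypothetical helper: erase the first element equal to `target`, report success.
template <typename T>
bool EraseFirst(std::vector<T>& items, const T& target) {
  for (auto it = items.begin(); it != items.end(); ++it) {
    if (*it == target) {
      items.erase(it);  // `it` is now invalid; return instead of advancing it.
      return true;
    }
  }
  return false;
}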
diff --git a/runtime/openjdkjvmti/ti_class.h b/runtime/openjdkjvmti/ti_class.h
index 9558894..aa2260f 100644
--- a/runtime/openjdkjvmti/ti_class.h
+++ b/runtime/openjdkjvmti/ti_class.h
@@ -37,8 +37,13 @@
namespace openjdkjvmti {
+class EventHandler;
+
class ClassUtil {
public:
+ static void Register(EventHandler* event_handler);
+ static void Unregister();
+
static jvmtiError GetClassFields(jvmtiEnv* env,
jclass klass,
jint* field_count_ptr,
diff --git a/runtime/openjdkjvmti/ti_dump.cc b/runtime/openjdkjvmti/ti_dump.cc
new file mode 100644
index 0000000..2ee5c40
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_dump.cc
@@ -0,0 +1,74 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "ti_dump.h"
+
+#include <limits>
+
+#include "art_jvmti.h"
+#include "base/mutex.h"
+#include "events-inl.h"
+#include "runtime_callbacks.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace openjdkjvmti {
+
+struct DumpCallback : public art::RuntimeSigQuitCallback {
+ void SigQuit() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::Thread* thread = art::Thread::Current();
+ art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
+ event_handler->DispatchEvent(nullptr, ArtJvmtiEvent::kDataDumpRequest);
+ }
+
+ EventHandler* event_handler = nullptr;
+};
+
+static DumpCallback gDumpCallback;
+
+void DumpUtil::Register(EventHandler* handler) {
+ gDumpCallback.event_handler = handler;
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("Add sigquit callback");
+ art::Runtime::Current()->GetRuntimeCallbacks()->AddRuntimeSigQuitCallback(&gDumpCallback);
+}
+
+void DumpUtil::Unregister() {
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("Remove sigquit callback");
+ art::Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimeSigQuitCallback(&gDumpCallback);
+}
+
+} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_dump.h b/runtime/openjdkjvmti/ti_dump.h
new file mode 100644
index 0000000..67cb239
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_dump.h
@@ -0,0 +1,50 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_DUMP_H_
+#define ART_RUNTIME_OPENJDKJVMTI_TI_DUMP_H_
+
+#include "jni.h"
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+class EventHandler;
+
+class DumpUtil {
+ public:
+ static void Register(EventHandler* event_handler);
+ static void Unregister();
+};
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_TI_DUMP_H_
diff --git a/runtime/openjdkjvmti/ti_phase.cc b/runtime/openjdkjvmti/ti_phase.cc
index 85d6b72..154406a 100644
--- a/runtime/openjdkjvmti/ti_phase.cc
+++ b/runtime/openjdkjvmti/ti_phase.cc
@@ -55,19 +55,23 @@
return soa.AddLocalReference<jthread>(soa.Self()->GetPeer());
}
- void NextRuntimePhase(RuntimePhase phase) OVERRIDE {
+ void NextRuntimePhase(RuntimePhase phase) REQUIRES_SHARED(art::Locks::mutator_lock_) OVERRIDE {
// TODO: Events.
switch (phase) {
case RuntimePhase::kInitialAgents:
PhaseUtil::current_phase_ = JVMTI_PHASE_PRIMORDIAL;
break;
case RuntimePhase::kStart:
- event_handler->DispatchEvent(nullptr, ArtJvmtiEvent::kVmStart, GetJniEnv());
- PhaseUtil::current_phase_ = JVMTI_PHASE_START;
+ {
+ art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative);
+ event_handler->DispatchEvent(nullptr, ArtJvmtiEvent::kVmStart, GetJniEnv());
+ PhaseUtil::current_phase_ = JVMTI_PHASE_START;
+ }
break;
case RuntimePhase::kInit:
{
ScopedLocalRef<jthread> thread(GetJniEnv(), GetCurrentJThread());
+ art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative);
event_handler->DispatchEvent(nullptr,
ArtJvmtiEvent::kVmInit,
GetJniEnv(),
@@ -76,8 +80,11 @@
}
break;
case RuntimePhase::kDeath:
- event_handler->DispatchEvent(nullptr, ArtJvmtiEvent::kVmDeath, GetJniEnv());
- PhaseUtil::current_phase_ = JVMTI_PHASE_DEAD;
+ {
+ art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative);
+ event_handler->DispatchEvent(nullptr, ArtJvmtiEvent::kVmDeath, GetJniEnv());
+ PhaseUtil::current_phase_ = JVMTI_PHASE_DEAD;
+ }
// TODO: Block events now.
break;
}
@@ -125,5 +132,11 @@
art::Runtime::Current()->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&gPhaseCallback);
}
+void PhaseUtil::Unregister() {
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("Remove phase callback");
+ art::Runtime::Current()->GetRuntimeCallbacks()->RemoveRuntimePhaseCallback(&gPhaseCallback);
+}
} // namespace openjdkjvmti
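
Register and Unregister share one shape: step out of the runnable state, suspend every other thread, then mutate the runtime's callback list. A hedged sketch of that shape, assuming (as the suspend-all scope suggests) the callback lists are traversed without locks and may only be mutated while all other threads are stopped:

// Hypothetical helper distilling the pattern used by Register()/Unregister();
// WithAllThreadsSuspended is illustrative, not part of this change.
template <typename Fn>
static void WithAllThreadsSuspended(const char* cause, Fn&& fn) {
  art::Thread* self = art::Thread::Current();
  // The state change mirrors the debugger-attach path; it keeps this thread
  // out of the runnable set while it suspends the others.
  art::ScopedThreadStateChange stsc(self, art::ThreadState::kWaitingForDebuggerToAttach);
  art::ScopedSuspendAll ssa(cause);
  fn();  // Safe to mutate callback lists here.
}
// Usage sketch:
//   WithAllThreadsSuspended("Add phase callback", [] { /* add callback */ });
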
diff --git a/runtime/openjdkjvmti/ti_phase.h b/runtime/openjdkjvmti/ti_phase.h
index 054652a..bd15fa6 100644
--- a/runtime/openjdkjvmti/ti_phase.h
+++ b/runtime/openjdkjvmti/ti_phase.h
@@ -44,6 +44,7 @@
static jvmtiError GetPhase(jvmtiEnv* env, jvmtiPhase* phase_ptr);
static void Register(EventHandler* event_handler);
+ static void Unregister();
// Move the phase from uninitialized to LOAD.
static void SetToOnLoad();
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
index 970cc24..9f81d6b 100644
--- a/runtime/openjdkjvmti/ti_thread.cc
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -31,10 +31,12 @@
#include "ti_thread.h"
+#include "android-base/strings.h"
#include "art_field.h"
#include "art_jvmti.h"
#include "base/logging.h"
#include "base/mutex.h"
+#include "events-inl.h"
#include "gc/system_weak.h"
#include "gc_root-inl.h"
#include "jni_internal.h"
@@ -43,6 +45,8 @@
#include "mirror/string.h"
#include "obj_ptr.h"
#include "runtime.h"
+#include "runtime_callbacks.h"
+#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
@@ -50,6 +54,77 @@
namespace openjdkjvmti {
+struct ThreadCallback : public art::ThreadLifecycleCallback, public art::RuntimePhaseCallback {
+ jthread GetThreadObject(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (self->GetPeer() == nullptr) {
+ return nullptr;
+ }
+ return self->GetJniEnv()->AddLocalReference<jthread>(self->GetPeer());
+ }
+ void Post(art::Thread* self, ArtJvmtiEvent type) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK_EQ(self, art::Thread::Current());
+ ScopedLocalRef<jthread> thread(self->GetJniEnv(), GetThreadObject(self));
+ art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
+ event_handler->DispatchEvent(self, type, self->GetJniEnv(), thread.get());
+ }
+
+ void ThreadStart(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (!started) {
+ // Runtime isn't started. We only expect at most the signal handler or JIT threads to be
+ // started here.
+ if (art::kIsDebugBuild) {
+ std::string name;
+ self->GetThreadName(name);
+ if (name != "Signal Catcher" && !android::base::StartsWith(name, "Jit thread pool")) {
+ LOG(FATAL) << "Unexpected thread before start: " << name;
+ }
+ }
+ return;
+ }
+ Post(self, ArtJvmtiEvent::kThreadStart);
+ }
+
+ void ThreadDeath(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ Post(self, ArtJvmtiEvent::kThreadEnd);
+ }
+
+ void NextRuntimePhase(RuntimePhase phase) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (phase == RuntimePhase::kInit) {
+ // We moved to VMInit. Report the main thread as started (it was attached early, and must
+ // not be reported until Init).
+ started = true;
+ Post(art::Thread::Current(), ArtJvmtiEvent::kThreadStart);
+ }
+ }
+
+ EventHandler* event_handler = nullptr;
+ bool started = false;
+};
+
+ThreadCallback gThreadCallback;
+
+void ThreadUtil::Register(EventHandler* handler) {
+ art::Runtime* runtime = art::Runtime::Current();
+
+ gThreadCallback.started = runtime->IsStarted();
+ gThreadCallback.event_handler = handler;
+
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("Add thread callback");
+ runtime->GetRuntimeCallbacks()->AddThreadLifecycleCallback(&gThreadCallback);
+ runtime->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&gThreadCallback);
+}
+
+void ThreadUtil::Unregister() {
+ art::ScopedThreadStateChange stsc(art::Thread::Current(),
+ art::ThreadState::kWaitingForDebuggerToAttach);
+ art::ScopedSuspendAll ssa("Remove thread callback");
+ art::Runtime* runtime = art::Runtime::Current();
+ runtime->GetRuntimeCallbacks()->RemoveThreadLifecycleCallback(&gThreadCallback);
+ runtime->GetRuntimeCallbacks()->RemoveRuntimePhaseCallback(&gThreadCallback);
+}
+
jvmtiError ThreadUtil::GetCurrentThread(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread* thread_ptr) {
art::Thread* self = art::Thread::Current();
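
The net effect of ThreadCallback is an ordering contract: nothing is reported before VMInit, the early-attached main thread is reported exactly once at VMInit, and every later thread brackets its run with start/end events. A standalone model of that contract (plain C++, no ART dependencies; the 924-threads expected output later in this change shows the same order):

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> events;
  bool started = false;  // mirrors ThreadCallback::started
  auto thread_start = [&](const std::string& name) {
    if (!started) {
      return;  // threads attached before VMInit are not reported yet
    }
    events.push_back("ThreadStart(" + name + ")");
  };
  // VMInit: flip the flag, then report the early-attached main thread.
  started = true;
  thread_start("main");
  // A child thread posts start on entry and end on exit.
  thread_start("EventTestThread");
  events.push_back("ThreadEnd(EventTestThread)");
  assert(events.front() == "ThreadStart(main)");
  assert(events.size() == 3u);
  return 0;
}
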
diff --git a/runtime/openjdkjvmti/ti_thread.h b/runtime/openjdkjvmti/ti_thread.h
index 5aaec58..f6f93ee 100644
--- a/runtime/openjdkjvmti/ti_thread.h
+++ b/runtime/openjdkjvmti/ti_thread.h
@@ -37,8 +37,13 @@
namespace openjdkjvmti {
+class EventHandler;
+
class ThreadUtil {
public:
+ static void Register(EventHandler* event_handler);
+ static void Unregister();
+
static jvmtiError GetAllThreads(jvmtiEnv* env, jint* threads_count_ptr, jthread** threads_ptr);
static jvmtiError GetCurrentThread(jvmtiEnv* env, jthread* thread_ptr);
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index a72159b..d1ad77c 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -302,6 +302,9 @@
.IntoKey(M::Plugins)
.Define("-Xfully-deoptable")
.IntoKey(M::FullyDeoptable)
+ .Define("-XX:ThreadSuspendTimeout=_") // in ms
+ .WithType<MillisecondsToNanoseconds>() // store as ns
+ .IntoKey(M::ThreadSuspendTimeout)
.Ignore({
"-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
@@ -724,6 +727,7 @@
UsageMessage(stream, " -XX:MaxSpinsBeforeThinLockInflation=integervalue\n");
UsageMessage(stream, " -XX:LongPauseLogThreshold=integervalue\n");
UsageMessage(stream, " -XX:LongGCLogThreshold=integervalue\n");
+ UsageMessage(stream, " -XX:ThreadSuspendTimeout=integervalue\n");
UsageMessage(stream, " -XX:DumpGCPerformanceOnShutdown\n");
UsageMessage(stream, " -XX:DumpJITInfoOnShutdown\n");
UsageMessage(stream, " -XX:IgnoreMaxFootprint\n");
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4936a2f..06cd7ff 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -735,6 +735,13 @@
GetInstructionSetString(kRuntimeISA));
}
+ // Send the initialized phase event. Send it before starting daemons, as otherwise
+ // sending thread events becomes complicated.
+ {
+ ScopedObjectAccess soa(self);
+ callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kInit);
+ }
+
StartDaemonThreads();
{
@@ -756,12 +763,6 @@
0);
}
- // Send the initialized phase event.
- {
- ScopedObjectAccess soa(self);
- callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kInit);
- }
-
return true;
}
@@ -1045,7 +1046,7 @@
monitor_list_ = new MonitorList;
monitor_pool_ = MonitorPool::Create();
- thread_list_ = new ThreadList;
+ thread_list_ = new ThreadList(runtime_options.GetOrDefault(Opt::ThreadSuspendTimeout));
intern_table_ = new InternTable;
verify_ = runtime_options.GetOrDefault(Opt::Verify);
diff --git a/runtime/runtime_options.cc b/runtime/runtime_options.cc
index e75481c..aa14719 100644
--- a/runtime/runtime_options.cc
+++ b/runtime/runtime_options.cc
@@ -21,6 +21,7 @@
#include "gc/heap.h"
#include "monitor.h"
#include "runtime.h"
+#include "thread_list.h"
#include "trace.h"
#include "utils.h"
#include "debugger.h"
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index ecabf9a..749a36e 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -60,6 +60,8 @@
LongPauseLogThreshold, gc::Heap::kDefaultLongPauseLogThreshold)
RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
LongGCLogThreshold, gc::Heap::kDefaultLongGCLogThreshold)
+RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
+ ThreadSuspendTimeout, ThreadList::kDefaultThreadSuspendTimeout)
RUNTIME_OPTIONS_KEY (Unit, DumpGCPerformanceOnShutdown)
RUNTIME_OPTIONS_KEY (Unit, DumpJITInfoOnShutdown)
RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
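
The option is given in milliseconds on the command line but stored in nanoseconds, so ThreadList can compare it directly against NanoTime() deltas. A worked conversion, assuming MillisecondsToNanoseconds multiplies by 1,000,000 like MsToNs() in base/time_utils.h:

#include <cstdint>

constexpr uint64_t MsToNs(uint64_t ms) { return ms * 1000000u; }

// Stand-in for ART's build flag; debug builds get the longer default.
constexpr bool kIsDebugBuild = true;

// -XX:ThreadSuspendTimeout=30000 would restore the old hard-coded 30 s value.
constexpr uint64_t kThirtySecondsNs = MsToNs(30000);  // 30,000,000,000 ns
constexpr uint64_t kDefaultNs = MsToNs(kIsDebugBuild ? 50000 : 10000);
static_assert(kThirtySecondsNs == 30000000000ull, "unit check");
static_assert(kDefaultNs == 50000000000ull, "50 s in debug builds");
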
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 9ebf9a7..690b069 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -116,7 +116,8 @@
void CodeInfo::Dump(VariableIndentationOutputStream* vios,
uint32_t code_offset,
uint16_t number_of_dex_registers,
- bool dump_stack_maps) const {
+ bool dump_stack_maps,
+ InstructionSet instruction_set) const {
CodeInfoEncoding encoding = ExtractEncoding();
size_t number_of_stack_maps = GetNumberOfStackMaps(encoding);
vios->Stream()
@@ -139,6 +140,7 @@
encoding,
code_offset,
number_of_dex_registers,
+ instruction_set,
" " + std::to_string(i));
}
}
@@ -188,14 +190,17 @@
const CodeInfoEncoding& encoding,
uint32_t code_offset,
uint16_t number_of_dex_registers,
+ InstructionSet instruction_set,
const std::string& header_suffix) const {
StackMapEncoding stack_map_encoding = encoding.stack_map_encoding;
+ const uint32_t pc_offset = GetNativePcOffset(stack_map_encoding, instruction_set);
vios->Stream()
<< "StackMap" << header_suffix
<< std::hex
- << " [native_pc=0x" << code_offset + GetNativePcOffset(stack_map_encoding) << "]"
+ << " [native_pc=0x" << code_offset + pc_offset << "]"
+ << " [entry_size=0x" << encoding.stack_map_size_in_bytes << "]"
<< " (dex_pc=0x" << GetDexPc(stack_map_encoding)
- << ", native_pc_offset=0x" << GetNativePcOffset(stack_map_encoding)
+ << ", native_pc_offset=0x" << pc_offset
<< ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(stack_map_encoding)
<< ", inline_info_offset=0x" << GetInlineDescriptorOffset(stack_map_encoding)
<< ", register_mask=0x" << GetRegisterMask(stack_map_encoding)
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 13886f2..cd9a3f0 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_STACK_MAP_H_
#define ART_RUNTIME_STACK_MAP_H_
+#include "arch/code_offset.h"
#include "base/bit_vector.h"
#include "base/bit_utils.h"
#include "dex_file.h"
@@ -805,12 +806,16 @@
encoding.GetDexPcEncoding().Store(region_, dex_pc);
}
- ALWAYS_INLINE uint32_t GetNativePcOffset(const StackMapEncoding& encoding) const {
- return encoding.GetNativePcEncoding().Load(region_);
+ ALWAYS_INLINE uint32_t GetNativePcOffset(const StackMapEncoding& encoding,
+ InstructionSet instruction_set) const {
+ CodeOffset offset(
+ CodeOffset::FromCompressedOffset(encoding.GetNativePcEncoding().Load(region_)));
+ return offset.Uint32Value(instruction_set);
}
- ALWAYS_INLINE void SetNativePcOffset(const StackMapEncoding& encoding, uint32_t native_pc_offset) {
- encoding.GetNativePcEncoding().Store(region_, native_pc_offset);
+ ALWAYS_INLINE void SetNativePcCodeOffset(const StackMapEncoding& encoding,
+ CodeOffset native_pc_offset) {
+ encoding.GetNativePcEncoding().Store(region_, native_pc_offset.CompressedValue());
}
ALWAYS_INLINE uint32_t GetDexRegisterMapOffset(const StackMapEncoding& encoding) const {
@@ -866,6 +871,7 @@
const CodeInfoEncoding& encoding,
uint32_t code_offset,
uint16_t number_of_dex_registers,
+ InstructionSet instruction_set,
const std::string& header_suffix = "") const;
// Special (invalid) offset for the DexRegisterMapOffset field meaning
@@ -1176,6 +1182,17 @@
}
}
+ size_t GetDexRegisterMapsSize(const CodeInfoEncoding& encoding,
+ uint32_t number_of_dex_registers) const {
+ size_t total = 0;
+ for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
+ StackMap stack_map = GetStackMapAt(i, encoding);
+ DexRegisterMap map(GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers));
+ total += map.Size();
+ }
+ return total;
+ }
+
// Return the `DexRegisterMap` pointed by `inline_info` at depth `depth`.
DexRegisterMap GetDexRegisterMapAtDepth(uint8_t depth,
InlineInfo inline_info,
@@ -1234,15 +1251,16 @@
if (stack_map.GetDexPc(stack_map_encoding) == dex_pc) {
StackMap other = GetStackMapAt(i + 1, encoding);
if (other.GetDexPc(stack_map_encoding) == dex_pc &&
- other.GetNativePcOffset(stack_map_encoding) ==
- stack_map.GetNativePcOffset(stack_map_encoding)) {
+ other.GetNativePcOffset(stack_map_encoding, kRuntimeISA) ==
+ stack_map.GetNativePcOffset(stack_map_encoding, kRuntimeISA)) {
DCHECK_EQ(other.GetDexRegisterMapOffset(stack_map_encoding),
stack_map.GetDexRegisterMapOffset(stack_map_encoding));
DCHECK(!stack_map.HasInlineInfo(stack_map_encoding));
if (i < e - 2) {
// Make sure there are not three identical stack maps following each other.
- DCHECK_NE(stack_map.GetNativePcOffset(stack_map_encoding),
- GetStackMapAt(i + 2, encoding).GetNativePcOffset(stack_map_encoding));
+ DCHECK_NE(
+ stack_map.GetNativePcOffset(stack_map_encoding, kRuntimeISA),
+ GetStackMapAt(i + 2, encoding).GetNativePcOffset(stack_map_encoding, kRuntimeISA));
}
return stack_map;
}
@@ -1258,7 +1276,8 @@
// we could do binary search.
for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
StackMap stack_map = GetStackMapAt(i, encoding);
- if (stack_map.GetNativePcOffset(encoding.stack_map_encoding) == native_pc_offset) {
+ if (stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA) ==
+ native_pc_offset) {
return stack_map;
}
}
@@ -1273,7 +1292,8 @@
void Dump(VariableIndentationOutputStream* vios,
uint32_t code_offset,
uint16_t number_of_dex_registers,
- bool dump_stack_maps) const;
+ bool dump_stack_maps,
+ InstructionSet instruction_set) const;
// Check that the code info has valid stack map and abort if it does not.
void AssertValidStackMap(const CodeInfoEncoding& encoding) const {
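
Stack maps no longer hand back raw native PC offsets: the getter takes the InstructionSet and decodes through CodeOffset. Reading the FromCompressedOffset/CompressedValue names, the assumed scheme is to divide the offset by the ISA's instruction alignment so it packs into fewer bits; arch/code_offset.h is the authority. A toy round-trip under that assumption:

#include <cassert>
#include <cstdint>

// Assumed instruction alignment: 2 bytes for Thumb2, 4 for most other ISAs.
constexpr uint32_t InstructionAlignment(bool is_thumb2) {
  return is_thumb2 ? 2u : 4u;
}

constexpr uint32_t Compress(uint32_t pc_offset, uint32_t align) {
  return pc_offset / align;  // pc_offset must be a multiple of align
}

constexpr uint32_t Decompress(uint32_t stored, uint32_t align) {
  return stored * align;
}

int main() {
  const uint32_t align = InstructionAlignment(/*is_thumb2=*/true);
  const uint32_t pc_offset = 0x52;  // a multiple of the alignment
  assert(Decompress(Compress(pc_offset, align), align) == pc_offset);
  return 0;
}
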
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index c5c7e2c..01c940e 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -57,7 +57,6 @@
using android::base::StringPrintf;
static constexpr uint64_t kLongThreadSuspendThreshold = MsToNs(5);
-static constexpr uint64_t kThreadSuspendTimeoutMs = 30 * 1000; // 30s.
// Use 0 since we want to yield to prevent blocking for an unpredictable amount of time.
static constexpr useconds_t kThreadSuspendInitialSleepUs = 0;
static constexpr useconds_t kThreadSuspendMaxYieldUs = 3000;
@@ -68,12 +67,13 @@
// Turned off again. b/29248079
static constexpr bool kDumpUnattachedThreadNativeStackForSigQuit = false;
-ThreadList::ThreadList()
+ThreadList::ThreadList(uint64_t thread_suspend_timeout_ns)
: suspend_all_count_(0),
debug_suspend_all_count_(0),
unregistering_count_(0),
suspend_all_historam_("suspend all histogram", 16, 64),
long_suspend_(false),
+ thread_suspend_timeout_ns_(thread_suspend_timeout_ns),
empty_checkpoint_barrier_(new Barrier(0)) {
CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
}
@@ -554,12 +554,14 @@
// Make sure this thread grabs exclusive access to the mutator lock and its protected data.
#if HAVE_TIMED_RWLOCK
while (true) {
- if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self, kThreadSuspendTimeoutMs, 0)) {
+ if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self,
+ NsToMs(thread_suspend_timeout_ns_),
+ 0)) {
break;
} else if (!long_suspend_) {
// Reading long_suspend without the mutator lock is slightly racy; in some rare cases, this
// could result in a thread suspend timeout.
- // Timeout if we wait more than kThreadSuspendTimeoutMs seconds.
+ // Timeout if we wait more than thread_suspend_timeout_ns_ nanoseconds.
UnsafeLogFatalForThreadSuspendAllTimeout();
}
}
@@ -653,7 +655,7 @@
// is done with a timeout so that we can detect problems.
#if ART_USE_FUTEXES
timespec wait_timeout;
- InitTimeSpec(false, CLOCK_MONOTONIC, kIsDebugBuild ? 50000 : 10000, 0, &wait_timeout);
+ InitTimeSpec(false, CLOCK_MONOTONIC, NsToMs(thread_suspend_timeout_ns_), 0, &wait_timeout);
#endif
const uint64_t start_time = NanoTime();
while (true) {
@@ -863,7 +865,7 @@
return thread;
}
const uint64_t total_delay = NanoTime() - start_time;
- if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) {
+ if (total_delay >= thread_suspend_timeout_ns_) {
ThreadSuspendByPeerWarning(self,
::android::base::FATAL,
"Thread suspension timed out",
@@ -969,7 +971,7 @@
return thread;
}
const uint64_t total_delay = NanoTime() - start_time;
- if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) {
+ if (total_delay >= thread_suspend_timeout_ns_) {
ThreadSuspendByThreadIdWarning(::android::base::WARNING,
"Thread suspension timed out",
thread_id);
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 658db00..b60fca1 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -20,6 +20,7 @@
#include "barrier.h"
#include "base/histogram.h"
#include "base/mutex.h"
+#include "base/time_utils.h"
#include "base/value_object.h"
#include "gc_root.h"
#include "jni.h"
@@ -41,11 +42,12 @@
class ThreadList {
public:
- static const uint32_t kMaxThreadId = 0xFFFF;
- static const uint32_t kInvalidThreadId = 0;
- static const uint32_t kMainThreadId = 1;
+ static constexpr uint32_t kMaxThreadId = 0xFFFF;
+ static constexpr uint32_t kInvalidThreadId = 0;
+ static constexpr uint32_t kMainThreadId = 1;
+ static constexpr uint64_t kDefaultThreadSuspendTimeout = MsToNs(kIsDebugBuild ? 50000 : 10000);
- explicit ThreadList();
+ explicit ThreadList(uint64_t thread_suspend_timeout_ns);
~ThreadList();
void DumpForSigQuit(std::ostream& os)
@@ -219,6 +221,9 @@
// Whether or not the current thread suspension is long.
bool long_suspend_;
+ // Thread suspension timeout in nanoseconds.
+ const uint64_t thread_suspend_timeout_ns_;
+
std::unique_ptr<Barrier> empty_checkpoint_barrier_;
friend class Thread;
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 9d9360e..2add955 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -54,6 +54,7 @@
static_cast<size_t>(kTraceMethodActionMask));
static constexpr uint8_t kOpNewMethod = 1U;
static constexpr uint8_t kOpNewThread = 2U;
+static constexpr uint8_t kOpTraceSummary = 3U;
class BuildStackTraceVisitor : public StackVisitor {
public:
@@ -700,20 +701,19 @@
std::string header(os.str());
if (trace_output_mode_ == TraceOutputMode::kStreaming) {
- File file(streaming_file_name_ + ".sec", O_CREAT | O_WRONLY, true);
- if (!file.IsOpened()) {
- LOG(WARNING) << "Could not open secondary trace file!";
- return;
- }
- if (!file.WriteFully(header.c_str(), header.length())) {
- file.Erase();
- std::string detail(StringPrintf("Trace data write failed: %s", strerror(errno)));
- PLOG(ERROR) << detail;
- ThrowRuntimeException("%s", detail.c_str());
- }
- if (file.FlushCloseOrErase() != 0) {
- PLOG(ERROR) << "Could not write secondary file";
- }
+ MutexLock mu(Thread::Current(), *streaming_lock_); // To serialize writing.
+ // Write a special token to mark the end of trace records and the start of
+ // trace summary.
+ uint8_t buf[7];
+ Append2LE(buf, 0);
+ buf[2] = kOpTraceSummary;
+ Append4LE(buf + 3, static_cast<uint32_t>(header.length()));
+ WriteToBuf(buf, sizeof(buf));
+ // Write the trace summary. The summary is identical to the file header when
+ // the output mode is not streaming (except for methods).
+ WriteToBuf(reinterpret_cast<const uint8_t*>(header.c_str()), header.length());
+ // Flush the buffer, which may include some trace records before the summary.
+ FlushBuf();
} else {
if (trace_file_.get() == nullptr) {
iovec iov[2];
@@ -894,6 +894,14 @@
memcpy(buf_.get() + old_offset, src, src_size);
}
+void Trace::FlushBuf() {
+ int32_t offset = cur_offset_.LoadRelaxed();
+ if (!trace_file_->WriteFully(buf_.get(), offset)) {
+ PLOG(WARNING) << "Failed to flush the remaining data in streaming mode.";
+ }
+ cur_offset_.StoreRelease(0);
+}
+
void Trace::LogMethodTraceEvent(Thread* thread, ArtMethod* method,
instrumentation::Instrumentation::InstrumentationEvent event,
uint32_t thread_clock_diff, uint32_t wall_clock_diff) {
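
In streaming mode the summary can no longer go to a side file, so FinishTracing appends it in-band behind a special record: a two-byte zero (where a thread id would normally sit), the kOpTraceSummary opcode, and a four-byte little-endian length, followed by the summary text. A standalone encoder sketch of that layout, assuming Append2LE/Append4LE write little-endian as their names imply:

#include <cstdint>
#include <string>
#include <vector>

// Builds the 7-byte marker plus summary payload written by FinishTracing above.
std::vector<uint8_t> EncodeTraceSummary(const std::string& header) {
  std::vector<uint8_t> out(7);
  out[0] = 0;                                     // 2-byte LE zero marker...
  out[1] = 0;                                     // ...in the thread-id slot
  out[2] = 3;                                     // kOpTraceSummary
  const uint32_t len = static_cast<uint32_t>(header.length());
  out[3] = len & 0xff;                            // 4-byte LE length
  out[4] = (len >> 8) & 0xff;
  out[5] = (len >> 16) & 0xff;
  out[6] = (len >> 24) & 0xff;
  out.insert(out.end(), header.begin(), header.end());
  return out;
}
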
diff --git a/runtime/trace.h b/runtime/trace.h
index 824b150..485e9a1 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -202,7 +202,8 @@
// This causes the negative annotations to incorrectly have a false positive. TODO: Figure out
// how to annotate this.
NO_THREAD_SAFETY_ANALYSIS;
- void FinishTracing() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+ void FinishTracing()
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff);
@@ -229,6 +230,9 @@
// annotation.
void WriteToBuf(const uint8_t* src, size_t src_size)
REQUIRES(streaming_lock_);
+ // Flush the main buffer to file. Used for streaming. Exposed here for lock annotation.
+ void FlushBuf()
+ REQUIRES(streaming_lock_);
uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!*unique_methods_lock_);
uint32_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action)
diff --git a/test/596-monitor-inflation/expected.txt b/test/596-monitor-inflation/expected.txt
new file mode 100644
index 0000000..2add696
--- /dev/null
+++ b/test/596-monitor-inflation/expected.txt
@@ -0,0 +1,6 @@
+JNI_OnLoad called
+Monitor list grew by at least 4000 monitors
+Monitor list shrank correctly
+Finished first check
+Finished second check
+Total checks: 10000
diff --git a/test/596-monitor-inflation/info.txt b/test/596-monitor-inflation/info.txt
new file mode 100644
index 0000000..81dedb6
--- /dev/null
+++ b/test/596-monitor-inflation/info.txt
@@ -0,0 +1,5 @@
+A simple test that forces many monitors to be inflated, while checking
+that hashcodes are consistently maintained.
+
+This allocates more monitors and hence may exercise the monitor pool
+differently, and with more context, than the monitor_pool_test gtest.
diff --git a/test/596-monitor-inflation/monitor_inflation.cc b/test/596-monitor-inflation/monitor_inflation.cc
new file mode 100644
index 0000000..fb4275b
--- /dev/null
+++ b/test/596-monitor-inflation/monitor_inflation.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/heap.h"
+#include "jni.h"
+#include "monitor.h"
+#include "runtime.h"
+#include "thread-inl.h"
+
+namespace art {
+namespace {
+
+extern "C" JNIEXPORT void JNICALL Java_Main_trim(JNIEnv*, jclass) {
+ Runtime::Current()->GetHeap()->Trim(Thread::Current());
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_monitorListSize(JNIEnv*, jclass) {
+ return Runtime::Current()->GetMonitorList()->Size();
+}
+
+} // namespace
+} // namespace art
diff --git a/test/596-monitor-inflation/src/Main.java b/test/596-monitor-inflation/src/Main.java
new file mode 100644
index 0000000..d97c766
--- /dev/null
+++ b/test/596-monitor-inflation/src/Main.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.util.IdentityHashMap;
+import dalvik.system.VMRuntime;
+
+public class Main {
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ int initialSize = monitorListSize();
+ IdentityHashMap<Object, Integer> all = new IdentityHashMap<>();
+ for (int i = 0; i < 5000; ++i) {
+ Object obj = new Object();
+ synchronized(obj) {
+ // Should force inflation.
+ all.put(obj, obj.hashCode());
+ }
+ }
+ // Since monitor deflation is delayed significantly, we believe that even with an intervening
+ // GC, monitors should remain inflated. We allow some slop for unrelated concurrent runtime
+ // actions.
+ int inflatedSize = monitorListSize();
+ if (inflatedSize >= initialSize + 4000) {
+ System.out.println("Monitor list grew by at least 4000 monitors");
+ } else {
+ System.out.println("Monitor list did not grow as expected");
+ }
+ // Encourage monitor deflation.
+ // trim() (Heap::Trim()) deflates only in JANK_IMPERCEPTIBLE state.
+ // Some of this mirrors code in ActivityThread.java.
+ final int DALVIK_PROCESS_STATE_JANK_PERCEPTIBLE = 0;
+ final int DALVIK_PROCESS_STATE_JANK_IMPERCEPTIBLE = 1;
+ VMRuntime.getRuntime().updateProcessState(DALVIK_PROCESS_STATE_JANK_IMPERCEPTIBLE);
+ System.gc();
+ System.runFinalization();
+ trim();
+ VMRuntime.getRuntime().updateProcessState(DALVIK_PROCESS_STATE_JANK_PERCEPTIBLE);
+ int finalSize = monitorListSize();
+ if (finalSize > initialSize + 1000) {
+ System.out.println("Monitor list failed to shrink properly");
+ } else {
+ System.out.println("Monitor list shrank correctly");
+ }
+ int j = 0;
+ for (Object obj: all.keySet()) {
+ ++j;
+ if (obj.hashCode() != all.get(obj)) {
+ throw new AssertionError("Failed hashcode test!");
+ }
+ }
+ System.out.println("Finished first check");
+ for (Object obj: all.keySet()) {
+ ++j;
+ synchronized(obj) {
+ if (obj.hashCode() != all.get(obj)) {
+ throw new AssertionError("Failed hashcode test!");
+ }
+ }
+ }
+ System.out.println("Finished second check");
+ System.out.println("Total checks: " + j);
+ }
+
+ private static native void trim();
+
+ private static native int monitorListSize();
+}
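
The inflation trick relies on a lock-word detail: a thin lock and an identity hash compete for the same word, so calling hashCode() inside synchronized() must inflate the lock to a fat monitor that can hold both. A rough model of the states involved (state names follow runtime/lock_word.h by assumption; the real encoding packs everything into 32 bits):

#include <cassert>
#include <cstdint>

enum class LockState { kUnlocked, kThinLocked, kFatLocked, kHashCode };

struct LockWordModel {
  LockState state = LockState::kUnlocked;
  uint32_t payload = 0;  // owner/count, monitor id, or hash, depending on state

  // Requesting a hash while thin-locked must inflate: both values cannot fit.
  bool NeedsInflationForHash() const { return state == LockState::kThinLocked; }
};

int main() {
  LockWordModel w;
  w.state = LockState::kThinLocked;
  w.payload = 1;  // thin lock: owner thread id plus recursion count
  assert(w.NeedsInflationForHash());
  return 0;
}
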
diff --git a/test/901-hello-ti-agent/src/Main.java b/test/901-hello-ti-agent/src/Main.java
index faf2dc2..4d62ed3 100644
--- a/test/901-hello-ti-agent/src/Main.java
+++ b/test/901-hello-ti-agent/src/Main.java
@@ -16,8 +16,6 @@
public class Main {
public static void main(String[] args) {
- System.loadLibrary(args[1]);
-
System.out.println("Hello, world!");
if (checkLivePhase()) {
diff --git a/test/902-hello-transformation/src/Main.java b/test/902-hello-transformation/src/Main.java
index ec47119..471c82b 100644
--- a/test/902-hello-transformation/src/Main.java
+++ b/test/902-hello-transformation/src/Main.java
@@ -49,7 +49,6 @@
"AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=");
public static void main(String[] args) {
- System.loadLibrary(args[1]);
doTest(new Transform());
}
diff --git a/test/903-hello-tagging/src/Main.java b/test/903-hello-tagging/src/Main.java
index a8aedb4..2f0365a 100644
--- a/test/903-hello-tagging/src/Main.java
+++ b/test/903-hello-tagging/src/Main.java
@@ -20,7 +20,6 @@
public class Main {
public static void main(String[] args) {
- System.loadLibrary(args[1]);
doTest();
testGetTaggedObjects();
}
diff --git a/test/904-object-allocation/src/Main.java b/test/904-object-allocation/src/Main.java
index fc8a112..df59179 100644
--- a/test/904-object-allocation/src/Main.java
+++ b/test/904-object-allocation/src/Main.java
@@ -18,8 +18,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
// Use a list to ensure objects must be allocated.
ArrayList<Object> l = new ArrayList<>(100);
diff --git a/test/905-object-free/src/Main.java b/test/905-object-free/src/Main.java
index 16dec5d..e41e378 100644
--- a/test/905-object-free/src/Main.java
+++ b/test/905-object-free/src/Main.java
@@ -19,8 +19,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
diff --git a/test/906-iterate-heap/src/Main.java b/test/906-iterate-heap/src/Main.java
index 544a365..cab27be 100644
--- a/test/906-iterate-heap/src/Main.java
+++ b/test/906-iterate-heap/src/Main.java
@@ -19,8 +19,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
diff --git a/test/907-get-loaded-classes/src/Main.java b/test/907-get-loaded-classes/src/Main.java
index 468d037..370185a 100644
--- a/test/907-get-loaded-classes/src/Main.java
+++ b/test/907-get-loaded-classes/src/Main.java
@@ -20,8 +20,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
diff --git a/test/908-gc-start-finish/src/Main.java b/test/908-gc-start-finish/src/Main.java
index 2be0eea..05388c9 100644
--- a/test/908-gc-start-finish/src/Main.java
+++ b/test/908-gc-start-finish/src/Main.java
@@ -18,8 +18,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
diff --git a/test/910-methods/src/Main.java b/test/910-methods/src/Main.java
index bf25a0d..932a1ea 100644
--- a/test/910-methods/src/Main.java
+++ b/test/910-methods/src/Main.java
@@ -20,8 +20,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
index dad08c9..2687f85 100644
--- a/test/911-get-stack-trace/expected.txt
+++ b/test/911-get-stack-trace/expected.txt
@@ -22,7 +22,7 @@
bar (IIILControlData;)J 0 24
foo (IIILControlData;)I 0 19
doTest ()V 38 23
- main ([Ljava/lang/String;)V 6 21
+ main ([Ljava/lang/String;)V 3 21
---------
print (Ljava/lang/Thread;II)V 0 34
printOrWait (IILControlData;)V 6 39
@@ -42,7 +42,7 @@
bar (IIILControlData;)J 0 24
foo (IIILControlData;)I 0 19
doTest ()V 42 24
- main ([Ljava/lang/String;)V 6 21
+ main ([Ljava/lang/String;)V 3 21
---------
getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
print (Ljava/lang/Thread;II)V 0 34
@@ -57,13 +57,13 @@
baz (IIILControlData;)Ljava/lang/Object; 9 32
From bottom
---------
- main ([Ljava/lang/String;)V 6 21
+ main ([Ljava/lang/String;)V 3 21
---------
baz (IIILControlData;)Ljava/lang/Object; 9 32
bar (IIILControlData;)J 0 24
foo (IIILControlData;)I 0 19
doTest ()V 65 30
- main ([Ljava/lang/String;)V 6 21
+ main ([Ljava/lang/String;)V 3 21
---------
bar (IIILControlData;)J 0 24
foo (IIILControlData;)I 0 19
@@ -358,7 +358,7 @@
getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
printAll (I)V 0 73
doTest ()V 102 57
- main ([Ljava/lang/String;)V 30 33
+ main ([Ljava/lang/String;)V 27 33
---------
FinalizerDaemon
@@ -590,7 +590,7 @@
getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
printAll (I)V 0 73
doTest ()V 107 59
- main ([Ljava/lang/String;)V 30 33
+ main ([Ljava/lang/String;)V 27 33
########################################
@@ -659,7 +659,7 @@
getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
printList ([Ljava/lang/Thread;I)V 0 66
doTest ()V 96 52
- main ([Ljava/lang/String;)V 38 37
+ main ([Ljava/lang/String;)V 35 37
---------
Thread-14
@@ -771,7 +771,7 @@
getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
printList ([Ljava/lang/Thread;I)V 0 66
doTest ()V 101 54
- main ([Ljava/lang/String;)V 38 37
+ main ([Ljava/lang/String;)V 35 37
###################
@@ -782,7 +782,7 @@
[public static native java.lang.Object[] Frames.getFrameLocation(java.lang.Thread,int), ffffffff]
[public static void Frames.doTestSameThread(), 38]
[public static void Frames.doTest() throws java.lang.Exception, 0]
-[public static void Main.main(java.lang.String[]) throws java.lang.Exception, 2e]
+[public static void Main.main(java.lang.String[]) throws java.lang.Exception, 2b]
JVMTI_ERROR_NO_MORE_FRAMES
################################
diff --git a/test/911-get-stack-trace/src/Main.java b/test/911-get-stack-trace/src/Main.java
index b199033..96a427d 100644
--- a/test/911-get-stack-trace/src/Main.java
+++ b/test/911-get-stack-trace/src/Main.java
@@ -16,7 +16,7 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
+ bindTest911Classes();
SameThread.doTest();
@@ -42,4 +42,6 @@
System.out.println("Done");
}
+
+ private static native void bindTest911Classes();
}
diff --git a/test/911-get-stack-trace/stack_trace.cc b/test/911-get-stack-trace/stack_trace.cc
index d162e8a..68f6d8d 100644
--- a/test/911-get-stack-trace/stack_trace.cc
+++ b/test/911-get-stack-trace/stack_trace.cc
@@ -34,6 +34,14 @@
using android::base::StringPrintf;
+extern "C" JNIEXPORT void JNICALL Java_Main_bindTest911Classes(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+ BindFunctions(jvmti_env, env, "AllTraces");
+ BindFunctions(jvmti_env, env, "Frames");
+ BindFunctions(jvmti_env, env, "PrintThread");
+ BindFunctions(jvmti_env, env, "ThreadListTraces");
+}
+
static jint FindLineNumber(jint line_number_count,
jvmtiLineNumberEntry* line_number_table,
jlocation location) {
diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc
index 29eeff6..d13436e 100644
--- a/test/912-classes/classes.cc
+++ b/test/912-classes/classes.cc
@@ -20,6 +20,7 @@
#include "jni.h"
#include "openjdkjvmti/jvmti.h"
#include "ScopedLocalRef.h"
+#include "thread-inl.h"
#include "ti-agent/common_helper.h"
#include "ti-agent/common_load.h"
@@ -259,5 +260,120 @@
return int_array;
}
+static std::string GetClassName(jvmtiEnv* jenv, JNIEnv* jni_env, jclass klass) {
+ char* name;
+ jvmtiError result = jenv->GetClassSignature(klass, &name, nullptr);
+ if (result != JVMTI_ERROR_NONE) {
+ if (jni_env != nullptr) {
+ JvmtiErrorToException(jni_env, result);
+ } else {
+ printf("Failed to get class signature.\n");
+ }
+ return "";
+ }
+
+ std::string tmp(name);
+ jenv->Deallocate(reinterpret_cast<unsigned char*>(name));
+
+ return tmp;
+}
+
+static std::string GetThreadName(jvmtiEnv* jenv, JNIEnv* jni_env, jthread thread) {
+ jvmtiThreadInfo info;
+ jvmtiError result = jenv->GetThreadInfo(thread, &info);
+ if (result != JVMTI_ERROR_NONE) {
+ if (jni_env != nullptr) {
+ JvmtiErrorToException(jni_env, result);
+ } else {
+ printf("Failed to get thread name.\n");
+ }
+ return "";
+ }
+
+ std::string tmp(info.name);
+ jenv->Deallocate(reinterpret_cast<unsigned char*>(info.name));
+ jni_env->DeleteLocalRef(info.context_class_loader);
+ jni_env->DeleteLocalRef(info.thread_group);
+
+ return tmp;
+}
+
+static std::string GetThreadName(Thread* thread) {
+ std::string tmp;
+ thread->GetThreadName(tmp);
+ return tmp;
+}
+
+static void JNICALL ClassPrepareCallback(jvmtiEnv* jenv,
+ JNIEnv* jni_env,
+ jthread thread,
+ jclass klass) {
+ std::string name = GetClassName(jenv, jni_env, klass);
+ if (name == "") {
+ return;
+ }
+ std::string thread_name = GetThreadName(jenv, jni_env, thread);
+ if (thread_name == "") {
+ return;
+ }
+ std::string cur_thread_name = GetThreadName(Thread::Current());
+ printf("Prepare: %s on %s (cur=%s)\n",
+ name.c_str(),
+ thread_name.c_str(),
+ cur_thread_name.c_str());
+}
+
+static void JNICALL ClassLoadCallback(jvmtiEnv* jenv,
+ JNIEnv* jni_env,
+ jthread thread,
+ jclass klass) {
+ std::string name = GetClassName(jenv, jni_env, klass);
+ if (name == "") {
+ return;
+ }
+ std::string thread_name = GetThreadName(jenv, jni_env, thread);
+ if (thread_name == "") {
+ return;
+ }
+ printf("Load: %s on %s\n", name.c_str(), thread_name.c_str());
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_enableClassLoadEvents(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jboolean b) {
+ if (b == JNI_FALSE) {
+ jvmtiError ret = jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_CLASS_LOAD,
+ nullptr);
+ if (JvmtiErrorToException(env, ret)) {
+ return;
+ }
+ ret = jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_CLASS_PREPARE,
+ nullptr);
+ JvmtiErrorToException(env, ret);
+ return;
+ }
+
+ jvmtiEventCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
+ callbacks.ClassLoad = ClassLoadCallback;
+ callbacks.ClassPrepare = ClassPrepareCallback;
+ jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
+ if (JvmtiErrorToException(env, ret)) {
+ return;
+ }
+
+ ret = jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_CLASS_LOAD,
+ nullptr);
+ if (JvmtiErrorToException(env, ret)) {
+ return;
+ }
+ ret = jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_CLASS_PREPARE,
+ nullptr);
+ JvmtiErrorToException(env, ret);
+}
+
} // namespace Test912Classes
} // namespace art
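
ClassLoad fires when the class object first exists; ClassPrepare fires once the class is verified and prepared. Preparing a subclass pulls its superclass through both events first, even for Class.forName with initialize=false. A tiny model of the order for C extends A, matching the expected output below:

#include <cstdio>

int main() {
  // Class.forName("C", /*initialize=*/false, loader), where C extends A:
  const char* expected[] = {
      "Load: LA; on main",     // superclass loads first...
      "Prepare: LA; on main",  // ...and prepares before the subclass
      "Load: LC; on main",
      "Prepare: LC; on main",
  };
  for (const char* line : expected) {
    printf("%s\n", line);
  }
  return 0;
}
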
diff --git a/test/912-classes/expected.txt b/test/912-classes/expected.txt
index f3cb261..4976553 100644
--- a/test/912-classes/expected.txt
+++ b/test/912-classes/expected.txt
@@ -61,3 +61,35 @@
[class A, class B, class java.lang.Object]
[37, 0]
+
+B, false
+Load: LB; on main
+Prepare: LB; on main (cur=main)
+B, true
+Load: LB; on main
+Prepare: LB; on main (cur=main)
+C, false
+Load: LA; on main
+Prepare: LA; on main (cur=main)
+Load: LC; on main
+Prepare: LC; on main (cur=main)
+A, false
+C, true
+Load: LA; on main
+Prepare: LA; on main (cur=main)
+Load: LC; on main
+Prepare: LC; on main (cur=main)
+A, true
+A, true
+Load: LA; on main
+Prepare: LA; on main (cur=main)
+C, true
+Load: LC; on main
+Prepare: LC; on main (cur=main)
+Load: LMain$1; on main
+Prepare: LMain$1; on main (cur=main)
+C, true
+Load: LA; on TestRunner
+Prepare: LA; on TestRunner (cur=TestRunner)
+Load: LC; on TestRunner
+Prepare: LC; on TestRunner (cur=TestRunner)
diff --git a/test/912-classes/src-ex/A.java b/test/912-classes/src-ex/A.java
index 64acb2f..2c43cfb 100644
--- a/test/912-classes/src-ex/A.java
+++ b/test/912-classes/src-ex/A.java
@@ -15,4 +15,4 @@
*/
public class A {
-}
\ No newline at end of file
+}
diff --git a/test/912-classes/src-ex/C.java b/test/912-classes/src-ex/C.java
new file mode 100644
index 0000000..97f8021
--- /dev/null
+++ b/test/912-classes/src-ex/C.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class C extends A {
+}
diff --git a/test/912-classes/src/B.java b/test/912-classes/src/B.java
index f1458c3..52ce4dd 100644
--- a/test/912-classes/src/B.java
+++ b/test/912-classes/src/B.java
@@ -15,4 +15,4 @@
*/
public class B {
-}
\ No newline at end of file
+}
diff --git a/test/912-classes/src/Main.java b/test/912-classes/src/Main.java
index cbf2392..859f80c 100644
--- a/test/912-classes/src/Main.java
+++ b/test/912-classes/src/Main.java
@@ -21,8 +21,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
@@ -84,6 +82,10 @@
System.out.println();
testClassVersion();
+
+ System.out.println();
+
+ testClassEvents();
}
private static Class<?> proxyClass = null;
@@ -210,6 +212,60 @@
System.out.println(Arrays.toString(getClassVersion(Main.class)));
}
+ private static void testClassEvents() throws Exception {
+ ClassLoader cl = Main.class.getClassLoader();
+ while (cl.getParent() != null) {
+ cl = cl.getParent();
+ }
+ final ClassLoader boot = cl;
+
+ enableClassLoadEvents(true);
+
+ ClassLoader cl1 = create(boot, DEX1, DEX2);
+ System.out.println("B, false");
+ Class.forName("B", false, cl1);
+
+ ClassLoader cl2 = create(boot, DEX1, DEX2);
+ System.out.println("B, true");
+ Class.forName("B", true, cl2);
+
+ ClassLoader cl3 = create(boot, DEX1, DEX2);
+ System.out.println("C, false");
+ Class.forName("C", false, cl3);
+ System.out.println("A, false");
+ Class.forName("A", false, cl3);
+
+ ClassLoader cl4 = create(boot, DEX1, DEX2);
+ System.out.println("C, true");
+ Class.forName("C", true, cl4);
+ System.out.println("A, true");
+ Class.forName("A", true, cl4);
+
+ ClassLoader cl5 = create(boot, DEX1, DEX2);
+ System.out.println("A, true");
+ Class.forName("A", true, cl5);
+ System.out.println("C, true");
+ Class.forName("C", true, cl5);
+
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ ClassLoader cl6 = create(boot, DEX1, DEX2);
+ System.out.println("C, true");
+ Class.forName("C", true, cl6);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+ Thread t = new Thread(r, "TestRunner");
+ t.start();
+ t.join();
+
+ enableClassLoadEvents(false);
+ }
+
private static void printClassLoaderClasses(ClassLoader cl) {
for (;;) {
if (cl == null || !cl.getClass().getName().startsWith("dalvik.system")) {
@@ -272,6 +328,8 @@
private static native int[] getClassVersion(Class<?> c);
+ private static native void enableClassLoadEvents(boolean b);
+
private static class TestForNonInit {
public static double dummy = Math.random(); // So it can't be compile-time initialized.
}
diff --git a/test/913-heaps/src/Main.java b/test/913-heaps/src/Main.java
index 564596e..5a11a5b 100644
--- a/test/913-heaps/src/Main.java
+++ b/test/913-heaps/src/Main.java
@@ -21,8 +21,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
doFollowReferencesTest();
}
diff --git a/test/914-hello-obsolescence/src/Main.java b/test/914-hello-obsolescence/src/Main.java
index 46266ef..8a14716 100644
--- a/test/914-hello-obsolescence/src/Main.java
+++ b/test/914-hello-obsolescence/src/Main.java
@@ -53,7 +53,6 @@
"AAACIAAAEQAAAKIBAAADIAAAAgAAAJECAAAAIAAAAQAAAJ8CAAAAEAAAAQAAALACAAA=");
public static void main(String[] args) {
- System.loadLibrary(args[1]);
doTest(new Transform());
}
diff --git a/test/915-obsolete-2/src/Main.java b/test/915-obsolete-2/src/Main.java
index bbeb726..0e3145c 100644
--- a/test/915-obsolete-2/src/Main.java
+++ b/test/915-obsolete-2/src/Main.java
@@ -79,7 +79,6 @@
"IAAAFwAAAD4CAAADIAAABAAAAAgEAAAAIAAAAQAAACYEAAAAEAAAAQAAADwEAAA=");
public static void main(String[] args) {
- System.loadLibrary(args[1]);
doTest(new Transform());
}
diff --git a/test/916-obsolete-jit/src/Main.java b/test/916-obsolete-jit/src/Main.java
index 1e43f7e..1b03200 100644
--- a/test/916-obsolete-jit/src/Main.java
+++ b/test/916-obsolete-jit/src/Main.java
@@ -113,7 +113,6 @@
}
public static void main(String[] args) {
- System.loadLibrary(args[1]);
doTest(new Transform(), new TestWatcher());
}
diff --git a/test/917-fields-transformation/src/Main.java b/test/917-fields-transformation/src/Main.java
index 5378bb7..632a5c8 100644
--- a/test/917-fields-transformation/src/Main.java
+++ b/test/917-fields-transformation/src/Main.java
@@ -55,7 +55,6 @@
"AAIgAAAMAAAAXAEAAAMgAAACAAAA4QEAAAAgAAABAAAA8AEAAAAQAAABAAAABAIAAA==");
public static void main(String[] args) {
- System.loadLibrary(args[1]);
doTest(new Transform("Hello", "Goodbye"),
new Transform("start", "end"));
}
diff --git a/test/918-fields/src/Main.java b/test/918-fields/src/Main.java
index 8af6e7b..3ba535b 100644
--- a/test/918-fields/src/Main.java
+++ b/test/918-fields/src/Main.java
@@ -19,8 +19,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
diff --git a/test/919-obsolete-fields/src/Main.java b/test/919-obsolete-fields/src/Main.java
index 895c7a3..1d893f1 100644
--- a/test/919-obsolete-fields/src/Main.java
+++ b/test/919-obsolete-fields/src/Main.java
@@ -116,7 +116,6 @@
}
public static void main(String[] args) {
- System.loadLibrary(args[1]);
TestWatcher w = new TestWatcher();
doTest(new Transform(w), w);
}
diff --git a/test/921-hello-failure/src/Main.java b/test/921-hello-failure/src/Main.java
index 43d6e9e..67ca1e1 100644
--- a/test/921-hello-failure/src/Main.java
+++ b/test/921-hello-failure/src/Main.java
@@ -18,7 +18,6 @@
public class Main {
public static void main(String[] args) {
- System.loadLibrary(args[1]);
NewName.doTest(new Transform());
DifferentAccess.doTest(new Transform());
NewInterface.doTest(new Transform2());
diff --git a/test/922-properties/src/Main.java b/test/922-properties/src/Main.java
index 6cec6e9..8ad742f 100644
--- a/test/922-properties/src/Main.java
+++ b/test/922-properties/src/Main.java
@@ -19,8 +19,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
diff --git a/test/923-monitors/src/Main.java b/test/923-monitors/src/Main.java
index e35ce12..ef00728 100644
--- a/test/923-monitors/src/Main.java
+++ b/test/923-monitors/src/Main.java
@@ -21,8 +21,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
diff --git a/test/924-threads/expected.txt b/test/924-threads/expected.txt
index 3b7fb24..67d20eb 100644
--- a/test/924-threads/expected.txt
+++ b/test/924-threads/expected.txt
@@ -31,3 +31,7 @@
[Thread[FinalizerDaemon,5,system], Thread[FinalizerWatchdogDaemon,5,system], Thread[HeapTaskDaemon,5,system], Thread[ReferenceQueueDaemon,5,system], Thread[Signal Catcher,5,system], Thread[main,5,main]]
JVMTI_ERROR_THREAD_NOT_ALIVE
JVMTI_ERROR_THREAD_NOT_ALIVE
+Constructed thread
+Thread(EventTestThread): start
+Thread(EventTestThread): end
+Thread joined
diff --git a/test/924-threads/src/Main.java b/test/924-threads/src/Main.java
index dec49a8..29c4aa3 100644
--- a/test/924-threads/src/Main.java
+++ b/test/924-threads/src/Main.java
@@ -25,8 +25,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
@@ -58,6 +56,8 @@
doAllThreadsTests();
doTLSTests();
+
+ doTestEvents();
}
private static class Holder {
@@ -228,6 +228,22 @@
}
}
+ private static void doTestEvents() throws Exception {
+ enableThreadEvents(true);
+
+ Thread t = new Thread("EventTestThread");
+
+ System.out.println("Constructed thread");
+ Thread.yield();
+
+ t.start();
+ t.join();
+
+ System.out.println("Thread joined");
+
+ enableThreadEvents(false);
+ }
+
private final static Comparator<Thread> THREAD_COMP = new Comparator<Thread>() {
public int compare(Thread o1, Thread o2) {
return o1.getName().compareTo(o2.getName());
@@ -295,4 +311,5 @@
private static native Thread[] getAllThreads();
private static native void setTLS(Thread t, long l);
private static native long getTLS(Thread t);
+ private static native void enableThreadEvents(boolean b);
}
diff --git a/test/924-threads/threads.cc b/test/924-threads/threads.cc
index d35eaa8..0380433 100644
--- a/test/924-threads/threads.cc
+++ b/test/924-threads/threads.cc
@@ -137,5 +137,71 @@
JvmtiErrorToException(env, result);
}
+static void JNICALL ThreadEvent(jvmtiEnv* jvmti_env,
+ JNIEnv* jni_env,
+ jthread thread,
+ bool is_start) {
+ jvmtiThreadInfo info;
+ jvmtiError result = jvmti_env->GetThreadInfo(thread, &info);
+ if (result != JVMTI_ERROR_NONE) {
+ printf("Error getting thread info");
+ return;
+ }
+ printf("Thread(%s): %s\n", info.name, is_start ? "start" : "end");
+
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(info.name));
+ jni_env->DeleteLocalRef(info.thread_group);
+ jni_env->DeleteLocalRef(info.context_class_loader);
+}
+
+static void JNICALL ThreadStart(jvmtiEnv* jvmti_env,
+ JNIEnv* jni_env,
+ jthread thread) {
+ ThreadEvent(jvmti_env, jni_env, thread, true);
+}
+
+static void JNICALL ThreadEnd(jvmtiEnv* jvmti_env,
+ JNIEnv* jni_env,
+ jthread thread) {
+ ThreadEvent(jvmti_env, jni_env, thread, false);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_enableThreadEvents(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jboolean b) {
+ if (b == JNI_FALSE) {
+ jvmtiError ret = jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_THREAD_START,
+ nullptr);
+ if (JvmtiErrorToException(env, ret)) {
+ return;
+ }
+ ret = jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_THREAD_END,
+ nullptr);
+ JvmtiErrorToException(env, ret);
+ return;
+ }
+
+ jvmtiEventCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
+ callbacks.ThreadStart = ThreadStart;
+ callbacks.ThreadEnd = ThreadEnd;
+ jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
+ if (JvmtiErrorToException(env, ret)) {
+ return;
+ }
+
+ ret = jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_THREAD_START,
+ nullptr);
+ if (JvmtiErrorToException(env, ret)) {
+ return;
+ }
+ ret = jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_THREAD_END,
+ nullptr);
+ JvmtiErrorToException(env, ret);
+}
+
} // namespace Test924Threads
} // namespace art
diff --git a/test/925-threadgroups/src/Main.java b/test/925-threadgroups/src/Main.java
index c59efe2..3d7a4ca 100644
--- a/test/925-threadgroups/src/Main.java
+++ b/test/925-threadgroups/src/Main.java
@@ -19,8 +19,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
diff --git a/test/926-multi-obsolescence/src/Main.java b/test/926-multi-obsolescence/src/Main.java
index 8a6cf84..6d9f96c 100644
--- a/test/926-multi-obsolescence/src/Main.java
+++ b/test/926-multi-obsolescence/src/Main.java
@@ -92,7 +92,6 @@
"AACUAQAAAiAAABEAAACiAQAAAyAAAAIAAACXAgAAACAAAAEAAAClAgAAABAAAAEAAAC0AgAA"));
public static void main(String[] args) {
- System.loadLibrary(args[1]);
doTest(new Transform(), new Transform2());
}
diff --git a/test/927-timers/src/Main.java b/test/927-timers/src/Main.java
index 2f5c85c..b67f66d 100644
--- a/test/927-timers/src/Main.java
+++ b/test/927-timers/src/Main.java
@@ -18,8 +18,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
diff --git a/test/928-jni-table/src/Main.java b/test/928-jni-table/src/Main.java
index b0baea1..fd61b7d 100644
--- a/test/928-jni-table/src/Main.java
+++ b/test/928-jni-table/src/Main.java
@@ -16,8 +16,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doJNITableTest();
System.out.println("Done");
diff --git a/test/929-search/src/Main.java b/test/929-search/src/Main.java
index d253e6f..bbeb081 100644
--- a/test/929-search/src/Main.java
+++ b/test/929-search/src/Main.java
@@ -18,8 +18,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
doTest();
}
diff --git a/test/930-hello-retransform/src/Main.java b/test/930-hello-retransform/src/Main.java
index 12194c3..0063c82 100644
--- a/test/930-hello-retransform/src/Main.java
+++ b/test/930-hello-retransform/src/Main.java
@@ -49,7 +49,6 @@
"AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=");
public static void main(String[] args) {
- System.loadLibrary(args[1]);
doTest(new Transform());
}
diff --git a/test/931-agent-thread/src/Main.java b/test/931-agent-thread/src/Main.java
index 6471bc8..a7639fb 100644
--- a/test/931-agent-thread/src/Main.java
+++ b/test/931-agent-thread/src/Main.java
@@ -18,8 +18,6 @@
public class Main {
public static void main(String[] args) throws Exception {
- System.loadLibrary(args[1]);
-
testAgentThread();
System.out.println("Done");
diff --git a/test/932-transform-saves/src/Main.java b/test/932-transform-saves/src/Main.java
index d98ba6d..d960322 100644
--- a/test/932-transform-saves/src/Main.java
+++ b/test/932-transform-saves/src/Main.java
@@ -79,7 +79,6 @@
"AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=");
public static void main(String[] args) {
- System.loadLibrary(args[1]);
doTest(new Transform());
}
diff --git a/test/933-misc-events/build b/test/933-misc-events/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/933-misc-events/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/933-misc-events/expected.txt b/test/933-misc-events/expected.txt
new file mode 100644
index 0000000..024c560
--- /dev/null
+++ b/test/933-misc-events/expected.txt
@@ -0,0 +1,2 @@
+Received dump request.
+Done
diff --git a/test/933-misc-events/info.txt b/test/933-misc-events/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/933-misc-events/info.txt
@@ -0,0 +1 @@
+Tests miscellaneous jvmti events: the data dump request triggered by SIGQUIT.
diff --git a/test/933-misc-events/misc_events.cc b/test/933-misc-events/misc_events.cc
new file mode 100644
index 0000000..860d4b5
--- /dev/null
+++ b/test/933-misc-events/misc_events.cc
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+#include <signal.h>
+#include <sys/types.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test933MiscEvents {
+
+static std::atomic<bool> saw_dump_request(false);
+
+static void DumpRequestCallback(jvmtiEnv* jenv ATTRIBUTE_UNUSED) {
+ printf("Received dump request.\n");
+ saw_dump_request.store(true, std::memory_order::memory_order_relaxed);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_testSigQuit(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
+ jvmtiEventCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
+ callbacks.DataDumpRequest = DumpRequestCallback;
+ jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
+ if (JvmtiErrorToException(env, ret)) {
+ return;
+ }
+
+ ret = jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_DATA_DUMP_REQUEST,
+ nullptr);
+ if (JvmtiErrorToException(env, ret)) {
+ return;
+ }
+
+ // Send sigquit to self.
+ kill(getpid(), SIGQUIT);
+
+  // Wait for the dump request, polling once per second.
+ for (;;) {
+ sleep(1);
+ if (saw_dump_request.load(std::memory_order::memory_order_relaxed)) {
+ break;
+ }
+ }
+
+ ret = jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_DATA_DUMP_REQUEST, nullptr);
+ JvmtiErrorToException(env, ret);
+}
+
+} // namespace Test933MiscEvents
+} // namespace art
diff --git a/test/933-misc-events/run b/test/933-misc-events/run
new file mode 100755
index 0000000..0a8d067
--- /dev/null
+++ b/test/933-misc-events/run
@@ -0,0 +1,23 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This test exercises miscellaneous jvmti events (a data dump request triggered by SIGQUIT).
+# App images preload classes, which can interfere with the jvmti test agent. Turn off app
+# images to avoid the issue.
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --jvmti \
+ --no-app-image
diff --git a/test/933-misc-events/src/Main.java b/test/933-misc-events/src/Main.java
new file mode 100644
index 0000000..89801a3
--- /dev/null
+++ b/test/933-misc-events/src/Main.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ testSigQuit();
+
+ System.out.println("Done");
+ }
+
+ private static native void testSigQuit();
+}
diff --git a/test/Android.bp b/test/Android.bp
index be5bc59..89e4092 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -270,6 +270,7 @@
"928-jni-table/jni_table.cc",
"929-search/search.cc",
"931-agent-thread/agent_thread.cc",
+ "933-misc-events/misc_events.cc",
],
shared_libs: [
"libbase",
@@ -326,6 +327,7 @@
"570-checker-osr/osr.cc",
"595-profile-saving/profile-saving.cc",
"596-app-images/app_images.cc",
+ "596-monitor-inflation/monitor_inflation.cc",
"597-deopt-new-string/deopt.cc",
"626-const-class-linking/clear_dex_cache_types.cc",
],
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 639996e..814f968 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -277,42 +277,6 @@
147-stripped-dex-fallback \
569-checker-pattern-replacement
-# These 9** tests are not supported in current form due to linker
-# restrictions. See b/31681198
-TEST_ART_BROKEN_TARGET_TESTS += \
- 901-hello-ti-agent \
- 902-hello-transformation \
- 903-hello-tagging \
- 904-object-allocation \
- 905-object-free \
- 906-iterate-heap \
- 907-get-loaded-classes \
- 908-gc-start-finish \
- 909-attach-agent \
- 910-methods \
- 911-get-stack-trace \
- 912-classes \
- 913-heaps \
- 914-hello-obsolescence \
- 915-obsolete-2 \
- 916-obsolete-jit \
- 917-fields-transformation \
- 918-fields \
- 919-obsolete-fields \
- 920-objects \
- 921-hello-failure \
- 922-properties \
- 923-monitors \
- 924-threads \
- 925-threadgroups \
- 926-multi-obsolescence \
- 927-timers \
- 928-jni-table \
- 929-search \
- 930-hello-retransform \
- 931-agent-thread \
- 932-transform-saves \
-
ifneq (,$(filter target,$(TARGET_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
diff --git a/test/run-test b/test/run-test
index 9b17802..c78fa35 100755
--- a/test/run-test
+++ b/test/run-test
@@ -131,6 +131,7 @@
multi_image_suffix=""
android_root="/system"
bisection_search="no"
+suspend_timeout="500000"
# By default we will use optimizing.
image_args=""
image_suffix=""
@@ -219,6 +220,10 @@
basic_verify="true"
gc_stress="true"
shift
+ elif [ "x$1" = "x--suspend-timeout" ]; then
+ shift
+ suspend_timeout="$1"
+ shift
elif [ "x$1" = "x--image" ]; then
shift
image="$1"
@@ -402,6 +407,9 @@
tmp_dir="`cd $oldwd ; python -c "import os; print os.path.realpath('$tmp_dir')"`"
mkdir -p $tmp_dir
+# Add the thread suspend timeout flag.
+run_args="${run_args} --runtime-option -XX:ThreadSuspendTimeout=$suspend_timeout"
+
if [ "$basic_verify" = "true" ]; then
# Set HspaceCompactForOOMMinIntervalMs to zero to run hspace compaction for OOM more frequently in tests.
run_args="${run_args} --runtime-option -Xgc:preverify --runtime-option -Xgc:postverify --runtime-option -XX:HspaceCompactForOOMMinIntervalMs=0"
@@ -649,6 +657,7 @@
echo " --quiet Don't print anything except failure messages"
echo " --bisection-search Perform bisection bug search."
echo " --vdex Test using vdex as an input to dex2oat. Only works with --prebuild."
+ echo " --suspend-timeout Change the thread suspend timeout in ms (default 500000)."
) 1>&2 # Direct to stderr so usage is not printed if --quiet is set.
exit 1
fi
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
index 4bceef5..80e1797 100644
--- a/test/ti-agent/common_helper.cc
+++ b/test/ti-agent/common_helper.cc
@@ -16,14 +16,18 @@
#include "ti-agent/common_helper.h"
+#include <dlfcn.h>
#include <stdio.h>
#include <sstream>
#include <deque>
+#include "android-base/stringprintf.h"
#include "art_method.h"
#include "jni.h"
+#include "jni_internal.h"
#include "openjdkjvmti/jvmti.h"
#include "scoped_thread_state_change-inl.h"
+#include "ScopedLocalRef.h"
#include "stack.h"
#include "ti-agent/common_load.h"
#include "utils.h"
@@ -325,4 +329,123 @@
} // namespace common_retransform
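+// Resolve the native implementation of |method| by constructing its JNI-mangled
+// name, looking the symbol up with dlsym, and registering it on |klass|.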
+static void BindMethod(jvmtiEnv* jenv,
+ JNIEnv* env,
+ jclass klass,
+ jmethodID method) {
+ char* name;
+ char* signature;
+ jvmtiError name_result = jenv->GetMethodName(method, &name, &signature, nullptr);
+ if (name_result != JVMTI_ERROR_NONE) {
+    LOG(FATAL) << "Could not get method name";
+ }
+
+ ArtMethod* m = jni::DecodeArtMethod(method);
+
+ std::string names[2];
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ names[0] = m->JniShortName();
+ names[1] = m->JniLongName();
+ }
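+  // Try the short mangled name first, then the long form used to disambiguate
+  // overloaded native methods.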
+ for (const std::string& mangled_name : names) {
+ void* sym = dlsym(RTLD_DEFAULT, mangled_name.c_str());
+ if (sym == nullptr) {
+ continue;
+ }
+
+ JNINativeMethod native_method;
+ native_method.fnPtr = sym;
+ native_method.name = name;
+ native_method.signature = signature;
+
+ env->RegisterNatives(klass, &native_method, 1);
+
+ jenv->Deallocate(reinterpret_cast<unsigned char*>(name));
+ jenv->Deallocate(reinterpret_cast<unsigned char*>(signature));
+ return;
+ }
+
+ LOG(FATAL) << "Could not find " << names[0];
+}
+
+static jclass FindClassWithSystemClassLoader(JNIEnv* env, const char* class_name) {
+ // Find the system classloader.
+ ScopedLocalRef<jclass> cl_klass(env, env->FindClass("java/lang/ClassLoader"));
+ if (cl_klass.get() == nullptr) {
+ return nullptr;
+ }
+ jmethodID getsystemclassloader_method = env->GetStaticMethodID(cl_klass.get(),
+ "getSystemClassLoader",
+ "()Ljava/lang/ClassLoader;");
+ if (getsystemclassloader_method == nullptr) {
+ return nullptr;
+ }
+ ScopedLocalRef<jobject> cl(env, env->CallStaticObjectMethod(cl_klass.get(),
+ getsystemclassloader_method));
+ if (cl.get() == nullptr) {
+ return nullptr;
+ }
+
+ // Create a String of the name.
+ std::string descriptor = android::base::StringPrintf("L%s;", class_name);
+ std::string dot_name = DescriptorToDot(descriptor.c_str());
+ ScopedLocalRef<jstring> name_str(env, env->NewStringUTF(dot_name.c_str()));
+
+ // Call Class.forName with it.
+ ScopedLocalRef<jclass> c_klass(env, env->FindClass("java/lang/Class"));
+ if (c_klass.get() == nullptr) {
+ return nullptr;
+ }
+ jmethodID forname_method = env->GetStaticMethodID(
+ c_klass.get(),
+ "forName",
+ "(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;");
+ if (forname_method == nullptr) {
+ return nullptr;
+ }
+
+ return reinterpret_cast<jclass>(env->CallStaticObjectMethod(c_klass.get(),
+ forname_method,
+ name_str.get(),
+ JNI_FALSE,
+ cl.get()));
+}
+
+void BindFunctions(jvmtiEnv* jenv, JNIEnv* env, const char* class_name) {
+ // Use JNI to load the class.
+ ScopedLocalRef<jclass> klass(env, env->FindClass(class_name));
+ if (klass.get() == nullptr) {
+ // We may be called with the wrong classloader. Try explicitly using the system classloader.
+ env->ExceptionClear();
+ klass.reset(FindClassWithSystemClassLoader(env, class_name));
+ if (klass.get() == nullptr) {
+ LOG(FATAL) << "Could not load " << class_name;
+ }
+ }
+
+ // Use JVMTI to get the methods.
+ jint method_count;
+ jmethodID* methods;
+ jvmtiError methods_result = jenv->GetClassMethods(klass.get(), &method_count, &methods);
+ if (methods_result != JVMTI_ERROR_NONE) {
+ LOG(FATAL) << "Could not get methods";
+ }
+
+ // Check each method.
+ for (jint i = 0; i < method_count; ++i) {
+ jint modifiers;
+ jvmtiError mod_result = jenv->GetMethodModifiers(methods[i], &modifiers);
+ if (mod_result != JVMTI_ERROR_NONE) {
+      LOG(FATAL) << "Could not get method modifiers";
+ }
+ constexpr jint kNative = static_cast<jint>(kAccNative);
+ if ((modifiers & kNative) != 0) {
+ BindMethod(jenv, env, klass.get(), methods[i]);
+ }
+ }
+
+ jenv->Deallocate(reinterpret_cast<unsigned char*>(methods));
+}
+
} // namespace art
diff --git a/test/ti-agent/common_helper.h b/test/ti-agent/common_helper.h
index 8599fc4..c60553d 100644
--- a/test/ti-agent/common_helper.h
+++ b/test/ti-agent/common_helper.h
@@ -71,6 +71,12 @@
bool JvmtiErrorToException(JNIEnv* env, jvmtiError error);
+// Load the class through JNI. Inspect it, find all native methods. Construct the corresponding
+// mangled name, run dlsym and bind the method.
+//
+// This will abort on failure.
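+//
+// For example, an agent holding both envs could bind all natives declared on
+// class Main (a hypothetical caller) with:
+//   BindFunctions(jvmti_env, jni_env, "Main");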
+void BindFunctions(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name);
+
} // namespace art
#endif // ART_TEST_TI_AGENT_COMMON_HELPER_H_
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index f4ce4c3..8ed8e67 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "common_load.h"
+
#include <jni.h>
#include <stdio.h>
// TODO I don't know?
@@ -22,7 +24,6 @@
#include "art_method-inl.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "common_load.h"
#include "common_helper.h"
#include "901-hello-ti-agent/basics.h"
@@ -32,6 +33,8 @@
jvmtiEnv* jvmti_env;
+namespace {
+
using OnLoad = jint (*)(JavaVM* vm, char* options, void* reserved);
using OnAttach = jint (*)(JavaVM* vm, char* options, void* reserved);
@@ -41,11 +44,50 @@
OnAttach attach;
};
+static void JNICALL VMInitCallback(jvmtiEnv *jvmti_env,
+ JNIEnv* jni_env,
+ jthread thread ATTRIBUTE_UNUSED) {
+ // Bind Main native methods.
+ BindFunctions(jvmti_env, jni_env, "Main");
+}
+
+// Install a phase callback that will bind JNI functions on VMInit.
+bool InstallBindCallback(JavaVM* vm) {
+ // Use a new jvmtiEnv. Otherwise we might collide with table changes.
+ jvmtiEnv* install_env;
+ if (vm->GetEnv(reinterpret_cast<void**>(&install_env), JVMTI_VERSION_1_0) != 0) {
+ return false;
+ }
+ SetAllCapabilities(install_env);
+
+ {
+ jvmtiEventCallbacks callbacks;
+ memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
+ callbacks.VMInit = VMInitCallback;
+
+ jvmtiError install_error = install_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
+ if (install_error != JVMTI_ERROR_NONE) {
+ return false;
+ }
+ }
+
+ {
+ jvmtiError enable_error = install_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_VM_INIT,
+ nullptr);
+ if (enable_error != JVMTI_ERROR_NONE) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
// A trivial OnLoad implementation that only initializes the global jvmti_env.
static jint MinimalOnLoad(JavaVM* vm,
char* options ATTRIBUTE_UNUSED,
void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0) != 0) {
printf("Unable to get jvmti env!\n");
return 1;
}
@@ -55,7 +97,7 @@
// A list of all the non-standard agents we have for testing. All other agents will use
// MinimalOnLoad.
-AgentLib agents[] = {
+static AgentLib agents[] = {
{ "901-hello-ti-agent", Test901HelloTi::OnLoad, nullptr },
{ "902-hello-transformation", common_redefine::OnLoad, nullptr },
{ "909-attach-agent", nullptr, Test909AttachAgent::OnAttach },
@@ -101,6 +143,28 @@
RuntimeIsJVM = strncmp(options, "jvm", 3) == 0;
}
+static bool BindFunctionsAttached(JavaVM* vm, const char* class_name) {
+ // Get a JNIEnv. As the thread is attached, we must not destroy it.
+ JNIEnv* env;
+ if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != 0) {
+ printf("Unable to get JNI env!\n");
+ return false;
+ }
+
+ jvmtiEnv* jenv;
+ if (vm->GetEnv(reinterpret_cast<void**>(&jenv), JVMTI_VERSION_1_0) != 0) {
+ printf("Unable to get jvmti env!\n");
+ return false;
+ }
+ SetAllCapabilities(jenv);
+
+ BindFunctions(jenv, env, class_name);
+
+ return true;
+}
+
+} // namespace
+
extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
char* remaining_options = nullptr;
char* name_option = nullptr;
@@ -111,6 +175,10 @@
SetIsJVM(remaining_options);
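+  // Install the VMInit callback that binds Main's native methods once the VM
+  // is initialized.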
+ if (!InstallBindCallback(vm)) {
+ return 1;
+ }
+
AgentLib* lib = FindAgent(name_option);
OnLoad fn = nullptr;
if (lib == nullptr) {
@@ -132,6 +200,9 @@
printf("Unable to find agent name in options: %s\n", options);
return -1;
}
+
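+  // The VM is already running when an agent attaches, so bind Main's native
+  // methods immediately rather than waiting for VMInit.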
+ BindFunctionsAttached(vm, "Main");
+
AgentLib* lib = FindAgent(name_option);
if (lib == nullptr) {
printf("Unable to find agent named: %s, add it to the list in test/ti-agent/common_load.cc\n",
diff --git a/test/ti-agent/common_load.h b/test/ti-agent/common_load.h
index fac94b4..d254421 100644
--- a/test/ti-agent/common_load.h
+++ b/test/ti-agent/common_load.h
@@ -17,6 +17,7 @@
#ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_
#define ART_TEST_TI_AGENT_COMMON_LOAD_H_
+#include "jni.h"
#include "openjdkjvmti/jvmti.h"
namespace art {
diff --git a/test/valgrind-suppressions.txt b/test/valgrind-suppressions.txt
index c148ad0..c775f98 100644
--- a/test/valgrind-suppressions.txt
+++ b/test/valgrind-suppressions.txt
@@ -46,3 +46,26 @@
fun:_ZN14UnwindMapLocal5BuildEv
fun:_ZN12BacktraceMap6CreateEib
}
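+# libunwind reads target memory directly while computing ELF load bases during
+# backtrace map creation; suppress the resulting 8- and 4-byte read reports.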
+{
+ BackTraceReading64
+ Memcheck:Addr8
+ fun:access_mem_unrestricted
+ fun:_Uelf64_memory_read
+ fun:_Uelf64_get_load_base
+ fun:map_create_list
+ fun:unw_map_local_create
+ fun:_ZN14UnwindMapLocal5BuildEv
+ fun:_ZN12BacktraceMap6CreateEib
+}
+{
+ BackTraceReading32
+ Memcheck:Addr4
+ fun:access_mem_unrestricted
+ fun:_Uelf32_memory_read
+ fun:_Uelf32_get_load_base
+ fun:map_create_list
+ fun:unw_map_local_create
+ fun:_ZN14UnwindMapLocal5BuildEv
+ fun:_ZN12BacktraceMap6CreateEib
+}
+
diff --git a/tools/dexfuzz/src/dexfuzz/Options.java b/tools/dexfuzz/src/dexfuzz/Options.java
index 99e03e8..af8a05c 100644
--- a/tools/dexfuzz/src/dexfuzz/Options.java
+++ b/tools/dexfuzz/src/dexfuzz/Options.java
@@ -50,6 +50,7 @@
public static String deviceName = "";
public static boolean usingSpecificDevice = false;
public static int repeat = 1;
+ public static int divergenceRetry = 10;
public static String executeDirectory = "/data/art-test";
public static String androidRoot = "";
public static String dumpMutationsFile = "mutations.dump";
@@ -118,6 +119,8 @@
Log.always(" --repeat=<n> : Fuzz N programs, executing each one.");
Log.always(" --short-timeouts : Shorten timeouts (faster; use if");
Log.always(" you want to focus on output divergences)");
+ Log.always(" --divergence-retry=<n> : Number of retries when checking if a test is");
+ Log.always(" self-divergent. (Default: 10)");
Log.always(" --seed=<seed> : RNG seed to use");
Log.always(" --method-mutations=<n> : Maximum number of mutations to perform on each method.");
Log.always(" (Default: 3)");
@@ -239,6 +242,8 @@
maxMethods = Integer.parseInt(value);
} else if (key.equals("repeat")) {
repeat = Integer.parseInt(value);
+ } else if (key.equals("divergence-retry")) {
+ divergenceRetry = Integer.parseInt(value);
} else if (key.equals("log")) {
Log.setLoggingLevel(LogTag.valueOf(value.toUpperCase()));
} else if (key.equals("likelihoods")) {
@@ -360,6 +365,10 @@
Log.error("--repeat must be at least 1!");
return false;
}
+ if (divergenceRetry < 0) {
+ Log.error("--divergence-retry cannot be negative!");
+ return false;
+ }
if (usingProvidedSeed && repeat > 1) {
Log.error("Cannot use --repeat with --seed");
return false;
diff --git a/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java b/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java
index 1797d90..ccc426c 100644
--- a/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java
+++ b/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java
@@ -298,13 +298,13 @@
}
private boolean checkGoldenExecutorForSelfDivergence(String programName) {
- // Run golden executor 5 times, make sure it always produces
+ // Run the golden executor multiple times and make sure it always produces
// the same output, otherwise report that it is self-divergent.
// TODO: Instead, produce a list of acceptable outputs, and see if the divergent
// outputs of the backends fall within this set of outputs.
String seenOutput = null;
- for (int i = 0; i < 5; i++) {
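+    // One initial run plus divergenceRetry retries.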
+ for (int i = 0; i < Options.divergenceRetry + 1; i++) {
goldenExecutor.reset();
goldenExecutor.execute(programName);
String output = goldenExecutor.getResult().getFlattenedOutput();
diff --git a/tools/stream-trace-converter.py b/tools/stream-trace-converter.py
index 951b05b..7e341f2 100755
--- a/tools/stream-trace-converter.py
+++ b/tools/stream-trace-converter.py
@@ -124,12 +124,20 @@
self._threads.append('%d\t%s\n' % (tid, str))
print 'New thread: %d/%s' % (tid, str)
+ def ProcessTraceSummary(self, input):
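+    # A summary record is its length (little-endian int) followed by that many
+    # bytes of summary text.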
+ summaryLength = ReadIntLE(input)
+ str = input.read(summaryLength)
+ self._summary = str
+ print 'Summary: \"%s\"' % str
+
def ProcessSpecial(self, input):
code = ord(input.read(1))
if code == 1:
self.ProcessMethod(input)
elif code == 2:
self.ProcessThread(input)
+ elif code == 3:
+ self.ProcessTraceSummary(input)
else:
raise MyException("Unknown special!")
@@ -147,9 +155,24 @@
print 'Buffer underrun, file was probably truncated. Results should still be usable.'
def Finalize(self, header):
- header.write('*threads\n')
- for t in self._threads:
- header.write(t)
+    # If a summary is present in the input file, use it as the header, except
+    # for the methods section, which is empty in the input file. If it is not
+    # present, append the threads recorded in the input stream to the header.
+ if (self._summary):
+      # Erase the contents already written by PrintHeader.
+ header.seek(0)
+ header.truncate()
+ # Copy the lines from the input summary to the output header until
+ # the methods section is seen.
+ for line in self._summary.splitlines(True):
+ if line == "*methods\n":
+ break
+ else:
+ header.write(line)
+ else:
+ header.write('*threads\n')
+ for t in self._threads:
+ header.write(t)
header.write('*methods\n')
for m in self._methods:
header.write(m)
@@ -166,6 +189,7 @@
self._methods = []
self._threads = []
+ self._summary = None
self.Process(input, body)
self.Finalize(header)