Merge "Change dump-classes profman option to dump-classes-and-methods"
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d463830..794e05c 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -153,7 +153,8 @@
codegen->GetNumberOfFloatingPointRegisters()));
CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize, core_spills);
- CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize, fp_spills);
+ unsigned v_reg_size = codegen->GetGraph()->HasSIMD() ? kQRegSize : kDRegSize;
+ CPURegList fp_list = CPURegList(CPURegister::kVRegister, v_reg_size, fp_spills);
MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
@@ -464,10 +465,13 @@
: SlowPathCodeARM64(instruction), successor_(successor) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations); // Only saves live 128-bit regs for SIMD.
arm64_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
+ RestoreLiveRegisters(codegen, locations); // Only restores live 128-bit regs for SIMD.
if (successor_ == nullptr) {
__ B(GetReturnLabel());
} else {
@@ -5520,7 +5524,11 @@
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ // In the suspend check slow path there are usually no caller-save registers at all.
+ // If SIMD instructions are present, however, we force spilling all live SIMD
+ // registers in full width (since the runtime only saves/restores the lower part).
+ locations->SetCustomSlowPathCallerSaves(
+ GetGraph()->HasSIMD() ? RegisterSet::AllFpu() : RegisterSet::Empty());
}
void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
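A rough sketch of the spill-width policy this change introduces, assuming only what the comments above state: the runtime's suspend entrypoint preserves just the low 64 bits (the D view) of each vector register. Names here are illustrative, not the real VIXL/ART declarations.

    // Illustrative only: models why SIMD graphs must spill vector registers
    // in full 128-bit width around the suspend-check runtime call.
    constexpr unsigned kDRegSizeInBits = 64;   // Width the runtime preserves.
    constexpr unsigned kQRegSizeInBits = 128;  // Full SIMD register width.

    unsigned FpSpillWidthInBits(bool graph_has_simd) {
      // Without SIMD, live FP values fit in the D view the runtime saves, so
      // the slow path can use an empty custom caller-save set. With SIMD, the
      // upper 64 bits would otherwise be clobbered, so the slow path itself
      // spills all live FP registers in full width.
      return graph_has_simd ? kQRegSizeInBits : kDRegSizeInBits;
    }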
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 80776e8..08a752f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -186,10 +186,10 @@
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations); // only saves full width XMM for SIMD
+ SaveLiveRegisters(codegen, locations); // Only saves full width XMM for SIMD.
x86_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, locations); // only saves full width XMM for SIMD
+ RestoreLiveRegisters(codegen, locations); // Only restores full width XMM for SIMD.
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 49f099f..ff6e099 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -143,10 +143,10 @@
LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, locations); // only saves full width XMM for SIMD
+ SaveLiveRegisters(codegen, locations); // Only saves full width XMM for SIMD.
x86_64_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, locations); // only saves full width XMM for SIMD
+ RestoreLiveRegisters(codegen, locations); // Only restores full width XMM for SIMD.
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index d6513c8..1c8674d 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -383,12 +383,13 @@
return false;
}
-bool InductionVarRange::IsUnitStride(HInstruction* instruction,
+bool InductionVarRange::IsUnitStride(HInstruction* context,
+ HInstruction* instruction,
/*out*/ HInstruction** offset) const {
HLoopInformation* loop = nullptr;
HInductionVarAnalysis::InductionInfo* info = nullptr;
HInductionVarAnalysis::InductionInfo* trip = nullptr;
- if (HasInductionInfo(instruction, instruction, &loop, &info, &trip)) {
+ if (HasInductionInfo(context, instruction, &loop, &info, &trip)) {
if (info->induction_class == HInductionVarAnalysis::kLinear &&
info->op_b->operation == HInductionVarAnalysis::kFetch &&
!HInductionVarAnalysis::IsNarrowingLinear(info)) {
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 0858d73..a8ee829 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -156,10 +156,14 @@
bool IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const;
/**
- * Checks if instruction is a unit stride induction inside the closest enveloping loop.
- * Returns invariant offset on success.
+ * Checks if the given instruction is a unit stride induction inside the closest enveloping
+ * loop of the context defined by the first parameter (e.g. pass an array reference as
+ * context and the index as instruction to ensure the stride is tested against the loop
+ * that most closely envelops the reference). Returns the invariant offset on success.
*/
- bool IsUnitStride(HInstruction* instruction, /*out*/ HInstruction** offset) const;
+ bool IsUnitStride(HInstruction* context,
+ HInstruction* instruction,
+ /*out*/ HInstruction** offset) const;
/**
* Generates the trip count expression for the given loop. Code is generated in given block
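A hypothetical call site for the new two-parameter form, mirroring the updated callers in loop_optimization.cc below; array_access and index are placeholder names, not identifiers from this change.

    HInstruction* offset = nullptr;
    // Pass the memory access as context and its index as instruction, so the
    // unit-stride test runs against the loop that most closely envelops the
    // access rather than the loop defining the index.
    if (induction_range_.IsUnitStride(/*context=*/ array_access,
                                      /*instruction=*/ index,
                                      &offset)) {
      // On success, offset holds the loop-invariant start (nullptr means 0).
    }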
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index fcdf8eb..d01d314 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -770,7 +770,7 @@
EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
EXPECT_EQ(1000, tc);
HInstruction* offset = nullptr;
- EXPECT_TRUE(range_.IsUnitStride(phi, &offset));
+ EXPECT_TRUE(range_.IsUnitStride(phi, phi, &offset));
EXPECT_TRUE(offset == nullptr);
HInstruction* tce = range_.GenerateTripCount(
loop_header_->GetLoopInformation(), graph_, loop_preheader_);
@@ -826,7 +826,7 @@
EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
EXPECT_EQ(1000, tc);
HInstruction* offset = nullptr;
- EXPECT_FALSE(range_.IsUnitStride(phi, &offset));
+ EXPECT_FALSE(range_.IsUnitStride(phi, phi, &offset));
HInstruction* tce = range_.GenerateTripCount(
loop_header_->GetLoopInformation(), graph_, loop_preheader_);
ASSERT_TRUE(tce != nullptr);
@@ -908,7 +908,7 @@
EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
EXPECT_EQ(0, tc); // unknown
HInstruction* offset = nullptr;
- EXPECT_TRUE(range_.IsUnitStride(phi, &offset));
+ EXPECT_TRUE(range_.IsUnitStride(phi, phi, &offset));
EXPECT_TRUE(offset == nullptr);
HInstruction* tce = range_.GenerateTripCount(
loop_header_->GetLoopInformation(), graph_, loop_preheader_);
@@ -994,7 +994,7 @@
EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc));
EXPECT_EQ(0, tc); // unknown
HInstruction* offset = nullptr;
- EXPECT_FALSE(range_.IsUnitStride(phi, &offset));
+ EXPECT_FALSE(range_.IsUnitStride(phi, phi, &offset));
HInstruction* tce = range_.GenerateTripCount(
loop_header_->GetLoopInformation(), graph_, loop_preheader_);
ASSERT_TRUE(tce != nullptr);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 79cd704..298ae5c 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -371,6 +371,12 @@
// invoke-virtual because a proxy method doesn't have a real dex file.
return nullptr;
}
+ if (!single_impl->GetDeclaringClass()->IsResolved()) {
+ // There's a race with the class loading, which updates the CHA info
+ // before setting the class to resolved. So we just bail for this
+ // rare occurrence.
+ return nullptr;
+ }
return single_impl;
}
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 6098767..c2518a7 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -834,15 +834,15 @@
__ Bnezc(AT, &done);
// Long outLong = floor/ceil(in);
- // if outLong == Long.MAX_VALUE {
+ // if ((outLong == Long.MAX_VALUE) || (outLong == Long.MIN_VALUE)) {
// // floor()/ceil() has almost certainly returned a value
// // which can't be successfully represented as a signed
// // 64-bit number. Java expects that the input value will
// // be returned in these cases.
// // There is also a small probability that floor(in)/ceil(in)
// // correctly truncates/rounds up the input value to
- // // Long.MAX_VALUE. In that case, this exception handling
- // // code still does the correct thing.
+ // // Long.MAX_VALUE or Long.MIN_VALUE. In these cases, this
+ // // exception handling code still does the correct thing.
// return in;
// }
if (mode == kFloor) {
@@ -852,8 +852,14 @@
}
__ Dmfc1(AT, out);
__ MovD(out, in);
- __ LoadConst64(TMP, kPrimLongMax);
- __ Beqc(AT, TMP, &done);
+ __ Daddiu(TMP, AT, 1);
+ __ Dati(TMP, 0x8000); // TMP = AT + 0x8000 0000 0000 0001
+ // or AT - 0x7FFF FFFF FFFF FFFF.
+ // IOW, TMP = 1 if AT = Long.MIN_VALUE
+ // or TMP = 0 if AT = Long.MAX_VALUE.
+ __ Dsrl(TMP, TMP, 1); // TMP = 0 if AT = Long.MIN_VALUE
+ // or AT = Long.MAX_VALUE.
+ __ Beqzc(TMP, &done);
// double out = outLong;
// return out;
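The Daddiu/Dati/Dsrl sequence folds both boundary checks into a single branch. A host-side C++ sketch of the same arithmetic (unsigned wraparound is well defined), useful for convincing oneself the trick is sound:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // (AT + 0x8000000000000001) >> 1 is zero exactly when AT is
    // Long.MIN_VALUE or Long.MAX_VALUE.
    bool IsLongMinOrMax(int64_t at) {
      uint64_t tmp = static_cast<uint64_t>(at) + 0x8000000000000001ULL;  // Wraps mod 2^64.
      return (tmp >> 1) == 0;  // Logical shift, like Dsrl.
    }

    int main() {
      assert(IsLongMinOrMax(std::numeric_limits<int64_t>::max()));
      assert(IsLongMinOrMax(std::numeric_limits<int64_t>::min()));
      assert(!IsLongMinOrMax(0));
      assert(!IsLongMinOrMax(std::numeric_limits<int64_t>::max() - 1));
      assert(!IsLongMinOrMax(std::numeric_limits<int64_t>::min() + 1));
      return 0;
    }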
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index ca31bf8..bf18cc9 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -544,12 +544,13 @@
bool vectorized_def = VectorizeDef(node, it.Current(), /*generate_code*/ true);
DCHECK(vectorized_def);
}
- // Generate body.
+ // Generate body from the instruction map, but in original program order.
HEnvironment* env = vector_header_->GetFirstInstruction()->GetEnvironment();
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
auto i = vector_map_->find(it.Current());
if (i != vector_map_->end() && !i->second->IsInBlock()) {
- Insert(vector_body_, i->second); // lays out in original order
+ Insert(vector_body_, i->second);
+ // Deal with instructions that need an environment, such as the scalar intrinsics.
if (i->second->NeedsEnvironment()) {
i->second->CopyEnvironmentFromWithLoopPhiAdjustment(env, vector_header_);
}
@@ -579,7 +580,7 @@
HInstruction* offset = nullptr;
if (TrySetVectorType(type, &restrictions) &&
node->loop_info->IsDefinedOutOfTheLoop(base) &&
- induction_range_.IsUnitStride(index, &offset) &&
+ induction_range_.IsUnitStride(instruction, index, &offset) &&
VectorizeUse(node, value, generate_code, type, restrictions)) {
if (generate_code) {
GenerateVecSub(index, offset);
@@ -632,7 +633,7 @@
HInstruction* offset = nullptr;
if (type == instruction->GetType() &&
node->loop_info->IsDefinedOutOfTheLoop(base) &&
- induction_range_.IsUnitStride(index, &offset)) {
+ induction_range_.IsUnitStride(instruction, index, &offset)) {
if (generate_code) {
GenerateVecSub(index, offset);
GenerateVecMem(instruction, vector_map_->Get(index), nullptr, type);
@@ -991,8 +992,9 @@
UNREACHABLE();
} // switch invoke
} else {
- // In scalar code, simply clone the method invoke, and replace its operands
- // with the corresponding new scalar instructions in the loop.
+ // In scalar code, simply clone the method invoke, and replace its operands with the
+ // corresponding new scalar instructions in the loop. The instruction will get an
+ // environment while being inserted from the instruction map in original program order.
DCHECK(vector_mode_ == kSequential);
HInvokeStaticOrDirect* new_invoke = new (global_allocator_) HInvokeStaticOrDirect(
global_allocator_,
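The body-generation comment above relies on a simple property: walking the original instruction list and looking clones up in the (unordered) map reproduces original program order. A standalone sketch of that pattern, with std::string standing in for HInstruction*:

    #include <string>
    #include <unordered_map>
    #include <vector>

    std::vector<std::string> LayOutClones(
        const std::vector<std::string>& original_order,
        const std::unordered_map<std::string, std::string>& clone_map) {
      std::vector<std::string> body;
      for (const std::string& instr : original_order) {  // Original program order.
        auto it = clone_map.find(instr);
        if (it != clone_map.end()) {
          body.push_back(it->second);  // Insert the clone at this point.
        }
      }
      return body;
    }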
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index b8ff2c2..4bc8e8e 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2782,7 +2782,7 @@
}
CHECK(klass->IsLoaded());
- // At this point the class is loaded. Publish a ClassLoad even.
+ // At this point the class is loaded. Publish a ClassLoad event.
// Note: this may be a temporary class. It is a listener's responsibility to handle this.
Runtime::Current()->GetRuntimeCallbacks()->ClassLoad(klass);
@@ -3732,6 +3732,12 @@
ObjPtr<mirror::Class> existing = InsertClass(descriptor, new_class.Get(), hash);
if (existing == nullptr) {
+ // We postpone ClassLoad and ClassPrepare events to this point in time to avoid
+ // duplicate events in case of races. Array classes don't really follow dedicated
+ // load and prepare, anyway.
+ Runtime::Current()->GetRuntimeCallbacks()->ClassLoad(new_class);
+ Runtime::Current()->GetRuntimeCallbacks()->ClassPrepare(new_class, new_class);
+
jit::Jit::NewTypeLoadedIfUsingJit(new_class.Get());
return new_class.Get();
}
@@ -4267,53 +4273,53 @@
jobjectArray throws) {
Thread* self = soa.Self();
StackHandleScope<10> hs(self);
- MutableHandle<mirror::Class> klass(hs.NewHandle(
+ MutableHandle<mirror::Class> temp_klass(hs.NewHandle(
AllocClass(self, GetClassRoot(kJavaLangClass), sizeof(mirror::Class))));
- if (klass == nullptr) {
+ if (temp_klass == nullptr) {
CHECK(self->IsExceptionPending()); // OOME.
return nullptr;
}
- DCHECK(klass->GetClass() != nullptr);
- klass->SetObjectSize(sizeof(mirror::Proxy));
+ DCHECK(temp_klass->GetClass() != nullptr);
+ temp_klass->SetObjectSize(sizeof(mirror::Proxy));
// Set the class access flags incl. VerificationAttempted, so we do not try to set the flag on
// the methods.
- klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal | kAccVerificationAttempted);
- klass->SetClassLoader(soa.Decode<mirror::ClassLoader>(loader));
- DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
- klass->SetName(soa.Decode<mirror::String>(name));
- klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache());
+ temp_klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal | kAccVerificationAttempted);
+ temp_klass->SetClassLoader(soa.Decode<mirror::ClassLoader>(loader));
+ DCHECK_EQ(temp_klass->GetPrimitiveType(), Primitive::kPrimNot);
+ temp_klass->SetName(soa.Decode<mirror::String>(name));
+ temp_klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache());
// Object has an empty iftable, copy it for that reason.
- klass->SetIfTable(GetClassRoot(kJavaLangObject)->GetIfTable());
- mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
- std::string descriptor(GetDescriptorForProxy(klass.Get()));
+ temp_klass->SetIfTable(GetClassRoot(kJavaLangObject)->GetIfTable());
+ mirror::Class::SetStatus(temp_klass, mirror::Class::kStatusIdx, self);
+ std::string descriptor(GetDescriptorForProxy(temp_klass.Get()));
const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
// Needs to be before we insert the class so that the allocator field is set.
- LinearAlloc* const allocator = GetOrCreateAllocatorForClassLoader(klass->GetClassLoader());
+ LinearAlloc* const allocator = GetOrCreateAllocatorForClassLoader(temp_klass->GetClassLoader());
// Insert the class before loading the fields as the field roots
// (ArtField::declaring_class_) are only visited from the class
// table. There can't be any suspend points between inserting the
// class and setting the field arrays below.
- ObjPtr<mirror::Class> existing = InsertClass(descriptor.c_str(), klass.Get(), hash);
+ ObjPtr<mirror::Class> existing = InsertClass(descriptor.c_str(), temp_klass.Get(), hash);
CHECK(existing == nullptr);
// Instance fields are inherited, but we add a couple of static fields...
const size_t num_fields = 2;
LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self, allocator, num_fields);
- klass->SetSFieldsPtr(sfields);
+ temp_klass->SetSFieldsPtr(sfields);
// 1. Create a static field 'interfaces' that holds the _declared_ interfaces implemented by
// our proxy, so Class.getInterfaces doesn't return the flattened set.
ArtField& interfaces_sfield = sfields->At(0);
interfaces_sfield.SetDexFieldIndex(0);
- interfaces_sfield.SetDeclaringClass(klass.Get());
+ interfaces_sfield.SetDeclaringClass(temp_klass.Get());
interfaces_sfield.SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
// 2. Create a static field 'throws' that holds exceptions thrown by our methods.
ArtField& throws_sfield = sfields->At(1);
throws_sfield.SetDexFieldIndex(1);
- throws_sfield.SetDeclaringClass(klass.Get());
+ throws_sfield.SetDeclaringClass(temp_klass.Get());
throws_sfield.SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
// Proxies have 1 direct method, the constructor
@@ -4334,43 +4340,46 @@
self->AssertPendingOOMException();
return nullptr;
}
- klass->SetMethodsPtr(proxy_class_methods, num_direct_methods, num_virtual_methods);
+ temp_klass->SetMethodsPtr(proxy_class_methods, num_direct_methods, num_virtual_methods);
// Create the single direct method.
- CreateProxyConstructor(klass, klass->GetDirectMethodUnchecked(0, image_pointer_size_));
+ CreateProxyConstructor(temp_klass, temp_klass->GetDirectMethodUnchecked(0, image_pointer_size_));
// Create virtual method using specified prototypes.
// TODO These should really use the iterators.
for (size_t i = 0; i < num_virtual_methods; ++i) {
- auto* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
+ auto* virtual_method = temp_klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
auto* prototype = h_methods->Get(i)->GetArtMethod();
- CreateProxyMethod(klass, prototype, virtual_method);
+ CreateProxyMethod(temp_klass, prototype, virtual_method);
DCHECK(virtual_method->GetDeclaringClass() != nullptr);
DCHECK(prototype->GetDeclaringClass() != nullptr);
}
// The super class is java.lang.reflect.Proxy
- klass->SetSuperClass(GetClassRoot(kJavaLangReflectProxy));
+ temp_klass->SetSuperClass(GetClassRoot(kJavaLangReflectProxy));
// Now effectively in the loaded state.
- mirror::Class::SetStatus(klass, mirror::Class::kStatusLoaded, self);
+ mirror::Class::SetStatus(temp_klass, mirror::Class::kStatusLoaded, self);
self->AssertNoPendingException();
- MutableHandle<mirror::Class> new_class = hs.NewHandle<mirror::Class>(nullptr);
+ // At this point the class is loaded. Publish a ClassLoad event.
+ // Note: this may be a temporary class. It is a listener's responsibility to handle this.
+ Runtime::Current()->GetRuntimeCallbacks()->ClassLoad(temp_klass);
+
+ MutableHandle<mirror::Class> klass = hs.NewHandle<mirror::Class>(nullptr);
{
// Must hold lock on object when resolved.
- ObjectLock<mirror::Class> resolution_lock(self, klass);
+ ObjectLock<mirror::Class> resolution_lock(self, temp_klass);
// Link the fields and virtual methods, creating vtable and iftables.
// The new class will replace the old one in the class table.
Handle<mirror::ObjectArray<mirror::Class>> h_interfaces(
hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>>(interfaces)));
- if (!LinkClass(self, descriptor.c_str(), klass, h_interfaces, &new_class)) {
- mirror::Class::SetStatus(klass, mirror::Class::kStatusErrorUnresolved, self);
+ if (!LinkClass(self, descriptor.c_str(), temp_klass, h_interfaces, &klass)) {
+ mirror::Class::SetStatus(temp_klass, mirror::Class::kStatusErrorUnresolved, self);
return nullptr;
}
}
- CHECK(klass->IsRetired());
- CHECK_NE(klass.Get(), new_class.Get());
- klass.Assign(new_class.Get());
+ CHECK(temp_klass->IsRetired());
+ CHECK_NE(temp_klass.Get(), klass.Get());
CHECK_EQ(interfaces_sfield.GetDeclaringClass(), klass.Get());
interfaces_sfield.SetObject<false>(
@@ -4381,6 +4390,8 @@
klass.Get(),
soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>>(throws));
+ Runtime::Current()->GetRuntimeCallbacks()->ClassPrepare(temp_klass, klass);
+
{
// Lock on klass is released. Lock new class object.
ObjectLock<mirror::Class> initialization_lock(self, klass);
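A hypothetical listener illustrating the contract the comments describe. The method shapes are inferred from the ClassLoad/ClassPrepare call sites above; this is not the real ART callback interface.

    // Interface shape assumed, not the real ART ClassLoadCallback declaration.
    class SampleClassListener {
     public:
      void ClassLoad(ObjPtr<mirror::Class> klass) {
        // May receive a temporary class (see the proxy path above): do not
        // cache the pointer across the prepare step.
      }
      void ClassPrepare(ObjPtr<mirror::Class> temp_klass,
                        ObjPtr<mirror::Class> klass) {
        // For proxies, temp_klass is the pre-link object and klass the final
        // linked class; for arrays both arguments name the same class.
      }
    };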
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b2fba67..868d8df 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1396,7 +1396,8 @@
mirror::Class* c = m->GetDeclaringClass();
location->type_tag = GetTypeTag(c);
location->class_id = gRegistry->AddRefType(c);
- location->method_id = ToMethodId(m);
+ // The RI seems to return 0 for all obsolete methods. For compatibility we do the same.
+ location->method_id = m->IsObsolete() ? 0 : ToMethodId(m);
location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
}
}
@@ -1409,6 +1410,15 @@
return m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
}
+bool Dbg::IsMethodObsolete(JDWP::MethodId method_id) {
+ ArtMethod* m = FromMethodId(method_id);
+ if (m == nullptr) {
+ // NB: Since we return 0 as the method ID for obsolete methods, we default to true here.
+ return true;
+ }
+ return m->IsObsolete();
+}
+
std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
ArtField* f = FromFieldId(field_id);
if (f == nullptr) {
@@ -3717,10 +3727,9 @@
if (!m->IsRuntimeMethod()) {
++stack_depth;
if (method == nullptr) {
- mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
+ const DexFile* dex_file = m->GetDexFile();
method = m;
- if (dex_cache != nullptr) {
- const DexFile* dex_file = dex_cache->GetDexFile();
+ if (dex_file != nullptr) {
line_number = annotations::GetLineNumFromPC(dex_file, m, GetDexPc());
}
}
diff --git a/runtime/debugger.h b/runtime/debugger.h
index a7fd160..27124e1 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -370,6 +370,8 @@
//
static std::string GetMethodName(JDWP::MethodId method_id)
REQUIRES_SHARED(Locks::mutator_lock_);
+ static bool IsMethodObsolete(JDWP::MethodId method_id)
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId ref_type_id, bool with_generic,
JDWP::ExpandBuf* pReply)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 964c567..971d039 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -761,12 +761,11 @@
return ERR_NONE;
}
-// Default implementation for IDEs relying on this command.
static JdwpError M_IsObsolete(JdwpState*, Request* request, ExpandBuf* reply)
REQUIRES_SHARED(Locks::mutator_lock_) {
request->ReadRefTypeId(); // unused reference type ID
- request->ReadMethodId(); // unused method ID
- expandBufAdd1(reply, false); // a method is never obsolete.
+ MethodId id = request->ReadMethodId();
+ expandBufAdd1(reply, Dbg::IsMethodObsolete(id));
return ERR_NONE;
}
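A standalone model of the ID convention these two changes establish (simplified types, not the real ART/JDWP ones): obsolete methods are reported with method ID 0, and an unresolvable ID is reported back as obsolete.

    #include <cassert>
    #include <cstdint>

    using MethodId = uint64_t;

    MethodId EncodeMethodId(bool is_obsolete, MethodId real_id) {
      return is_obsolete ? 0 : real_id;  // Mirrors ToMethodId() above.
    }

    bool IsMethodObsolete(MethodId id) {
      // ID 0 cannot be resolved to a method, so default to obsolete,
      // matching the nullptr branch in Dbg::IsMethodObsolete.
      return id == 0;
    }

    int main() {
      assert(IsMethodObsolete(EncodeMethodId(/*is_obsolete=*/true, 42)));
      assert(!IsMethodObsolete(EncodeMethodId(/*is_obsolete=*/false, 42)));
      return 0;
    }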
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 4dd8e60..be2bffc 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -52,8 +52,10 @@
REQUIRES(!Locks::profiler_lock_, !wait_lock_)
NO_THREAD_SAFETY_ANALYSIS;
- // Just for testing purpose.
+ // For testing or manual purposes (SIGUSR1).
static void ForceProcessProfiles();
+
+ // Just for testing purposes.
static bool HasSeenMethod(const std::string& profile,
const DexFile* dex_file,
uint16_t method_idx);
diff --git a/runtime/mirror/class_ext.cc b/runtime/mirror/class_ext.cc
index 94e4b88..32d49bb 100644
--- a/runtime/mirror/class_ext.cc
+++ b/runtime/mirror/class_ext.cc
@@ -40,8 +40,6 @@
void ClassExt::SetObsoleteArrays(ObjPtr<PointerArray> methods,
ObjPtr<ObjectArray<DexCache>> dex_caches) {
- DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId())
- << "Obsolete arrays are set without synchronization!";
CHECK_EQ(methods.IsNull(), dex_caches.IsNull());
auto obsolete_dex_cache_off = OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_dex_caches_);
auto obsolete_methods_off = OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_methods_);
@@ -54,8 +52,7 @@
// these arrays are written into without all threads being suspended we have a race condition! This
// race could cause obsolete methods to be missed.
bool ClassExt::ExtendObsoleteArrays(Thread* self, uint32_t increase) {
- DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId())
- << "Obsolete arrays are set without synchronization!";
+ // TODO It would be good to check that we have locked the class associated with this ClassExt.
StackHandleScope<5> hs(self);
Handle<ClassExt> h_this(hs.NewHandle(this));
Handle<PointerArray> old_methods(hs.NewHandle(h_this->GetObsoleteMethods()));
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index 7d95de8..0655079 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -216,7 +216,6 @@
jvmtiError Redefiner::IsModifiableClass(jvmtiEnv* env ATTRIBUTE_UNUSED,
jclass klass,
jboolean* is_redefinable) {
- // TODO Check for the appropriate feature flags once we have enabled them.
art::Thread* self = art::Thread::Current();
art::ScopedObjectAccess soa(self);
art::StackHandleScope<1> hs(self);
@@ -790,9 +789,11 @@
kSlotNewDexCache = 3,
kSlotMirrorClass = 4,
kSlotOrigDexFile = 5,
+ kSlotOldObsoleteMethods = 6,
+ kSlotOldDexCaches = 7,
// Must be last one.
- kNumSlots = 6,
+ kNumSlots = 8,
};
// This needs to have a HandleScope passed in that is capable of creating a new Handle without
@@ -815,7 +816,6 @@
return arr_.IsNull();
}
- // TODO Maybe make an iterable view type to simplify using this.
art::mirror::ClassLoader* GetSourceClassLoader(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
return art::down_cast<art::mirror::ClassLoader*>(GetSlot(klass_index, kSlotSourceClassLoader));
@@ -842,6 +842,18 @@
return art::down_cast<art::mirror::Object*>(GetSlot(klass_index, kSlotOrigDexFile));
}
+ art::mirror::PointerArray* GetOldObsoleteMethods(jint klass_index) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::down_cast<art::mirror::PointerArray*>(
+ GetSlot(klass_index, kSlotOldObsoleteMethods));
+ }
+
+ art::mirror::ObjectArray<art::mirror::DexCache>* GetOldDexCaches(jint klass_index) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::down_cast<art::mirror::ObjectArray<art::mirror::DexCache>*>(
+ GetSlot(klass_index, kSlotOldDexCaches));
+ }
+
void SetSourceClassLoader(jint klass_index, art::mirror::ClassLoader* loader)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotSourceClassLoader, loader);
@@ -866,6 +878,14 @@
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotOrigDexFile, bytes);
}
+ void SetOldObsoleteMethods(jint klass_index, art::mirror::PointerArray* methods)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotOldObsoleteMethods, methods);
+ }
+ void SetOldDexCaches(jint klass_index, art::mirror::ObjectArray<art::mirror::DexCache>* caches)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotOldDexCaches, caches);
+ }
int32_t Length() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
return arr_->GetLength() / kNumSlots;
@@ -979,6 +999,15 @@
REQUIRES_SHARED(art::Locks::mutator_lock_) {
return holder_.GetOriginalDexFile(idx_);
}
+ art::mirror::PointerArray* GetOldObsoleteMethods() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetOldObsoleteMethods(idx_);
+ }
+ art::mirror::ObjectArray<art::mirror::DexCache>* GetOldDexCaches() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetOldDexCaches(idx_);
+ }
+
int32_t GetIndex() const {
return idx_;
}
@@ -1004,6 +1033,14 @@
REQUIRES_SHARED(art::Locks::mutator_lock_) {
holder_.SetOriginalDexFile(idx_, bytes);
}
+ void SetOldObsoleteMethods(art::mirror::PointerArray* methods)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetOldObsoleteMethods(idx_, methods);
+ }
+ void SetOldDexCaches(art::mirror::ObjectArray<art::mirror::DexCache>* caches)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetOldDexCaches(idx_, caches);
+ }
private:
int32_t idx_;
@@ -1164,9 +1201,15 @@
return true;
}
-bool Redefiner::EnsureAllClassAllocationsFinished() {
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
- if (!redef.EnsureClassAllocationsFinished()) {
+void Redefiner::RestoreObsoleteMethodMapsIfUnneeded(RedefinitionDataHolder& holder) {
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ data.GetRedefinition().RestoreObsoleteMethodMapsIfUnneeded(&data);
+ }
+}
+
+bool Redefiner::EnsureAllClassAllocationsFinished(RedefinitionDataHolder& holder) {
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ if (!data.GetRedefinition().EnsureClassAllocationsFinished(&data)) {
return false;
}
}
@@ -1239,13 +1282,9 @@
// between allocating them and pausing all threads before we can update them so we need to do a
// try loop.
if (!CheckAllRedefinitionAreValid() ||
- !EnsureAllClassAllocationsFinished() ||
+ !EnsureAllClassAllocationsFinished(holder) ||
!FinishAllRemainingAllocations(holder) ||
!CheckAllClassesAreVerified(holder)) {
- // TODO Null out the ClassExt fields we allocated (if possible, might be racing with another
- // redefineclass call which made it even bigger. Leak shouldn't be huge (2x array of size
- // declared_methods_.length) but would be good to get rid of. All other allocations should be
- // cleaned up by the GC eventually.
return result_;
}
@@ -1277,11 +1316,11 @@
redef.FindAndAllocateObsoleteMethods(klass);
redef.UpdateClass(klass, data.GetNewDexCache(), data.GetOriginalDexFile());
}
+ RestoreObsoleteMethodMapsIfUnneeded(holder);
// TODO We should check for if any of the redefined methods are intrinsic methods here and, if any
// are, force a full-world deoptimization before finishing redefinition. If we don't do this then
// methods that have been jitted prior to the current redefinition being applied might continue
// to use the old versions of the intrinsics!
- // TODO Shrink the obsolete method maps if possible?
// TODO Do the dex_file release at a more reasonable place. This works but it muddles who really
// owns the DexFile and when ownership is transferred.
ReleaseAllDexFiles();
@@ -1372,9 +1411,35 @@
ext->SetOriginalDexFile(original_dex_file);
}
+// Restores the old obsolete method maps if it turns out they weren't needed (i.e. there were no new
+// obsolete methods).
+void Redefiner::ClassRedefinition::RestoreObsoleteMethodMapsIfUnneeded(
+ const RedefinitionDataIter* cur_data) {
+ art::mirror::Class* klass = GetMirrorClass();
+ art::mirror::ClassExt* ext = klass->GetExtData();
+ art::mirror::PointerArray* methods = ext->GetObsoleteMethods();
+ int32_t old_length =
+ cur_data->GetOldDexCaches() == nullptr ? 0 : cur_data->GetOldDexCaches()->GetLength();
+ int32_t expected_length =
+ old_length + klass->NumDirectMethods() + klass->NumDeclaredVirtualMethods();
+ // Check to make sure we are only undoing this one.
+ if (expected_length == methods->GetLength()) {
+ for (int32_t i = old_length; i < expected_length; i++) {
+ if (methods->GetElementPtrSize<art::ArtMethod*>(i, art::kRuntimePointerSize) != nullptr) {
+ // We actually have some new obsolete methods. Just abort since we cannot safely shrink the
+ // obsolete methods array.
+ return;
+ }
+ }
+ // No new obsolete methods! We can get rid of the maps.
+ ext->SetObsoleteArrays(cur_data->GetOldObsoleteMethods(), cur_data->GetOldDexCaches());
+ }
+}
+
// This function does all (java) allocations we need to do for the Class being redefined.
// TODO Change this name maybe?
-bool Redefiner::ClassRedefinition::EnsureClassAllocationsFinished() {
+bool Redefiner::ClassRedefinition::EnsureClassAllocationsFinished(
+ /*out*/RedefinitionDataIter* cur_data) {
art::StackHandleScope<2> hs(driver_->self_);
art::Handle<art::mirror::Class> klass(hs.NewHandle(
driver_->self_->DecodeJObject(klass_)->AsClass()));
@@ -1391,22 +1456,20 @@
RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate ClassExt");
return false;
}
- // Allocate the 2 arrays that make up the obsolete methods map. Since the contents of the arrays
+ // First save the old values of the 2 arrays that make up the obsolete method maps. Then
+ // allocate the 2 arrays that make up the obsolete methods map. Since the contents of the arrays
// are only modified when all threads (other than the modifying one) are suspended we don't need
// to worry about missing the unsynchronized writes to the array. We do synchronize when setting it
// however, since that can happen at any time.
- // TODO Clear these after we walk the stacks in order to free them in the (likely?) event there
- // are no obsolete methods.
- {
- art::ObjectLock<art::mirror::ClassExt> lock(driver_->self_, ext);
- if (!ext->ExtendObsoleteArrays(
- driver_->self_, klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size())) {
- // OOM. Clear exception and return error.
- driver_->self_->AssertPendingOOMException();
- driver_->self_->ClearException();
- RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate/extend obsolete methods map");
- return false;
- }
+ cur_data->SetOldObsoleteMethods(ext->GetObsoleteMethods());
+ cur_data->SetOldDexCaches(ext->GetObsoleteDexCaches());
+ if (!ext->ExtendObsoleteArrays(
+ driver_->self_, klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size())) {
+ // OOM. Clear exception and return error.
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
+ RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate/extend obsolete methods map");
+ return false;
}
return true;
}
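The save/extend/restore dance in miniature: a standalone sketch (std::vector standing in for the obsolete-method PointerArray) of the pattern that EnsureClassAllocationsFinished and RestoreObsoleteMethodMapsIfUnneeded implement together.

    #include <cstddef>
    #include <vector>

    struct ObsoleteMaps {
      std::vector<void*> methods;  // Stand-in for the PointerArray.
    };

    void RedefineWithSpeculativeExtend(ObsoleteMaps* ext, size_t new_slots) {
      ObsoleteMaps old = *ext;  // SetOldObsoleteMethods()/SetOldDexCaches().
      ext->methods.resize(old.methods.size() + new_slots, nullptr);  // ExtendObsoleteArrays().

      // ... all other threads are suspended; stack walking may record
      // obsolete methods into the new tail slots here ...

      bool any_new = false;
      for (size_t i = old.methods.size(); i < ext->methods.size(); ++i) {
        any_new = any_new || ext->methods[i] != nullptr;
      }
      if (!any_new) {
        *ext = old;  // RestoreObsoleteMethodMapsIfUnneeded(): drop the unused tail.
      }
    }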
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 809a681..586259a 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -166,7 +166,8 @@
// Preallocates all needed allocations in klass so that we can pause execution safely.
// TODO We should be able to free the arrays if they end up not being used. Investigate doing
// this in the future. For now we will just take the memory hit.
- bool EnsureClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool EnsureClassAllocationsFinished(/*out*/RedefinitionDataIter* data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
// This will check that no constraints are violated (more than 1 class in dex file, any changes
// in number/declaration of methods & fields, changes in access flags, etc.)
@@ -198,6 +199,9 @@
art::ObjPtr<art::mirror::Object> original_dex_file)
REQUIRES(art::Locks::mutator_lock_);
+ void RestoreObsoleteMethodMapsIfUnneeded(const RedefinitionDataIter* cur_data)
+ REQUIRES(art::Locks::mutator_lock_);
+
void ReleaseDexFile() REQUIRES_SHARED(art::Locks::mutator_lock_);
void UnregisterBreakpoints() REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -241,11 +245,16 @@
bool CheckAllRedefinitionAreValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool CheckAllClassesAreVerified(RedefinitionDataHolder& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
- bool EnsureAllClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool EnsureAllClassAllocationsFinished(RedefinitionDataHolder& holder)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
bool FinishAllRemainingAllocations(RedefinitionDataHolder& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
void ReleaseAllDexFiles() REQUIRES_SHARED(art::Locks::mutator_lock_);
void UnregisterAllBreakpoints() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ // Restores the old obsolete method maps if it turns out they weren't needed (i.e. there were no
+ // new obsolete methods).
+ void RestoreObsoleteMethodMapsIfUnneeded(RedefinitionDataHolder& holder)
+ REQUIRES(art::Locks::mutator_lock_);
void RecordFailure(jvmtiError result, const std::string& class_sig, const std::string& error_msg);
void RecordFailure(jvmtiError result, const std::string& error_msg) {
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 674459d..0b7ea2f 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -32,6 +32,7 @@
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "gc/heap.h"
+#include "jit/profile_saver.h"
#include "os.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
@@ -154,8 +155,9 @@
}
void SignalCatcher::HandleSigUsr1() {
- LOG(INFO) << "SIGUSR1 forcing GC (no HPROF)";
+ LOG(INFO) << "SIGUSR1 forcing GC (no HPROF) and profile save";
Runtime::Current()->GetHeap()->CollectGarbage(false);
+ ProfileSaver::ForceProcessProfiles();
}
int SignalCatcher::WaitForSignal(Thread* self, SignalSet& signals) {
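A minimal way to exercise the new behavior from another process, assuming you know the pid of the ART process; after this signal the runtime logs the message above, runs a GC, and forces a profile save.

    #include <signal.h>
    #include <sys/types.h>
    #include <cstdlib>

    int main(int argc, char** argv) {
      if (argc < 2) return EXIT_FAILURE;
      pid_t target = static_cast<pid_t>(std::atoi(argv[1]));  // ART process pid.
      return kill(target, SIGUSR1) == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
    }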
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 1727f88..cc1b78d 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -22,10 +22,13 @@
#endif
#include <dlfcn.h>
+#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
+#include <initializer_list>
#include <utility>
#include "sigchain.h"
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index f913cf6..87287f8 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -446,9 +446,9 @@
return false;
}
-extern "C" bool native_bridge_initNamespace(const char* public_ns_sonames ATTRIBUTE_UNUSED,
- const char* anon_ns_library_path ATTRIBUTE_UNUSED) {
- printf("Initializing namespaces in native bridge.\n");
+extern "C" bool native_bridge_initAnonymousNamespace(const char* public_ns_sonames ATTRIBUTE_UNUSED,
+ const char* anon_ns_library_path ATTRIBUTE_UNUSED) {
+ printf("Initializing anonymous namespace in native bridge.\n");
return false;
}
@@ -463,6 +463,13 @@
return nullptr;
}
+extern "C" bool native_bridge_linkNamespaces(android::native_bridge_namespace_t* from ATTRIBUTE_UNUSED,
+ android::native_bridge_namespace_t* to ATTRIBUTE_UNUSED,
+ const char* shared_libs_sonames ATTRIBUTE_UNUSED) {
+ printf("Linking namespaces in native bridge.\n");
+ return false;
+}
+
extern "C" void* native_bridge_loadLibraryExt(const char* libpath ATTRIBUTE_UNUSED,
int flag ATTRIBUTE_UNUSED,
android::native_bridge_namespace_t* ns ATTRIBUTE_UNUSED) {
@@ -487,7 +494,8 @@
.unloadLibrary = &native_bridge_unloadLibrary,
.getError = &native_bridge_getError,
.isPathSupported = &native_bridge_isPathSupported,
- .initNamespace = &native_bridge_initNamespace,
+ .initAnonymousNamespace = &native_bridge_initAnonymousNamespace,
.createNamespace = &native_bridge_createNamespace,
+ .linkNamespaces = &native_bridge_linkNamespaces,
.loadLibraryExt = &native_bridge_loadLibraryExt
};
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index eee90ab..182c07d 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -270,6 +270,16 @@
}
}
+ // If vectorized, the invariant stride should be recognized
+ // as a reduction, not as a unit stride in the outer loop.
+ static void reduc(int[] xx, int[] yy) {
+ for (int i0 = 0; i0 < 2; i0++) {
+ for (int i1 = 0; i1 < 469; i1++) {
+ xx[i0] -= (++yy[i1]);
+ }
+ }
+ }
+
public static void main(String[] args) {
expectEquals(10, earlyExitFirst(-1));
for (int i = 0; i <= 10; i++) {
@@ -335,6 +345,15 @@
expectEquals(2.0f, a[i]);
}
+ int[] xx = new int[2];
+ int[] yy = new int[469];
+ reduc(xx, yy);
+ expectEquals(-469, xx[0]);
+ expectEquals(-938, xx[1]);
+ for (int i = 0; i < 469; i++) {
+ expectEquals(2, yy[i]);
+ }
+
System.out.println("passed");
}
diff --git a/test/912-classes/expected.txt b/test/912-classes/expected.txt
index 6b86ac9..0f2920a 100644
--- a/test/912-classes/expected.txt
+++ b/test/912-classes/expected.txt
@@ -92,3 +92,7 @@
Prepare: LA; on TestRunner (cur=TestRunner)
Load: LC; on TestRunner
Prepare: LC; on TestRunner (cur=TestRunner)
+Load: L$Proxy1; on main
+Prepare: L$Proxy1; on main (cur=main)
+Load: [LMain; on main
+Prepare: [LMain; on main (cur=main)
diff --git a/test/912-classes/src/Main.java b/test/912-classes/src/Main.java
index 5d25d76..643b0807 100644
--- a/test/912-classes/src/Main.java
+++ b/test/912-classes/src/Main.java
@@ -282,6 +282,10 @@
t.start();
t.join();
+ // Check creation of arrays and proxies.
+ Proxy.getProxyClass(Main.class.getClassLoader(), new Class[] { Comparable.class });
+ Class.forName("[LMain;");
+
enableClassLoadPreparePrintEvents(false);
// Note: the JIT part of this test is about the JIT pulling in a class not yet touched by