Merge "Hold a ClassLoader reference in NativeAllocationRegistry."
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 51fbaea..08670a0 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1305,4 +1305,18 @@
locations->AddTemp(Location::RequiresRegister());
}
+uint32_t CodeGenerator::GetReferenceSlowFlagOffset() const {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
+ DCHECK(klass->IsInitialized());
+ return klass->GetSlowPathFlagOffset().Uint32Value();
+}
+
+uint32_t CodeGenerator::GetReferenceDisableFlagOffset() const {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
+ DCHECK(klass->IsInitialized());
+ return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
+}
+
} // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 6e75e3b..82a54d2 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -469,6 +469,9 @@
virtual void GenerateNop() = 0;
+ uint32_t GetReferenceSlowFlagOffset() const;
+ uint32_t GetReferenceDisableFlagOffset() const;
+
protected:
// Method patch info used for recording locations of required linker patches and
// target methods. The target method can be used for various purposes, whether for
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 50892a9..bdbafcd 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4308,16 +4308,18 @@
// save one load. However, since this is just an intrinsic slow path we prefer this
// simple and more robust approach rather than trying to determine if that's the case.
SlowPathCode* slow_path = GetCurrentSlowPath();
- DCHECK(slow_path != nullptr); // For intrinsified invokes the call is emitted on the slow path.
- if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
- int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
- __ movl(temp, Address(ESP, stack_offset));
- return temp;
+ if (slow_path != nullptr) {
+ if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
+ int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
+ __ movl(temp, Address(ESP, stack_offset));
+ return temp;
+ }
}
return location.AsRegister<Register>();
}
-void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp) {
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
@@ -4366,6 +4368,11 @@
break;
}
}
+ return callee_method;
+}
+
+void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+ Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index fe7d3ed..98dc8ca 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -398,6 +398,7 @@
MethodReference target_method) OVERRIDE;
// Generate a call to a static or direct method.
+ Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
// Generate a call to a virtual method.
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 56c5b06..30eca2c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -762,10 +762,9 @@
}
}
-void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
+Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp) {
// All registers are assumed to be correctly set up.
-
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
@@ -815,6 +814,13 @@
break;
}
}
+ return callee_method;
+}
+
+void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp) {
+ // All registers are assumed to be correctly set up.
+ Location callee_method = GenerateCalleeMethodStaticOrDirectCall(invoke, temp);
switch (invoke->GetCodePtrLocation()) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d9908bb..7cf1245 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -394,6 +394,7 @@
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
MethodReference target_method) OVERRIDE;
+ Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 568b8a8..2038c88 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -549,26 +549,19 @@
}
}
- if (IsPass(LICM::kLoopInvariantCodeMotionPassName)
- || IsPass(HDeadCodeElimination::kFinalDeadCodeEliminationPassName)
- || IsPass(HDeadCodeElimination::kInitialDeadCodeEliminationPassName)
- || IsPass(BoundsCheckElimination::kBoundsCheckEliminationPassName)
- || IsPass(RegisterAllocator::kRegisterAllocatorPassName)
- || IsPass(HGraphBuilder::kBuilderPassName)) {
- HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
- if (info == nullptr) {
- StartAttributeStream("loop") << "none";
+ HLoopInformation* loop_info = instruction->GetBlock()->GetLoopInformation();
+ if (loop_info == nullptr) {
+ StartAttributeStream("loop") << "none";
+ } else {
+ StartAttributeStream("loop") << "B" << loop_info->GetHeader()->GetBlockId();
+ HLoopInformation* outer = loop_info->GetPreHeader()->GetLoopInformation();
+ if (outer != nullptr) {
+ StartAttributeStream("outer_loop") << "B" << outer->GetHeader()->GetBlockId();
} else {
- StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId();
- HLoopInformation* outer = info->GetPreHeader()->GetLoopInformation();
- if (outer != nullptr) {
- StartAttributeStream("outer_loop") << "B" << outer->GetHeader()->GetBlockId();
- } else {
- StartAttributeStream("outer_loop") << "none";
- }
- StartAttributeStream("irreducible")
- << std::boolalpha << info->IsIrreducible() << std::noboolalpha;
+ StartAttributeStream("outer_loop") << "none";
}
+ StartAttributeStream("irreducible")
+ << std::boolalpha << loop_info->IsIrreducible() << std::noboolalpha;
}
if ((IsPass(HGraphBuilder::kBuilderPassName)
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index d0d52bf..1e86b75 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -454,11 +454,16 @@
if (!set->IsEmpty()) {
if (block->IsLoopHeader()) {
- if (block->GetLoopInformation()->IsIrreducible()) {
+ if (block->GetLoopInformation()->ContainsIrreducibleLoop()) {
// To satisfy our linear scan algorithm, no instruction should flow into an irreducible
- // loop header.
+ // loop header. We clear the set at entry of irreducible loops and any loop containing
+ // an irreducible loop, as in both cases, GVN can extend the liveness of an instruction
+ // across the irreducible loop.
+ // Note that, if we're not compiling OSR, we could still do GVN and introduce
+ // phis at irreducible loop headers. We decided it was not worth the complexity.
set->Clear();
} else {
+ DCHECK(!block->GetLoopInformation()->IsIrreducible());
DCHECK_EQ(block->GetDominator(), block->GetLoopInformation()->GetPreHeader());
set->Kill(side_effects_.GetLoopEffects(block));
}
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index d0edeca..5c47361 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2631,8 +2631,65 @@
GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
+void IntrinsicLocationsBuilderX86::VisitReferenceGetReferent(HInvoke* invoke) {
+ if (kEmitCompilerReadBarrier) {
+ // Do not intrinsify this call with the read barrier configuration.
+ return;
+ }
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorX86::VisitReferenceGetReferent(HInvoke* invoke) {
+ DCHECK(!kEmitCompilerReadBarrier);
+ LocationSummary* locations = invoke->GetLocations();
+ X86Assembler* assembler = GetAssembler();
+
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+
+ SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load ArtMethod first.
+ HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
+ DCHECK(invoke_direct != nullptr);
+ Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
+ invoke_direct, locations->GetTemp(0));
+ DCHECK(temp_loc.Equals(locations->GetTemp(0)));
+ Register temp = temp_loc.AsRegister<Register>();
+
+ // Now get declaring class.
+ __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
+
+ uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
+ uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
+ DCHECK_NE(slow_path_flag_offset, 0u);
+ DCHECK_NE(disable_flag_offset, 0u);
+ DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
+
+ // Check the static flags that prevent us from using the intrinsic.
+ if (slow_path_flag_offset == disable_flag_offset + 1) {
+ __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ } else {
+ __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+
+ // Fast path.
+ __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ __ Bind(slow_path->GetExitLabel());
+}
+
UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
-UNIMPLEMENTED_INTRINSIC(X86, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(X86, SystemArrayCopy)
UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
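The two-byte trick above deserves spelling out: when the disable flag and the slow-path flag land on adjacent byte offsets, a single 16-bit cmpw against zero tests both flags at once, since a 16-bit word is zero exactly when both of its bytes are, on either endianness. A minimal standalone sketch of that equivalence (illustrative offsets, not ART code):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint8_t klass_storage[8] = {};
      const uint32_t disable_flag_offset = 4;
      const uint32_t slow_path_flag_offset = 5;  // == disable_flag_offset + 1

      klass_storage[slow_path_flag_offset] = 1;  // set just one of the flags

      uint16_t both;
      std::memcpy(&both, &klass_storage[disable_flag_offset], sizeof(both));
      bool one_cmpw = (both != 0);  // the single 16-bit check
      bool two_cmpb = (klass_storage[disable_flag_offset] != 0) ||
                      (klass_storage[slow_path_flag_offset] != 0);
      assert(one_cmpw == two_cmpb);  // equivalent to the two-cmpb fallback
      return 0;
    }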
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 4ee2368..a65e54c 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2719,7 +2719,64 @@
GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
+void IntrinsicLocationsBuilderX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
+ if (kEmitCompilerReadBarrier) {
+ // Do not intrinsify this call with the read barrier configuration.
+ return;
+ }
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
+ DCHECK(!kEmitCompilerReadBarrier);
+ LocationSummary* locations = invoke->GetLocations();
+ X86_64Assembler* assembler = GetAssembler();
+
+ CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+
+ SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load ArtMethod first.
+ HInvokeStaticOrDirect* invoke_direct = invoke->AsInvokeStaticOrDirect();
+ DCHECK(invoke_direct != nullptr);
+ Location temp_loc = codegen_->GenerateCalleeMethodStaticOrDirectCall(
+ invoke_direct, locations->GetTemp(0));
+ DCHECK(temp_loc.Equals(locations->GetTemp(0)));
+ CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
+
+ // Now get declaring class.
+ __ movl(temp, Address(temp, ArtMethod::DeclaringClassOffset().Int32Value()));
+
+ uint32_t slow_path_flag_offset = codegen_->GetReferenceSlowFlagOffset();
+ uint32_t disable_flag_offset = codegen_->GetReferenceDisableFlagOffset();
+ DCHECK_NE(slow_path_flag_offset, 0u);
+ DCHECK_NE(disable_flag_offset, 0u);
+ DCHECK_NE(slow_path_flag_offset, disable_flag_offset);
+
+ // Check the static flags that prevent us from using the intrinsic.
+ if (slow_path_flag_offset == disable_flag_offset + 1) {
+ __ cmpw(Address(temp, disable_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ } else {
+ __ cmpb(Address(temp, disable_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ cmpb(Address(temp, slow_path_flag_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+
+ // Fast path.
+ __ movl(out, Address(obj, mirror::Reference::ReferentOffset().Int32Value()));
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ __ Bind(slow_path->GetExitLabel());
+}
+
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index 5a0b89c..7543cd6 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -101,16 +101,6 @@
SideEffects loop_effects = side_effects_.GetLoopEffects(block);
HBasicBlock* pre_header = loop_info->GetPreHeader();
- bool contains_irreducible_loop = false;
- if (graph_->HasIrreducibleLoops()) {
- for (HBlocksInLoopIterator it_loop(*loop_info); !it_loop.Done(); it_loop.Advance()) {
- if (it_loop.Current()->GetLoopInformation()->IsIrreducible()) {
- contains_irreducible_loop = true;
- break;
- }
- }
- }
-
for (HBlocksInLoopIterator it_loop(*loop_info); !it_loop.Done(); it_loop.Advance()) {
HBasicBlock* inner = it_loop.Current();
DCHECK(inner->IsInLoop());
@@ -123,11 +113,12 @@
visited->SetBit(inner->GetBlockId());
}
- if (contains_irreducible_loop) {
+ if (loop_info->ContainsIrreducibleLoop()) {
// We cannot licm in an irreducible loop, or in a natural loop containing an
// irreducible loop.
continue;
}
+ DCHECK(!loop_info->IsIrreducible());
// We can move an instruction that can throw only if it is the first
// throwing instruction in the loop. Note that the first potentially
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 1e6bf07..60329cc 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -446,8 +446,10 @@
}
GraphAnalysisResult HGraph::AnalyzeLoops() const {
- // Order does not matter.
- for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
+ // We iterate in post order to ensure we visit inner loops before outer loops.
+ // `PopulateRecursive` needs this guarantee to know whether a natural loop
+ // contains an irreducible loop.
+ for (HPostOrderIterator it(*this); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
if (block->IsLoopHeader()) {
if (block->IsCatchBlock()) {
@@ -580,6 +582,14 @@
blocks_.SetBit(block->GetBlockId());
block->SetInLoop(this);
+ if (block->IsLoopHeader()) {
+ // We're visiting loops in post-order, so inner loops must have been
+ // populated already.
+ DCHECK(block->GetLoopInformation()->IsPopulated());
+ if (block->GetLoopInformation()->IsIrreducible()) {
+ contains_irreducible_loop_ = true;
+ }
+ }
for (HBasicBlock* predecessor : block->GetPredecessors()) {
PopulateRecursive(predecessor);
}
@@ -683,6 +693,7 @@
}
if (is_irreducible_loop) {
irreducible_ = true;
+ contains_irreducible_loop_ = true;
graph->SetHasIrreducibleLoops(true);
}
}
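A standalone illustration (not ART code) of the post-order property the new comment relies on: in a DFS post-order, a nested loop's header is output before the header of the loop enclosing it, so inner loops get populated first.

    #include <cstdio>
    #include <vector>

    static const std::vector<int> kSuccessors[5] = {
        {1},     // B0: entry -> outer loop header
        {2, 4},  // B1: outer loop header -> inner header, exit
        {3},     // B2: inner loop header -> inner body
        {2, 1},  // B3: inner body -> back edges to B2 (inner) and B1 (outer)
        {},      // B4: exit
    };
    static bool visited[5] = {};

    static void Dfs(int block, std::vector<int>* post_order) {
      visited[block] = true;
      for (int succ : kSuccessors[block]) {
        if (!visited[succ]) {
          Dfs(succ, post_order);
        }
      }
      post_order->push_back(block);
    }

    int main() {
      std::vector<int> post_order;
      Dfs(0, &post_order);
      // Prints "3 2 4 1 0": the inner header B2 comes before the outer B1.
      for (int block : post_order) {
        std::printf("%d ", block);
      }
      std::printf("\n");
      return 0;
    }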
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 934d355..12ea059 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -650,6 +650,7 @@
: header_(header),
suspend_check_(nullptr),
irreducible_(false),
+ contains_irreducible_loop_(false),
back_edges_(graph->GetArena()->Adapter(kArenaAllocLoopInfoBackEdges)),
// Make bit vector growable, as the number of blocks may change.
blocks_(graph->GetArena(), graph->GetBlocks().size(), true, kArenaAllocLoopInfoBackEdges) {
@@ -657,6 +658,7 @@
}
bool IsIrreducible() const { return irreducible_; }
+ bool ContainsIrreducibleLoop() const { return contains_irreducible_loop_; }
void Dump(std::ostream& os);
@@ -727,6 +729,10 @@
bool HasBackEdgeNotDominatedByHeader() const;
+ bool IsPopulated() const {
+ return blocks_.GetHighestBitSet() != -1;
+ }
+
private:
// Internal recursive implementation of `Populate`.
void PopulateRecursive(HBasicBlock* block);
@@ -735,6 +741,7 @@
HBasicBlock* header_;
HSuspendCheck* suspend_check_;
bool irreducible_;
+ bool contains_irreducible_loop_;
ArenaVector<HBasicBlock*> back_edges_;
ArenaBitVector blocks_;
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index b1f9cbc..4405b80 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1773,7 +1773,9 @@
// therefore will not have a location for that instruction for `to`.
// Because the instruction is a constant or the ArtMethod, we don't need to
// do anything: it will be materialized in the irreducible loop.
- DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by));
+ DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by))
+ << defined_by->DebugName() << ":" << defined_by->GetId()
+ << " " << from->GetBlockId() << " -> " << to->GetBlockId();
return;
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 5534aea..36e0d99 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -309,17 +309,8 @@
}
if (block->IsLoopHeader()) {
- if (kIsDebugBuild && block->GetLoopInformation()->IsIrreducible()) {
- // To satisfy our liveness algorithm, we need to ensure loop headers of
- // irreducible loops do not have any live-in instructions, except constants
- // and the current method, which can be trivially re-materialized.
- for (uint32_t idx : live_in->Indexes()) {
- HInstruction* instruction = GetInstructionFromSsaIndex(idx);
- DCHECK(instruction->GetBlock()->IsEntryBlock()) << instruction->DebugName();
- DCHECK(!instruction->IsParameterValue());
- DCHECK(instruction->IsCurrentMethod() || instruction->IsConstant())
- << instruction->DebugName();
- }
+ if (kIsDebugBuild) {
+ CheckNoLiveInIrreducibleLoop(*block);
}
size_t last_position = block->GetLoopInformation()->GetLifetimeEnd();
// For all live_in instructions at the loop header, we need to create a range
@@ -344,6 +335,9 @@
// change in this loop), and the live_out set. If the live_out
// set does not change, there is no need to update the live_in set.
if (UpdateLiveOut(block) && UpdateLiveIn(block)) {
+ if (kIsDebugBuild) {
+ CheckNoLiveInIrreducibleLoop(block);
+ }
changed = true;
}
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 1141fd1..1fcba8b 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1260,6 +1260,23 @@
return instruction->GetType() == Primitive::kPrimNot;
}
+ void CheckNoLiveInIrreducibleLoop(const HBasicBlock& block) const {
+ if (!block.IsLoopHeader() || !block.GetLoopInformation()->IsIrreducible()) {
+ return;
+ }
+ BitVector* live_in = GetLiveInSet(block);
+ // To satisfy our liveness algorithm, we need to ensure loop headers of
+ // irreducible loops do not have any live-in instructions, except constants
+ // and the current method, which can be trivially re-materialized.
+ for (uint32_t idx : live_in->Indexes()) {
+ HInstruction* instruction = GetInstructionFromSsaIndex(idx);
+ DCHECK(instruction->GetBlock()->IsEntryBlock()) << instruction->DebugName();
+ DCHECK(!instruction->IsParameterValue());
+ DCHECK(instruction->IsCurrentMethod() || instruction->IsConstant())
+ << instruction->DebugName();
+ }
+ }
+
HGraph* const graph_;
CodeGenerator* const codegen_;
ArenaVector<BlockInfo*> block_infos_;
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 2203646..84cdb7d 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1030,6 +1030,14 @@
}
+void X86Assembler::cmpb(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x80);
+ EmitOperand(7, address);
+ EmitUint8(imm.value() & 0xFF);
+}
+
+
void X86Assembler::cmpw(const Address& address, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
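Encoding-wise this follows the standard `80 /7 ib` form. A worked byte-level example, matching the Cmpb test added below (`cmpb $0, 128(%EDI)`; 128 exceeds the signed-byte range, so the operand is emitted with a 32-bit displacement):

    // Hypothetical standalone reference, not part of the change: the bytes
    // the new cmpb should produce for `cmpb $0, 128(%EDI)`.
    #include <cstdint>
    const uint8_t kCmpbEdi128Imm0[] = {
        0x80,                    // cmp r/m8, imm8 (opcode group; /7 selects CMP)
        0xBF,                    // ModRM: mod=10 (disp32), reg=111 (/7), rm=111 (EDI)
        0x80, 0x00, 0x00, 0x00,  // disp32 = 128, little-endian
        0x00,                    // imm8 = 0
    };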
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 8567ad2..bc46e9f 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -479,6 +479,7 @@
void xchgl(Register dst, Register src);
void xchgl(Register reg, const Address& address);
+ void cmpb(const Address& address, const Immediate& imm);
void cmpw(const Address& address, const Immediate& imm);
void cmpl(Register reg, const Immediate& imm);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 1d1df6e..28043c9 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -389,4 +389,10 @@
DriverStr(expected, "near_label");
}
+TEST_F(AssemblerX86Test, Cmpb) {
+ GetAssembler()->cmpb(x86::Address(x86::EDI, 128), x86::Immediate(0));
+ const char* expected = "cmpb $0, 128(%EDI)\n";
+ DriverStr(expected, "cmpb");
+}
+
} // namespace art
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 32eb4a3..5e7b587 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1224,6 +1224,16 @@
}
+void X86_64Assembler::cmpb(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_int32());
+ EmitOptionalRex32(address);
+ EmitUint8(0x80);
+ EmitOperand(7, address);
+ EmitUint8(imm.value() & 0xFF);
+}
+
+
void X86_64Assembler::cmpw(const Address& address, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK(imm.is_int32());
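The x86-64 variant only differs when EmitOptionalRex32 has something to emit; for the RDI-based test below no prefix is needed and the bytes match the 32-bit case. A hedged example of where the prefix does appear, using the extended base register R8:

    // Hypothetical encoding sketch, not part of the change:
    // `cmpb $0, (%r8)` needs REX.B to reach R8.
    #include <cstdint>
    const uint8_t kCmpbR8Imm0[] = {
        0x41,  // REX.B: extends ModRM.rm to R8
        0x80,  // cmp r/m8, imm8
        0x38,  // ModRM: mod=00 (no disp), reg=111 (/7), rm=000 (R8 via REX.B)
        0x00,  // imm8 = 0
    };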
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 92c7d0a..720a402 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -506,6 +506,7 @@
void xchgq(CpuRegister dst, CpuRegister src);
void xchgl(CpuRegister reg, const Address& address);
+ void cmpb(const Address& address, const Immediate& imm);
void cmpw(const Address& address, const Immediate& imm);
void cmpl(CpuRegister reg, const Immediate& imm);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index afe9207..9dccc9f 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1637,4 +1637,11 @@
DriverStr(expected, "Repecmpsq");
}
+TEST_F(AssemblerX86_64Test, Cmpb) {
+ GetAssembler()->cmpb(x86_64::Address(x86_64::CpuRegister(x86_64::RDI), 128),
+ x86_64::Immediate(0));
+ const char* expected = "cmpb $0, 128(%RDI)\n";
+ DriverStr(expected, "cmpb");
+}
+
} // namespace art
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 06156f5..1790df6 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -253,14 +253,17 @@
Runtime* runtime = Runtime::Current();
// Call the invoke stub, passing everything as arguments.
// If the runtime is not yet started or it is required by the debugger, then perform the
- // Invocation by the interpreter.
+ // invocation by the interpreter, explicitly forcing interpretation over JIT to prevent
+ // cycling around the various JIT/Interpreter methods that handle method invocation.
if (UNLIKELY(!runtime->IsStarted() || Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
if (IsStatic()) {
- art::interpreter::EnterInterpreterFromInvoke(self, this, nullptr, args, result);
+ art::interpreter::EnterInterpreterFromInvoke(
+ self, this, nullptr, args, result, /* stay_in_interpreter */ true);
} else {
mirror::Object* receiver =
reinterpret_cast<StackReference<mirror::Object>*>(&args[0])->AsMirrorPtr();
- art::interpreter::EnterInterpreterFromInvoke(self, this, receiver, args + 1, result);
+ art::interpreter::EnterInterpreterFromInvoke(
+ self, this, receiver, args + 1, result, /* stay_in_interpreter */ true);
}
} else {
DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index bcb7b3b..0e3bc8e 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -85,6 +85,10 @@
return max_value_added_;
}
+ Value BucketWidth() const {
+ return bucket_width_;
+ }
+
const std::string& Name() const {
return name_;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 35c40cd..e9b8643 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -6970,6 +6970,7 @@
}
// Put some random garbage in old methods to help find stale pointers.
if (methods != old_methods && old_methods != nullptr) {
+ WriterMutexLock mu(self, ClassTableForClassLoader(klass->GetClassLoader())->GetLock());
memset(old_methods, 0xFEu, old_size);
}
} else {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index ece171c..d1c8172 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -1020,7 +1020,7 @@
// Returns null if not found.
ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Insert a new class table if not found.
ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader)
diff --git a/runtime/class_table.h b/runtime/class_table.h
index eb784b5..686381d 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -153,6 +153,10 @@
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ ReaderWriterMutex& GetLock() {
+ return lock_;
+ }
+
private:
// Lock to guard inserting and removing.
mutable ReaderWriterMutex lock_;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index df5aa0a..fa540c0 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -119,6 +119,8 @@
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
+static constexpr size_t kNativeAllocationHistogramBuckets = 16;
+
static inline bool CareAboutPauseTimes() {
return Runtime::Current()->InJankPerceptibleProcessState();
}
@@ -186,6 +188,11 @@
total_objects_freed_ever_(0),
num_bytes_allocated_(0),
native_bytes_allocated_(0),
+ native_histogram_lock_("Native allocation lock"),
+ native_allocation_histogram_("Native allocation sizes",
+ 1U,
+ kNativeAllocationHistogramBuckets),
+ native_free_histogram_("Native free sizes", 1U, kNativeAllocationHistogramBuckets),
num_bytes_freed_revoke_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
@@ -1185,6 +1192,20 @@
rosalloc_space_->DumpStats(os);
}
+ {
+ MutexLock mu(Thread::Current(), native_histogram_lock_);
+ if (native_allocation_histogram_.SampleSize() > 0u) {
+ os << "Histogram of native allocation ";
+ native_allocation_histogram_.DumpBins(os);
+ os << " bucket size " << native_allocation_histogram_.BucketWidth() << "\n";
+ }
+ if (native_free_histogram_.SampleSize() > 0u) {
+ os << "Histogram of native free ";
+ native_free_histogram_.DumpBins(os);
+ os << " bucket size " << native_free_histogram_.BucketWidth() << "\n";
+ }
+ }
+
BaseMutex::DumpAll(os);
}
@@ -3848,6 +3869,10 @@
void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
Thread* self = ThreadForEnv(env);
+ {
+ MutexLock mu(self, native_histogram_lock_);
+ native_allocation_histogram_.AddValue(bytes);
+ }
if (native_need_to_run_finalization_) {
RunFinalization(env, kNativeAllocationFinalizeTimeout);
UpdateMaxNativeFootprint();
@@ -3892,6 +3917,10 @@
void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
size_t expected_size;
+ {
+ MutexLock mu(Thread::Current(), native_histogram_lock_);
+ native_free_histogram_.AddValue(bytes);
+ }
do {
expected_size = native_bytes_allocated_.LoadRelaxed();
if (UNLIKELY(bytes > expected_size)) {
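The Histogram calls introduced here follow a small API: the constructor seen above, AddValue on each registration, and SampleSize/DumpBins plus the new BucketWidth accessor on SIGQUIT. A hedged sketch of the same usage pattern outside the Heap, with made-up values and an assumed include path:

    #include <ostream>
    #include "base/histogram.h"  // assumed path, per runtime/base/histogram.h above

    void DumpNativeAllocStats(std::ostream& os) {
      Histogram<uint64_t> histogram("Native allocation sizes",
                                    1U,    // initial bucket width
                                    16u);  // kNativeAllocationHistogramBuckets
      histogram.AddValue(4096);  // one RegisterNativeAllocation(env, 4096)
      if (histogram.SampleSize() > 0u) {
        os << "Histogram of native allocation ";
        histogram.DumpBins(os);  // per-bucket counts
        os << " bucket size " << histogram.BucketWidth() << "\n";
      }
    }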
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index fada1a2..2a1a4a1 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -241,9 +241,9 @@
SHARED_REQUIRES(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
void RegisterNativeFree(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
// Change the allocator, updates entrypoints.
void ChangeAllocator(AllocatorType allocator)
@@ -532,7 +532,7 @@
space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const
SHARED_REQUIRES(Locks::mutator_lock_);
- void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
+ void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
// Do a pending collector transition.
void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_);
@@ -654,7 +654,8 @@
std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
// GC performance measuring
- void DumpGcPerformanceInfo(std::ostream& os) REQUIRES(!*gc_complete_lock_);
+ void DumpGcPerformanceInfo(std::ostream& os)
+ REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
// Thread pool.
@@ -1156,6 +1157,11 @@
// Bytes which are allocated and managed by native code but still need to be accounted for.
Atomic<size_t> native_bytes_allocated_;
+ // Native allocation stats.
+ Mutex native_histogram_lock_;
+ Histogram<uint64_t> native_allocation_histogram_;
+ Histogram<uint64_t> native_free_histogram_;
+
// Number of bytes freed by thread local buffer revokes. This will
// cancel out the ahead-of-time bulk counting of bytes allocated in
// rosalloc thread-local buffers. It is temporarily accumulated
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 6c630cc..1d0e600 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -264,12 +264,12 @@
ShadowFrame& shadow_frame, JValue result_register);
#endif
-static JValue Execute(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame,
- JValue result_register)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
-static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register) {
+static inline JValue Execute(
+ Thread* self,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame,
+ JValue result_register,
+ bool stay_in_interpreter = false) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());
if (LIKELY(shadow_frame.GetDexPC() == 0)) { // Entering the method, but not via deoptimization.
@@ -284,19 +284,21 @@
method, 0);
}
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- jit->MethodEntered(self, shadow_frame.GetMethod());
- if (jit->CanInvokeCompiledCode(method)) {
- JValue result;
+ if (!stay_in_interpreter) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit->MethodEntered(self, shadow_frame.GetMethod());
+ if (jit->CanInvokeCompiledCode(method)) {
+ JValue result;
- // Pop the shadow frame before calling into compiled code.
- self->PopShadowFrame();
- ArtInterpreterToCompiledCodeBridge(self, nullptr, code_item, &shadow_frame, &result);
- // Push the shadow frame back as the caller will expect it.
- self->PushShadowFrame(&shadow_frame);
+ // Pop the shadow frame before calling into compiled code.
+ self->PopShadowFrame();
+ ArtInterpreterToCompiledCodeBridge(self, nullptr, code_item, &shadow_frame, &result);
+ // Push the shadow frame back as the caller will expect it.
+ self->PushShadowFrame(&shadow_frame);
- return result;
+ return result;
+ }
}
}
}
@@ -387,7 +389,8 @@
}
void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receiver,
- uint32_t* args, JValue* result) {
+ uint32_t* args, JValue* result,
+ bool stay_in_interpreter) {
DCHECK_EQ(self, Thread::Current());
bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
@@ -462,7 +465,7 @@
}
}
if (LIKELY(!method->IsNative())) {
- JValue r = Execute(self, code_item, *shadow_frame, JValue());
+ JValue r = Execute(self, code_item, *shadow_frame, JValue(), stay_in_interpreter);
if (result != nullptr) {
*result = r;
}
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 6353a9b..bf4bcff 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -33,8 +33,11 @@
namespace interpreter {
// Called by ArtMethod::Invoke, shadow frames arguments are taken from the args array.
+// The optional stay_in_interpreter parameter (false by default) can be used by clients to
+// explicitly force interpretation in the remaining path that implements method invocation.
extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method,
- mirror::Object* receiver, uint32_t* args, JValue* result)
+ mirror::Object* receiver, uint32_t* args, JValue* result,
+ bool stay_in_interpreter = false)
SHARED_REQUIRES(Locks::mutator_lock_);
// 'from_code' denotes whether the deoptimization was explicitly triggered by compiled code.
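Because the new parameter is defaulted, every existing call site keeps compiling unchanged; only ArtMethod::Invoke opts in. A sketch of the two call shapes against the declaration above, assuming a thread, method, receiver, and argument array already in hand:

    // May still dispatch to JIT-compiled code inside Execute():
    interpreter::EnterInterpreterFromInvoke(self, method, receiver, args, &result);
    // Forces the whole invocation to stay in the interpreter:
    interpreter::EnterInterpreterFromInvoke(self, method, receiver, args, &result,
                                            /* stay_in_interpreter */ true);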
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 42182a2..0126b4d 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -539,6 +539,16 @@
return env->NewStringUTF(new_filter_str.c_str());
}
+static jboolean DexFile_isBackedByOatFile(JNIEnv* env, jclass, jobject cookie) {
+ const OatFile* oat_file = nullptr;
+ std::vector<const DexFile*> dex_files;
+ if (!ConvertJavaArrayToDexFiles(env, cookie, /* out */ dex_files, /* out */ oat_file)) {
+ DCHECK(env->ExceptionCheck());
+ return false;
+ }
+ return oat_file != nullptr;
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile, closeDexFile, "(Ljava/lang/Object;)Z"),
NATIVE_METHOD(DexFile,
@@ -564,6 +574,7 @@
NATIVE_METHOD(DexFile,
getNonProfileGuidedCompilerFilter,
"(Ljava/lang/String;)Ljava/lang/String;"),
+ NATIVE_METHOD(DexFile, isBackedByOatFile, "(Ljava/lang/Object;)Z"),
NATIVE_METHOD(DexFile, getDexFileStatus,
"(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;")
};
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index ddcaade..54b8afd 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -34,20 +34,38 @@
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
- return soa.AddLocalReference<jobject>(
- method->GetDexFile()->GetAnnotationForMethod(method, klass));
+ if (method->IsProxyMethod()) {
+ return nullptr;
+ } else {
+ Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return soa.AddLocalReference<jobject>(
+ method->GetDexFile()->GetAnnotationForMethod(method, klass));
+ }
}
static jobjectArray Constructor_getDeclaredAnnotations(JNIEnv* env, jobject javaMethod) {
ScopedFastNativeObjectAccess soa(env);
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetAnnotationsForMethod(method));
+ if (method->IsProxyMethod()) {
+ mirror::Class* class_class = mirror::Class::GetJavaLangClass();
+ mirror::Class* class_array_class =
+ Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
+ if (class_array_class == nullptr) {
+ return nullptr;
+ }
+ mirror::ObjectArray<mirror::Class>* empty_array =
+ mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0);
+ return soa.AddLocalReference<jobjectArray>(empty_array);
+ } else {
+ return soa.AddLocalReference<jobjectArray>(
+ method->GetDexFile()->GetAnnotationsForMethod(method));
+ }
}
static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
ScopedFastNativeObjectAccess soa(env);
- ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod)
+ ->GetInterfaceMethodIfProxy(sizeof(void*));
mirror::ObjectArray<mirror::Class>* result_array =
method->GetDexFile()->GetExceptionTypesForMethod(method);
if (result_array == nullptr) {
@@ -69,7 +87,12 @@
static jobjectArray Constructor_getParameterAnnotationsNative(JNIEnv* env, jobject javaMethod) {
ScopedFastNativeObjectAccess soa(env);
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetParameterAnnotations(method));
+ if (method->IsProxyMethod()) {
+ return nullptr;
+ } else {
+ return soa.AddLocalReference<jobjectArray>(
+ method->GetDexFile()->GetParameterAnnotations(method));
+ }
}
static jboolean Constructor_isAnnotationPresentNative(JNIEnv* env, jobject javaMethod,
@@ -77,6 +100,10 @@
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->IsProxyMethod()) {
+ // Proxies have no annotations.
+ return false;
+ }
Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
return method->GetDexFile()->IsMethodAnnotationPresent(method, klass);
}
diff --git a/test/044-proxy/expected.txt b/test/044-proxy/expected.txt
index be7023e..2a5f0b9 100644
--- a/test/044-proxy/expected.txt
+++ b/test/044-proxy/expected.txt
@@ -95,3 +95,5 @@
5.8
JNI_OnLoad called
callback
+Found constructor.
+Found constructor with 0 exceptions
diff --git a/test/044-proxy/src/ConstructorProxy.java b/test/044-proxy/src/ConstructorProxy.java
new file mode 100644
index 0000000..95d150c
--- /dev/null
+++ b/test/044-proxy/src/ConstructorProxy.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+
+/**
+ * Tests proxies when used with constructor methods.
+ */
+class ConstructorProxy implements InvocationHandler {
+ public static void main() {
+ try {
+ new ConstructorProxy().runTest();
+ } catch (Exception e) {
+ System.out.println("Unexpected failure occured");
+ e.printStackTrace();
+ }
+ }
+
+ public void runTest() throws Exception {
+ Class<?> proxyClass = Proxy.getProxyClass(
+ getClass().getClassLoader(),
+ new Class<?>[] { Runnable.class }
+ );
+ Constructor<?> constructor = proxyClass.getConstructor(InvocationHandler.class);
+ System.out.println("Found constructor.");
+ // We used to crash when asking for the exception types of the constructor, because the
+ // runtime was not using the non-proxy ArtMethod.
+ Object[] exceptions = constructor.getExceptionTypes();
+ System.out.println("Found constructors with " + exceptions.length + " exceptions");
+ }
+
+ @Override
+ public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+ return args[0];
+ }
+}
+
diff --git a/test/044-proxy/src/Main.java b/test/044-proxy/src/Main.java
index 1f23b95..9dadb7c 100644
--- a/test/044-proxy/src/Main.java
+++ b/test/044-proxy/src/Main.java
@@ -31,6 +31,7 @@
NarrowingTest.main(null);
FloatSelect.main(null);
NativeProxy.main(args);
+ ConstructorProxy.main();
}
// The following code maps from the actual proxy class names (eg $Proxy2) to their test output
diff --git a/test/599-checker-irreducible-loop/expected.txt b/test/599-checker-irreducible-loop/expected.txt
new file mode 100644
index 0000000..573541a
--- /dev/null
+++ b/test/599-checker-irreducible-loop/expected.txt
@@ -0,0 +1 @@
+0
diff --git a/test/599-checker-irreducible-loop/info.txt b/test/599-checker-irreducible-loop/info.txt
new file mode 100644
index 0000000..1e0dd02
--- /dev/null
+++ b/test/599-checker-irreducible-loop/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler in the presence of
+an irreducible loop.
diff --git a/test/599-checker-irreducible-loop/smali/IrreducibleLoop.smali b/test/599-checker-irreducible-loop/smali/IrreducibleLoop.smali
new file mode 100644
index 0000000..5331fd6
--- /dev/null
+++ b/test/599-checker-irreducible-loop/smali/IrreducibleLoop.smali
@@ -0,0 +1,56 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LIrreducibleLoop;
+
+.super Ljava/lang/Object;
+
+## CHECK-START: int IrreducibleLoop.test(int) GVN (before)
+## CHECK-DAG: LoadClass loop:none
+## CHECK-DAG: LoadClass loop:{{B\d+}} outer_loop:none
+
+## CHECK-START: int IrreducibleLoop.test(int) GVN (after)
+## CHECK-DAG: LoadClass loop:none
+## CHECK-DAG: LoadClass loop:{{B\d+}} outer_loop:none
+.method public static test(I)I
+ .registers 2
+
+ sget v0, LIrreducibleLoop;->field1:I
+ sput v0, LIrreducibleLoop;->field2:I
+
+ if-eqz p0, :loop_entry
+ goto :exit
+
+ :loop_entry
+ if-eqz p0, :irreducible_loop_entry
+ sget v0, LIrreducibleLoop;->field2:I
+ sput v0, LIrreducibleLoop;->field1:I
+ if-eqz v0, :exit
+ goto :irreducible_other_loop_entry
+
+ :irreducible_loop_entry
+ if-eqz p0, :loop_back_edge
+ :irreducible_other_loop_entry
+ if-eqz v0, :loop_back_edge
+ goto :irreducible_loop_entry
+
+ :loop_back_edge
+ goto :loop_entry
+
+ :exit
+ return v0
+.end method
+
+.field public static field1:I
+.field public static field2:I
diff --git a/test/599-checker-irreducible-loop/src/Main.java b/test/599-checker-irreducible-loop/src/Main.java
new file mode 100644
index 0000000..b47721f
--- /dev/null
+++ b/test/599-checker-irreducible-loop/src/Main.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String[] args) throws Exception {
+ Class<?> c = Class.forName("IrreducibleLoop");
+ Method m = c.getMethod("test", int.class);
+ Object[] arguments = { 42 };
+ // Invoke the code just for sanity checking.
+ System.out.println(m.invoke(null, arguments));
+ }
+}
diff --git a/test/run-test b/test/run-test
index 047e3fb..2710ea3 100755
--- a/test/run-test
+++ b/test/run-test
@@ -122,10 +122,12 @@
have_dex2oat="yes"
have_patchoat="yes"
have_image="yes"
-image_suffix=""
pic_image_suffix=""
multi_image_suffix=""
android_root="/system"
+# By default we will use optimizing.
+image_args=""
+image_suffix="-optimizing"
while true; do
if [ "x$1" = "x--host" ]; then
@@ -249,18 +251,18 @@
image_suffix="-interpreter"
shift
elif [ "x$1" = "x--jit" ]; then
- run_args="${run_args} --jit"
+ image_args="--jit"
image_suffix="-jit"
shift
elif [ "x$1" = "x--optimizing" ]; then
- run_args="${run_args} -Xcompiler-option --compiler-backend=Optimizing"
+ image_args="-Xcompiler-option --compiler-backend=Optimizing"
image_suffix="-optimizing"
shift
elif [ "x$1" = "x--no-verify" ]; then
run_args="${run_args} --no-verify"
shift
elif [ "x$1" = "x--verify-soft-fail" ]; then
- run_args="${run_args} --verify-soft-fail"
+ image_args="--verify-soft-fail"
image_suffix="-interp-ac"
shift
elif [ "x$1" = "x--no-optimize" ]; then
@@ -349,6 +351,7 @@
fi
done
+run_args="${run_args} ${image_args}"
# Allocate file descriptor real_stderr and redirect it to the shell's error
# output (fd 2).
if [ ${BASH_VERSINFO[1]} -ge 4 ] && [ ${BASH_VERSINFO[2]} -ge 1 ]; then
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index dd2cc31..f25fb98 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -253,11 +253,5 @@
names: ["jsr166.CollectionTest#testEmptyMeansEmpty",
"jsr166.Collection8Test#testForEach",
"jsr166.Collection8Test#testForEachConcurrentStressTest"]
-},
-{
- description: "Unclear why this started to fail",
- result: EXEC_FAILED,
- bug: 28574453,
- names: [ "org.apache.harmony.tests.javax.security.cert.X509CertificateTest#testVerifyPublicKey" ]
}
]