Merge "Create a typedef for HInstruction::GetInputs() return type."
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 5f27e14..1fc247f 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -847,7 +847,7 @@
}
// Try index range obtained by induction variable analysis.
// Disables dynamic bce if OOB is certain.
- if (InductionRangeFitsIn(&array_range, bounds_check, index, &try_dynamic_bce)) {
+ if (InductionRangeFitsIn(&array_range, bounds_check, &try_dynamic_bce)) {
ReplaceInstruction(bounds_check, index);
return;
}
@@ -1299,33 +1299,30 @@
* parameter try_dynamic_bce is set to false if OOB is certain.
*/
bool InductionRangeFitsIn(ValueRange* array_range,
- HInstruction* context,
- HInstruction* index,
+ HBoundsCheck* context,
bool* try_dynamic_bce) {
InductionVarRange::Value v1;
InductionVarRange::Value v2;
bool needs_finite_test = false;
- if (induction_range_.GetInductionRange(context, index, &v1, &v2, &needs_finite_test)) {
- do {
- if (v1.is_known && (v1.a_constant == 0 || v1.a_constant == 1) &&
- v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) {
- DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
- DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
- ValueRange index_range(GetGraph()->GetArena(),
- ValueBound(v1.instruction, v1.b_constant),
- ValueBound(v2.instruction, v2.b_constant));
- // If analysis reveals a certain OOB, disable dynamic BCE.
- if (index_range.GetLower().LessThan(array_range->GetLower()) ||
- index_range.GetUpper().GreaterThan(array_range->GetUpper())) {
- *try_dynamic_bce = false;
- return false;
- }
- // Use analysis for static bce only if loop is finite.
- if (!needs_finite_test && index_range.FitsIn(array_range)) {
- return true;
- }
+ HInstruction* index = context->InputAt(0);
+ HInstruction* hint = ValueBound::HuntForDeclaration(context->InputAt(1));
+ if (induction_range_.GetInductionRange(context, index, hint, &v1, &v2, &needs_finite_test)) {
+ if (v1.is_known && (v1.a_constant == 0 || v1.a_constant == 1) &&
+ v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) {
+ DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
+ DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
+ ValueRange index_range(GetGraph()->GetArena(),
+ ValueBound(v1.instruction, v1.b_constant),
+ ValueBound(v2.instruction, v2.b_constant));
+ // If analysis reveals a certain OOB, disable dynamic BCE. Otherwise,
+ // use analysis for static bce only if loop is finite.
+ if (index_range.GetLower().LessThan(array_range->GetLower()) ||
+ index_range.GetUpper().GreaterThan(array_range->GetUpper())) {
+ *try_dynamic_bce = false;
+ } else if (!needs_finite_test && index_range.FitsIn(array_range)) {
+ return true;
}
- } while (induction_range_.RefineOuter(&v1, &v2));
+ }
}
return false;
}
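
The new `InductionRangeFitsIn` signature takes the `HBoundsCheck` itself, letting the helper pull the index out of input 0 and hunt for the declaration behind input 1 (the array length) to use as a chase hint: an instruction at which range analysis stops chasing fetches, so both sides of the bounds comparison bottom out at the same terminal instruction. Below is a minimal standalone sketch of the stop-at-hint idea; the types are simplified stand-ins, not the ART classes.

```cpp
// Minimal, self-contained sketch of the "chase hint" idea: range
// analysis walks the expression chain of an index but stops chasing
// when it reaches the hint node (here, the array length), so bounds
// end up expressed against the same terminal instruction.
#include <iostream>
#include <string>

struct Node {
  std::string name;             // e.g. "array_length", "i"
  const Node* input = nullptr;  // single chased input, if any
  int offset = 0;               // constant offset contributed here
};

// Returns the terminal node the analysis bottoms out at, accumulating
// constant offsets along the way. Chasing stops at the hint.
const Node* Chase(const Node* n, const Node* hint, int* offset) {
  while (n != hint && n->input != nullptr) {
    *offset += n->offset;
    n = n->input;
  }
  return n;
}

int main() {
  Node len{"array_length"};
  Node minus1{"len-1", &len, -1};
  int offset = 0;
  // With len as hint, chasing "len-1" stops at the length node itself
  // plus a constant offset of -1, i.e. a bound of array_length - 1.
  const Node* terminal = Chase(&minus1, &len, &offset);
  std::cout << terminal->name << " + " << offset << "\n";  // array_length + -1
}
```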
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index f1965f0..7c74816 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -132,7 +132,7 @@
InductionInfo* a,
InductionInfo* b,
Primitive::Type type) {
- DCHECK(a != nullptr);
+ DCHECK(a != nullptr && b != nullptr);
return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr, type);
}
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index bc920d9..5e587e0 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -73,9 +73,12 @@
return v;
}
-/**
- * Corrects a value for type to account for arithmetic wrap-around in lower precision.
- */
+/** Helper method to test for a constant value. */
+static bool IsConstantValue(InductionVarRange::Value v) {
+ return v.is_known && v.a_constant == 0;
+}
+
+/** Corrects a value for type to account for arithmetic wrap-around in lower precision. */
static InductionVarRange::Value CorrectForType(InductionVarRange::Value v, Primitive::Type type) {
switch (type) {
case Primitive::kPrimShort:
@@ -85,26 +88,15 @@
// TODO: maybe some room for improvement, like allowing widening conversions
const int32_t min = Primitive::MinValueOfIntegralType(type);
const int32_t max = Primitive::MaxValueOfIntegralType(type);
- return (v.is_known && v.a_constant == 0 && min <= v.b_constant && v.b_constant <= max)
+ return (IsConstantValue(v) && min <= v.b_constant && v.b_constant <= max)
? v
: InductionVarRange::Value();
}
default:
- // At int or higher.
return v;
}
}
-/** Helper method to test for a constant value. */
-static bool IsConstantValue(InductionVarRange::Value v) {
- return v.is_known && v.a_constant == 0;
-}
-
-/** Helper method to test for same constant value. */
-static bool IsSameConstantValue(InductionVarRange::Value v1, InductionVarRange::Value v2) {
- return IsConstantValue(v1) && IsConstantValue(v2) && v1.b_constant == v2.b_constant;
-}
-
/** Helper method to insert an instruction. */
static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
DCHECK(block != nullptr);
@@ -119,22 +111,22 @@
//
InductionVarRange::InductionVarRange(HInductionVarAnalysis* induction_analysis)
- : induction_analysis_(induction_analysis) {
+ : induction_analysis_(induction_analysis),
+ chase_hint_(nullptr) {
DCHECK(induction_analysis != nullptr);
}
bool InductionVarRange::GetInductionRange(HInstruction* context,
HInstruction* instruction,
+ HInstruction* chase_hint,
/*out*/Value* min_val,
/*out*/Value* max_val,
/*out*/bool* needs_finite_test) {
- HLoopInformation* loop = context->GetBlock()->GetLoopInformation(); // closest enveloping loop
- if (loop == nullptr) {
- return false; // no loop
- }
- HInductionVarAnalysis::InductionInfo* info = induction_analysis_->LookupInfo(loop, instruction);
- if (info == nullptr) {
- return false; // no induction information
+ HLoopInformation* loop = nullptr;
+ HInductionVarAnalysis::InductionInfo* info = nullptr;
+ HInductionVarAnalysis::InductionInfo* trip = nullptr;
+ if (!HasInductionInfo(context, instruction, &loop, &info, &trip)) {
+ return false;
}
// Type int or lower (this is not too restrictive since intended clients, like
// bounds check elimination, will have truncated higher precision induction
@@ -148,45 +140,15 @@
default:
return false;
}
- // Set up loop information.
- HBasicBlock* header = loop->GetHeader();
- bool in_body = context->GetBlock() != header;
- HInductionVarAnalysis::InductionInfo* trip =
- induction_analysis_->LookupInfo(loop, header->GetLastInstruction());
// Find range.
+ chase_hint_ = chase_hint;
+ bool in_body = context->GetBlock() != loop->GetHeader();
*min_val = GetVal(info, trip, in_body, /* is_min */ true);
*max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false));
*needs_finite_test = NeedsTripCount(info) && IsUnsafeTripCount(trip);
return true;
}
-bool InductionVarRange::RefineOuter(/*in-out*/ Value* min_val,
- /*in-out*/ Value* max_val) const {
- if (min_val->instruction != nullptr || max_val->instruction != nullptr) {
- Value v1_min = RefineOuter(*min_val, /* is_min */ true);
- Value v2_max = RefineOuter(*max_val, /* is_min */ false);
- // The refined range is safe if both sides refine the same instruction. Otherwise, since two
- // different ranges are combined, the new refined range is safe to pass back to the client if
- // the extremes of the computed ranges ensure no arithmetic wrap-around anomalies occur.
- if (min_val->instruction != max_val->instruction) {
- Value v1_max = RefineOuter(*min_val, /* is_min */ false);
- Value v2_min = RefineOuter(*max_val, /* is_min */ true);
- if (!IsConstantValue(v1_max) ||
- !IsConstantValue(v2_min) ||
- v1_max.b_constant > v2_min.b_constant) {
- return false;
- }
- }
- // Did something change?
- if (v1_min.instruction != min_val->instruction || v2_max.instruction != max_val->instruction) {
- *min_val = v1_min;
- *max_val = v2_max;
- return true;
- }
- }
- return false;
-}
-
bool InductionVarRange::CanGenerateCode(HInstruction* context,
HInstruction* instruction,
/*out*/bool* needs_finite_test,
@@ -226,7 +188,7 @@
bool InductionVarRange::IsConstant(HInductionVarAnalysis::InductionInfo* info,
ConstantRequest request,
- /*out*/ int64_t *value) const {
+ /*out*/ int64_t* value) const {
if (info != nullptr) {
// A direct 32-bit or 64-bit constant fetch. This immediately satisfies
// any of the three requests (kExact, kAtMost, and KAtLeast).
@@ -236,27 +198,16 @@
return true;
}
}
- // Try range analysis while traversing outward on loops.
- bool in_body = true; // no known trip count
- Value v_min = GetVal(info, nullptr, in_body, /* is_min */ true);
- Value v_max = GetVal(info, nullptr, in_body, /* is_min */ false);
- do {
- // Make sure *both* extremes are known to avoid arithmetic wrap-around anomalies.
- if (IsConstantValue(v_min) && IsConstantValue(v_max) && v_min.b_constant <= v_max.b_constant) {
- if ((request == kExact && v_min.b_constant == v_max.b_constant) || request == kAtMost) {
- *value = v_max.b_constant;
- return true;
- } else if (request == kAtLeast) {
- *value = v_min.b_constant;
- return true;
- }
- }
- } while (RefineOuter(&v_min, &v_max));
- // Exploit array length + c >= c, with c <= 0 to avoid arithmetic wrap-around anomalies
- // (e.g. array length == maxint and c == 1 would yield minint).
- if (request == kAtLeast) {
- if (v_min.a_constant == 1 && v_min.b_constant <= 0 && v_min.instruction->IsArrayLength()) {
- *value = v_min.b_constant;
+ // Try range analysis on the invariant, but only on proper range to avoid wrap-around anomalies.
+ Value min_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ true);
+ Value max_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ false);
+ if (IsConstantValue(min_val) &&
+ IsConstantValue(max_val) && min_val.b_constant <= max_val.b_constant) {
+ if ((request == kExact && min_val.b_constant == max_val.b_constant) || request == kAtMost) {
+ *value = max_val.b_constant;
+ return true;
+ } else if (request == kAtLeast) {
+ *value = min_val.b_constant;
return true;
}
}
@@ -264,6 +215,51 @@
return false;
}
+bool InductionVarRange::HasInductionInfo(
+ HInstruction* context,
+ HInstruction* instruction,
+ /*out*/ HLoopInformation** loop,
+ /*out*/ HInductionVarAnalysis::InductionInfo** info,
+ /*out*/ HInductionVarAnalysis::InductionInfo** trip) const {
+ HLoopInformation* l = context->GetBlock()->GetLoopInformation(); // closest enveloping loop
+ if (l != nullptr) {
+ HInductionVarAnalysis::InductionInfo* i = induction_analysis_->LookupInfo(l, instruction);
+ if (i != nullptr) {
+ *loop = l;
+ *info = i;
+ *trip = induction_analysis_->LookupInfo(l, l->GetHeader()->GetLastInstruction());
+ return true;
+ }
+ }
+ return false;
+}
+
+bool InductionVarRange::IsWellBehavedTripCount(HInductionVarAnalysis::InductionInfo* trip) const {
+ if (trip != nullptr) {
+ // Both bounds that define a trip-count are well-behaved if they either are not defined
+ // in any loop, or are contained in a proper interval. This allows finding the min/max
+ // of an expression by chasing outward.
+ InductionVarRange range(induction_analysis_);
+ HInductionVarAnalysis::InductionInfo* lower = trip->op_b->op_a;
+ HInductionVarAnalysis::InductionInfo* upper = trip->op_b->op_b;
+ int64_t not_used = 0;
+ return (!HasFetchInLoop(lower) || range.IsConstant(lower, kAtLeast, &not_used)) &&
+ (!HasFetchInLoop(upper) || range.IsConstant(upper, kAtLeast, &not_used));
+ }
+ return true;
+}
+
+bool InductionVarRange::HasFetchInLoop(HInductionVarAnalysis::InductionInfo* info) const {
+ if (info != nullptr) {
+ if (info->induction_class == HInductionVarAnalysis::kInvariant &&
+ info->operation == HInductionVarAnalysis::kFetch) {
+ return info->fetch->GetBlock()->GetLoopInformation() != nullptr;
+ }
+ return HasFetchInLoop(info->op_a) || HasFetchInLoop(info->op_b);
+ }
+ return false;
+}
+
bool InductionVarRange::NeedsTripCount(HInductionVarAnalysis::InductionInfo* info) const {
if (info != nullptr) {
if (info->induction_class == HInductionVarAnalysis::kLinear) {
@@ -299,13 +295,13 @@
HInductionVarAnalysis::InductionInfo* trip,
bool in_body,
bool is_min) const {
- // Detect common situation where an offset inside the trip count cancels out during range
+ // Detect common situation where an offset inside the trip-count cancels out during range
// analysis (finding max a * (TC - 1) + OFFSET for a == 1 and TC = UPPER - OFFSET or finding
// min a * (TC - 1) + OFFSET for a == -1 and TC = OFFSET - UPPER) to avoid losing information
// with intermediate results that only incorporate single instructions.
if (trip != nullptr) {
HInductionVarAnalysis::InductionInfo* trip_expr = trip->op_a;
- if (trip_expr->operation == HInductionVarAnalysis::kSub) {
+ if (trip_expr->type == info->type && trip_expr->operation == HInductionVarAnalysis::kSub) {
int64_t stride_value = 0;
if (IsConstant(info->op_a, kExact, &stride_value)) {
if (!is_min && stride_value == 1) {
@@ -349,12 +345,25 @@
HInductionVarAnalysis::InductionInfo* trip,
bool in_body,
bool is_min) const {
- // Detect constants and chase the fetch a bit deeper into the HIR tree, so that it becomes
- // more likely range analysis will compare the same instructions as terminal nodes.
+ // Stop chasing the instruction at constant or hint.
int64_t value;
- if (IsIntAndGet(instruction, &value) && CanLongValueFitIntoInt(value)) {
+ if (IsIntAndGet(instruction, &value) && CanLongValueFitIntoInt(value)) {
return Value(static_cast<int32_t>(value));
- } else if (instruction->IsAdd()) {
+ } else if (instruction == chase_hint_) {
+ return Value(instruction, 1, 0);
+ }
+ // Special cases when encountering a single instruction that denotes trip count in the
+ // loop-body: min is 1 and, when chasing constants, max of safe trip-count is max int.
+ if (in_body && trip != nullptr && instruction == trip->op_a->fetch) {
+ if (is_min) {
+ return Value(1);
+ } else if (chase_hint_ == nullptr && !IsUnsafeTripCount(trip)) {
+ return Value(std::numeric_limits<int32_t>::max());
+ }
+ }
+ // Chase the instruction a bit deeper into the HIR tree, so that it becomes more likely
+ // range analysis will compare the same instructions as terminal nodes.
+ if (instruction->IsAdd()) {
if (IsIntAndGet(instruction->InputAt(0), &value) && CanLongValueFitIntoInt(value)) {
return AddValue(Value(static_cast<int32_t>(value)),
GetFetch(instruction->InputAt(1), trip, in_body, is_min));
@@ -362,19 +371,35 @@
return AddValue(GetFetch(instruction->InputAt(0), trip, in_body, is_min),
Value(static_cast<int32_t>(value)));
}
- } else if (instruction->IsArrayLength() && instruction->InputAt(0)->IsNewArray()) {
- return GetFetch(instruction->InputAt(0)->InputAt(0), trip, in_body, is_min);
+ } else if (instruction->IsArrayLength()) {
+ // Return extreme values when chasing constants. Otherwise, chase deeper.
+ if (chase_hint_ == nullptr) {
+ return is_min ? Value(0) : Value(std::numeric_limits<int32_t>::max());
+ } else if (instruction->InputAt(0)->IsNewArray()) {
+ return GetFetch(instruction->InputAt(0)->InputAt(0), trip, in_body, is_min);
+ }
} else if (instruction->IsTypeConversion()) {
// Since analysis is 32-bit (or narrower) we allow a widening along the path.
if (instruction->AsTypeConversion()->GetInputType() == Primitive::kPrimInt &&
instruction->AsTypeConversion()->GetResultType() == Primitive::kPrimLong) {
return GetFetch(instruction->InputAt(0), trip, in_body, is_min);
}
- } else if (is_min) {
- // Special case for finding minimum: minimum of trip-count in loop-body is 1.
- if (trip != nullptr && in_body && instruction == trip->op_a->fetch) {
- return Value(1);
- }
+ }
+ // Chase an invariant fetch that is defined by an outer loop if the trip-count used
+ // so far is well-behaved in both bounds and the next trip-count is safe.
+ // Example:
+ // for (int i = 0; i <= 100; i++) // safe
+ // for (int j = 0; j <= i; j++) // well-behaved
+ // j is in range [0, i ] (if i is chase hint)
+ // or in range [0, 100] (otherwise)
+ HLoopInformation* next_loop = nullptr;
+ HInductionVarAnalysis::InductionInfo* next_info = nullptr;
+ HInductionVarAnalysis::InductionInfo* next_trip = nullptr;
+ bool next_in_body = true; // inner loop is always in body of outer loop
+ if (HasInductionInfo(instruction, instruction, &next_loop, &next_info, &next_trip) &&
+ IsWellBehavedTripCount(trip) &&
+ !IsUnsafeTripCount(next_trip)) {
+ return GetVal(next_info, next_trip, next_in_body, is_min);
}
return Value(instruction, 1, 0);
}
@@ -421,9 +446,8 @@
break;
}
break;
- case HInductionVarAnalysis::kLinear: {
+ case HInductionVarAnalysis::kLinear:
return CorrectForType(GetLinear(info, trip, in_body, is_min), info->type);
- }
case HInductionVarAnalysis::kWrapAround:
case HInductionVarAnalysis::kPeriodic:
return MergeVal(GetVal(info->op_a, trip, in_body, is_min),
@@ -438,20 +462,18 @@
HInductionVarAnalysis::InductionInfo* trip,
bool in_body,
bool is_min) const {
+ // Constant times range.
+ int64_t value = 0;
+ if (IsConstant(info1, kExact, &value)) {
+ return MulRangeAndConstant(value, info2, trip, in_body, is_min);
+ } else if (IsConstant(info2, kExact, &value)) {
+ return MulRangeAndConstant(value, info1, trip, in_body, is_min);
+ }
+ // Interval ranges.
Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
- // Try to refine first operand.
- if (!IsConstantValue(v1_min) && !IsConstantValue(v1_max)) {
- RefineOuter(&v1_min, &v1_max);
- }
- // Constant times range.
- if (IsSameConstantValue(v1_min, v1_max)) {
- return MulRangeAndConstant(v2_min, v2_max, v1_min, is_min);
- } else if (IsSameConstantValue(v2_min, v2_max)) {
- return MulRangeAndConstant(v1_min, v1_max, v2_min, is_min);
- }
// Positive range vs. positive or negative range.
if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
@@ -476,14 +498,16 @@
HInductionVarAnalysis::InductionInfo* trip,
bool in_body,
bool is_min) const {
+ // Range divided by constant.
+ int64_t value = 0;
+ if (IsConstant(info2, kExact, &value)) {
+ return DivRangeAndConstant(value, info1, trip, in_body, is_min);
+ }
+ // Interval ranges.
Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
- // Range divided by constant.
- if (IsSameConstantValue(v2_min, v2_max)) {
- return DivRangeAndConstant(v1_min, v1_max, v2_min, is_min);
- }
// Positive range vs. positive or negative range.
if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
@@ -503,18 +527,30 @@
return Value();
}
-InductionVarRange::Value InductionVarRange::MulRangeAndConstant(Value v_min,
- Value v_max,
- Value c,
- bool is_min) const {
- return is_min == (c.b_constant >= 0) ? MulValue(v_min, c) : MulValue(v_max, c);
+InductionVarRange::Value InductionVarRange::MulRangeAndConstant(
+ int64_t value,
+ HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip,
+ bool in_body,
+ bool is_min) const {
+ if (CanLongValueFitIntoInt(value)) {
+ Value c(static_cast<int32_t>(value));
+ return MulValue(GetVal(info, trip, in_body, is_min == value >= 0), c);
+ }
+ return Value();
}
-InductionVarRange::Value InductionVarRange::DivRangeAndConstant(Value v_min,
- Value v_max,
- Value c,
- bool is_min) const {
- return is_min == (c.b_constant >= 0) ? DivValue(v_min, c) : DivValue(v_max, c);
+InductionVarRange::Value InductionVarRange::DivRangeAndConstant(
+ int64_t value,
+ HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip,
+ bool in_body,
+ bool is_min) const {
+ if (CanLongValueFitIntoInt(value)) {
+ Value c(static_cast<int32_t>(value));
+ return DivValue(GetVal(info, trip, in_body, is_min == value >= 0), c);
+ }
+ return Value();
}
InductionVarRange::Value InductionVarRange::AddValue(Value v1, Value v2) const {
@@ -580,28 +616,6 @@
return Value();
}
-InductionVarRange::Value InductionVarRange::RefineOuter(Value v, bool is_min) const {
- if (v.instruction == nullptr) {
- return v; // nothing to refine
- }
- HLoopInformation* loop =
- v.instruction->GetBlock()->GetLoopInformation(); // closest enveloping loop
- if (loop == nullptr) {
- return v; // no loop
- }
- HInductionVarAnalysis::InductionInfo* info = induction_analysis_->LookupInfo(loop, v.instruction);
- if (info == nullptr) {
- return v; // no induction information
- }
- // Set up loop information.
- HBasicBlock* header = loop->GetHeader();
- bool in_body = true; // inner always in more outer
- HInductionVarAnalysis::InductionInfo* trip =
- induction_analysis_->LookupInfo(loop, header->GetLastInstruction());
- // Try to refine "a x instruction + b" with outer loop range information on instruction.
- return AddValue(MulValue(Value(v.a_constant), GetVal(info, trip, in_body, is_min)), Value(v.b_constant));
-}
-
bool InductionVarRange::GenerateCode(HInstruction* context,
HInstruction* instruction,
HGraph* graph,
@@ -611,27 +625,18 @@
/*out*/HInstruction** taken_test,
/*out*/bool* needs_finite_test,
/*out*/bool* needs_taken_test) const {
- HLoopInformation* loop = context->GetBlock()->GetLoopInformation(); // closest enveloping loop
- if (loop == nullptr) {
- return false; // no loop
- }
- HInductionVarAnalysis::InductionInfo* info = induction_analysis_->LookupInfo(loop, instruction);
- if (info == nullptr) {
- return false; // no induction information
- }
- // Set up loop information.
- HBasicBlock* header = loop->GetHeader();
- bool in_body = context->GetBlock() != header;
- HInductionVarAnalysis::InductionInfo* trip =
- induction_analysis_->LookupInfo(loop, header->GetLastInstruction());
- if (trip == nullptr) {
- return false; // codegen relies on trip count
+ HLoopInformation* loop = nullptr;
+ HInductionVarAnalysis::InductionInfo* info = nullptr;
+ HInductionVarAnalysis::InductionInfo* trip = nullptr;
+ if (!HasInductionInfo(context, instruction, &loop, &info, &trip) || trip == nullptr) {
+ return false; // codegen needs all information, including trip count
}
// Determine what tests are needed. A finite test is needed if the evaluation code uses the
// trip-count and the loop may be unsafe (because in such cases, the index could "overshoot"
// the computed range). A taken test is needed for any unknown trip-count, even if evaluation
// code does not use the trip-count explicitly (since there could be an implicit relation
// between e.g. an invariant subscript and a not-taken condition).
+ bool in_body = context->GetBlock() != loop->GetHeader();
*needs_finite_test = NeedsTripCount(info) && IsUnsafeTripCount(trip);
*needs_taken_test = IsBodyTripCount(trip);
// Code generation for taken test: generate the code when requested or otherwise analyze
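
The outer-loop chase added to `GetFetch` is easiest to see on the example from its own comment: `i` ranges over `[0, 100]` and bounds `j`, so chasing the fetch of `i` into the outer loop widens `j`'s range from `[0, i]` to `[0, 100]` when no chase hint pins the analysis at `i`. A tiny standalone program that brute-force confirms those extremes:

```cpp
// Brute-force check of the nested-loop example from the GetFetch
// comment: with no chase hint, range analysis may report j in
// [0, 100]; this loop confirms those are the actual extremes.
#include <algorithm>
#include <climits>
#include <iostream>

int main() {
  int j_min = INT_MAX;
  int j_max = INT_MIN;
  for (int i = 0; i <= 100; ++i) {    // safe trip count
    for (int j = 0; j <= i; ++j) {    // well-behaved bounds
      j_min = std::min(j_min, j);
      j_max = std::max(j_max, j);
    }
  }
  std::cout << "[" << j_min << ", " << j_max << "]\n";  // [0, 100]
}
```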
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 0af4156..00aaa16 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -57,21 +57,19 @@
explicit InductionVarRange(HInductionVarAnalysis* induction);
/**
- * Given a context denoted by the first instruction, returns a possibly conservative
- * lower and upper bound on the instruction's value in the output parameters min_val
- * and max_val, respectively. The need_finite_test flag denotes if an additional finite-test
- * is needed to protect the range evaluation inside its loop. Returns false on failure.
+ * Given a context denoted by the first instruction, returns a possibly conservative lower
+ * and upper bound on the instruction's value in the output parameters min_val and max_val,
+ * respectively. The need_finite_test flag denotes if an additional finite-test is needed
+ * to protect the range evaluation inside its loop. The parameter chase_hint defines an
+ * instruction at which chasing may stop. Returns false on failure.
*/
bool GetInductionRange(HInstruction* context,
HInstruction* instruction,
+ HInstruction* chase_hint,
/*out*/ Value* min_val,
/*out*/ Value* max_val,
/*out*/ bool* needs_finite_test);
- /** Refines the values with induction of next outer loop. Returns true on change. */
- bool RefineOuter(/*in-out*/ Value* min_val,
- /*in-out*/ Value* max_val) const;
-
/**
* Returns true if range analysis is able to generate code for the lower and upper
* bound expressions on the instruction in the given context. The need_finite_test
@@ -132,11 +130,20 @@
*/
bool IsConstant(HInductionVarAnalysis::InductionInfo* info,
ConstantRequest request,
- /*out*/ int64_t *value) const;
+ /*out*/ int64_t* value) const;
+ /** Returns whether induction information can be obtained. */
+ bool HasInductionInfo(HInstruction* context,
+ HInstruction* instruction,
+ /*out*/ HLoopInformation** loop,
+ /*out*/ HInductionVarAnalysis::InductionInfo** info,
+ /*out*/ HInductionVarAnalysis::InductionInfo** trip) const;
+
+ bool HasFetchInLoop(HInductionVarAnalysis::InductionInfo* info) const;
bool NeedsTripCount(HInductionVarAnalysis::InductionInfo* info) const;
bool IsBodyTripCount(HInductionVarAnalysis::InductionInfo* trip) const;
bool IsUnsafeTripCount(HInductionVarAnalysis::InductionInfo* trip) const;
+ bool IsWellBehavedTripCount(HInductionVarAnalysis::InductionInfo* trip) const;
Value GetLinear(HInductionVarAnalysis::InductionInfo* info,
HInductionVarAnalysis::InductionInfo* trip,
@@ -161,8 +168,16 @@
bool in_body,
bool is_min) const;
- Value MulRangeAndConstant(Value v1, Value v2, Value c, bool is_min) const;
- Value DivRangeAndConstant(Value v1, Value v2, Value c, bool is_min) const;
+ Value MulRangeAndConstant(int64_t value,
+ HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip,
+ bool in_body,
+ bool is_min) const;
+ Value DivRangeAndConstant(int64_t value,
+ HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip,
+ bool in_body,
+ bool is_min) const;
Value AddValue(Value v1, Value v2) const;
Value SubValue(Value v1, Value v2) const;
@@ -171,12 +186,6 @@
Value MergeVal(Value v1, Value v2, bool is_min) const;
/**
- * Returns refined value using induction of next outer loop or the input value if no
- * further refinement is possible.
- */
- Value RefineOuter(Value val, bool is_min) const;
-
- /**
* Generates code for lower/upper/taken-test in the HIR. Returns true on success.
* With values nullptr, the method can be used to determine if code generation
* would be successful without generating actual code yet.
@@ -200,7 +209,10 @@
bool is_min) const;
/** Results of prior induction variable analysis. */
- HInductionVarAnalysis *induction_analysis_;
+ HInductionVarAnalysis* induction_analysis_;
+
+ /** Instruction at which chasing may stop. */
+ HInstruction* chase_hint_;
friend class HInductionVarAnalysis;
friend class InductionVarRangeTest;
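
Throughout this header, a bound is a `Value` encoding `a_constant * instruction + b_constant`, with `is_known == false` meaning no bound could be derived (the fields appear as `v.is_known`, `v.a_constant`, `v.b_constant`, and `v.instruction` in the .cc changes above). A self-contained sketch of that encoding, using simplified stand-in types rather than the ART classes:

```cpp
// Sketch of the Value encoding: a bound is
// "a_constant * instruction + b_constant"; is_known == false means
// "no bound". Test expectations like Value(x_, 1, -1) read as "x - 1".
#include <iostream>

struct Instruction { const char* name; };

struct Value {
  Value() : is_known(false), instruction(nullptr), a_constant(0), b_constant(0) {}
  explicit Value(int b) : is_known(true), instruction(nullptr), a_constant(0), b_constant(b) {}
  Value(const Instruction* i, int a, int b)
      : is_known(true), instruction(i), a_constant(a), b_constant(b) {}
  bool is_known;
  const Instruction* instruction;
  int a_constant;
  int b_constant;
};

void Print(const Value& v) {
  if (!v.is_known) { std::cout << "unknown\n"; return; }
  if (v.instruction != nullptr) {
    std::cout << v.a_constant << " * " << v.instruction->name << " + ";
  }
  std::cout << v.b_constant << "\n";
}

int main() {
  Instruction x{"x"};
  Print(Value(1000));       // constant bound: 1000
  Print(Value(&x, 1, -1));  // symbolic bound: 1 * x + -1
  Print(Value());           // unknown
}
```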
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index dc04dc2..4ea170f 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -66,6 +66,8 @@
entry_block_->AddInstruction(x_);
y_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimInt);
entry_block_->AddInstruction(y_);
+ // Set arbitrary range analysis hint while testing private methods.
+ SetHint(x_);
}
/** Constructs loop with given upper bound. */
@@ -111,6 +113,11 @@
iva_->Run();
}
+ /** Sets hint. */
+ void SetHint(HInstruction* hint) {
+ range_.chase_hint_ = hint;
+ }
+
/** Constructs an invariant. */
HInductionVarAnalysis::InductionInfo* CreateInvariant(char opc,
HInductionVarAnalysis::InductionInfo* a,
@@ -122,6 +129,7 @@
case 'n': op = HInductionVarAnalysis::kNeg; break;
case '*': op = HInductionVarAnalysis::kMul; break;
case '/': op = HInductionVarAnalysis::kDiv; break;
+ case '<': op = HInductionVarAnalysis::kLT; break;
default: op = HInductionVarAnalysis::kNop; break;
}
return iva_->CreateInvariantOp(op, a, b);
@@ -137,22 +145,21 @@
return CreateFetch(graph_->GetIntConstant(c));
}
- /** Constructs a trip-count. */
+ /** Constructs a constant trip-count. */
HInductionVarAnalysis::InductionInfo* CreateTripCount(int32_t tc, bool in_loop, bool safe) {
- Primitive::Type type = Primitive::kPrimInt;
+ HInductionVarAnalysis::InductionOp op = HInductionVarAnalysis::kTripCountInBodyUnsafe;
if (in_loop && safe) {
- return iva_->CreateTripCount(
- HInductionVarAnalysis::kTripCountInLoop, CreateConst(tc), nullptr, type);
+ op = HInductionVarAnalysis::kTripCountInLoop;
} else if (in_loop) {
- return iva_->CreateTripCount(
- HInductionVarAnalysis::kTripCountInLoopUnsafe, CreateConst(tc), nullptr, type);
+ op = HInductionVarAnalysis::kTripCountInLoopUnsafe;
} else if (safe) {
- return iva_->CreateTripCount(
- HInductionVarAnalysis::kTripCountInBody, CreateConst(tc), nullptr, type);
- } else {
- return iva_->CreateTripCount(
- HInductionVarAnalysis::kTripCountInBodyUnsafe, CreateConst(tc), nullptr, type);
+ op = HInductionVarAnalysis::kTripCountInBody;
}
+ // Return TC with taken-test 0 < TC.
+ return iva_->CreateTripCount(op,
+ CreateConst(tc),
+ CreateInvariant('<', CreateConst(0), CreateConst(tc)),
+ Primitive::kPrimInt);
}
/** Constructs a linear a * i + b induction. */
@@ -197,13 +204,13 @@
}
Value GetMin(HInductionVarAnalysis::InductionInfo* info,
- HInductionVarAnalysis::InductionInfo* induc) {
- return range_.GetVal(info, induc, /* in_body */ true, /* is_min */ true);
+ HInductionVarAnalysis::InductionInfo* trip) {
+ return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ true);
}
Value GetMax(HInductionVarAnalysis::InductionInfo* info,
- HInductionVarAnalysis::InductionInfo* induc) {
- return range_.GetVal(info, induc, /* in_body */ true, /* is_min */ false);
+ HInductionVarAnalysis::InductionInfo* trip) {
+ return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ false);
}
Value GetMul(HInductionVarAnalysis::InductionInfo* info1,
@@ -558,6 +565,31 @@
ExpectEqual(Value(), MaxValue(Value(55), Value(y_, 1, -50)));
}
+TEST_F(InductionVarRangeTest, ArrayLengthAndHints) {
+ HInstruction* new_array = new (&allocator_)
+ HNewArray(x_,
+ graph_->GetCurrentMethod(),
+ 0, Primitive::kPrimInt,
+ graph_->GetDexFile(),
+ kQuickAllocArray);
+ entry_block_->AddInstruction(new_array);
+ HInstruction* array_length = new (&allocator_) HArrayLength(new_array, 0);
+ entry_block_->AddInstruction(array_length);
+ // With null hint: yields extreme constants.
+ const int32_t max_value = std::numeric_limits<int32_t>::max();
+ SetHint(nullptr);
+ ExpectEqual(Value(0), GetMin(CreateFetch(array_length), nullptr));
+ ExpectEqual(Value(max_value), GetMax(CreateFetch(array_length), nullptr));
+ // With explicit hint: yields the length instruction.
+ SetHint(array_length);
+ ExpectEqual(Value(array_length, 1, 0), GetMin(CreateFetch(array_length), nullptr));
+ ExpectEqual(Value(array_length, 1, 0), GetMax(CreateFetch(array_length), nullptr));
+ // With any non-null hint: chases beyond the length instruction.
+ SetHint(x_);
+ ExpectEqual(Value(x_, 1, 0), GetMin(CreateFetch(array_length), nullptr));
+ ExpectEqual(Value(x_, 1, 0), GetMax(CreateFetch(array_length), nullptr));
+}
+
//
// Tests on public methods.
//
@@ -570,23 +602,20 @@
bool needs_finite_test = true;
// In context of header: known.
- range_.GetInductionRange(condition_, condition_->InputAt(0), &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(condition_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(0), v1);
ExpectEqual(Value(1000), v2);
- EXPECT_FALSE(range_.RefineOuter(&v1, &v2));
// In context of loop-body: known.
- range_.GetInductionRange(increment_, condition_->InputAt(0), &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(increment_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(0), v1);
ExpectEqual(Value(999), v2);
- EXPECT_FALSE(range_.RefineOuter(&v1, &v2));
- range_.GetInductionRange(increment_, increment_, &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(increment_, increment_, x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(1), v1);
ExpectEqual(Value(1000), v2);
- EXPECT_FALSE(range_.RefineOuter(&v1, &v2));
}
TEST_F(InductionVarRangeTest, ConstantTripCountDown) {
@@ -597,23 +626,20 @@
bool needs_finite_test = true;
// In context of header: known.
- range_.GetInductionRange(condition_, condition_->InputAt(0), &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(condition_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(0), v1);
ExpectEqual(Value(1000), v2);
- EXPECT_FALSE(range_.RefineOuter(&v1, &v2));
// In context of loop-body: known.
- range_.GetInductionRange(increment_, condition_->InputAt(0), &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(increment_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(1), v1);
ExpectEqual(Value(1000), v2);
- EXPECT_FALSE(range_.RefineOuter(&v1, &v2));
- range_.GetInductionRange(increment_, increment_, &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(increment_, increment_, x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(0), v1);
ExpectEqual(Value(999), v2);
- EXPECT_FALSE(range_.RefineOuter(&v1, &v2));
}
TEST_F(InductionVarRangeTest, SymbolicTripCountUp) {
@@ -625,23 +651,20 @@
bool needs_taken_test = true;
// In context of header: upper unknown.
- range_.GetInductionRange(condition_, condition_->InputAt(0), &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(condition_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(0), v1);
ExpectEqual(Value(), v2);
- EXPECT_FALSE(range_.RefineOuter(&v1, &v2));
// In context of loop-body: known.
- range_.GetInductionRange(increment_, condition_->InputAt(0), &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(increment_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(0), v1);
ExpectEqual(Value(x_, 1, -1), v2);
- EXPECT_FALSE(range_.RefineOuter(&v1, &v2));
- range_.GetInductionRange(increment_, increment_, &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(increment_, increment_, x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(1), v1);
ExpectEqual(Value(x_, 1, 0), v2);
- EXPECT_FALSE(range_.RefineOuter(&v1, &v2));
HInstruction* lower = nullptr;
HInstruction* upper = nullptr;
@@ -695,23 +718,20 @@
bool needs_taken_test = true;
// In context of header: lower unknown.
- range_.GetInductionRange(condition_, condition_->InputAt(0), &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(condition_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(), v1);
ExpectEqual(Value(1000), v2);
- EXPECT_FALSE(range_.RefineOuter(&v1, &v2));
// In context of loop-body: known.
- range_.GetInductionRange(increment_, condition_->InputAt(0), &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(increment_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(x_, 1, 1), v1);
ExpectEqual(Value(1000), v2);
- EXPECT_FALSE(range_.RefineOuter(&v1, &v2));
- range_.GetInductionRange(increment_, increment_, &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(increment_, increment_, x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(x_, 1, 0), v1);
ExpectEqual(Value(999), v2);
- EXPECT_FALSE(range_.RefineOuter(&v1, &v2));
HInstruction* lower = nullptr;
HInstruction* upper = nullptr;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d98dd06..abc8d57 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3167,7 +3167,7 @@
}
private:
- template <typename T> bool Compute(T x, T y) const { return x == y; }
+ template <typename T> static bool Compute(T x, T y) { return x == y; }
DISALLOW_COPY_AND_ASSIGN(HEqual);
};
@@ -3210,7 +3210,7 @@
}
private:
- template <typename T> bool Compute(T x, T y) const { return x != y; }
+ template <typename T> static bool Compute(T x, T y) { return x != y; }
DISALLOW_COPY_AND_ASSIGN(HNotEqual);
};
@@ -3247,7 +3247,7 @@
}
private:
- template <typename T> bool Compute(T x, T y) const { return x < y; }
+ template <typename T> static bool Compute(T x, T y) { return x < y; }
DISALLOW_COPY_AND_ASSIGN(HLessThan);
};
@@ -3284,7 +3284,7 @@
}
private:
- template <typename T> bool Compute(T x, T y) const { return x <= y; }
+ template <typename T> static bool Compute(T x, T y) { return x <= y; }
DISALLOW_COPY_AND_ASSIGN(HLessThanOrEqual);
};
@@ -3321,7 +3321,7 @@
}
private:
- template <typename T> bool Compute(T x, T y) const { return x > y; }
+ template <typename T> static bool Compute(T x, T y) { return x > y; }
DISALLOW_COPY_AND_ASSIGN(HGreaterThan);
};
@@ -3358,7 +3358,7 @@
}
private:
- template <typename T> bool Compute(T x, T y) const { return x >= y; }
+ template <typename T> static bool Compute(T x, T y) { return x >= y; }
DISALLOW_COPY_AND_ASSIGN(HGreaterThanOrEqual);
};
@@ -3396,7 +3396,7 @@
}
private:
- template <typename T> bool Compute(T x, T y) const {
+ template <typename T> static bool Compute(T x, T y) {
return MakeUnsigned(x) < MakeUnsigned(y);
}
@@ -3436,7 +3436,7 @@
}
private:
- template <typename T> bool Compute(T x, T y) const {
+ template <typename T> static bool Compute(T x, T y) {
return MakeUnsigned(x) <= MakeUnsigned(y);
}
@@ -3476,7 +3476,7 @@
}
private:
- template <typename T> bool Compute(T x, T y) const {
+ template <typename T> static bool Compute(T x, T y) {
return MakeUnsigned(x) > MakeUnsigned(y);
}
@@ -3516,7 +3516,7 @@
}
private:
- template <typename T> bool Compute(T x, T y) const {
+ template <typename T> static bool Compute(T x, T y) {
return MakeUnsigned(x) >= MakeUnsigned(y);
}
@@ -4189,7 +4189,7 @@
DCHECK_EQ(result_type, Primitive::PrimitiveKind(input->GetType()));
}
- template <typename T> T Compute(T x) const { return -x; }
+ template <typename T> static T Compute(T x) { return -x; }
HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
@@ -4259,7 +4259,7 @@
bool IsCommutative() const OVERRIDE { return true; }
- template <typename T> T Compute(T x, T y) const { return x + y; }
+ template <typename T> static T Compute(T x, T y) { return x + y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
@@ -4292,7 +4292,7 @@
uint32_t dex_pc = kNoDexPc)
: HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
- template <typename T> T Compute(T x, T y) const { return x - y; }
+ template <typename T> static T Compute(T x, T y) { return x - y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
@@ -4327,7 +4327,7 @@
bool IsCommutative() const OVERRIDE { return true; }
- template <typename T> T Compute(T x, T y) const { return x * y; }
+ template <typename T> static T Compute(T x, T y) { return x * y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
@@ -4493,7 +4493,7 @@
}
template <typename T>
- T Compute(T value, int32_t distance, int32_t max_shift_distance) const {
+ static T Compute(T value, int32_t distance, int32_t max_shift_distance) {
return value << (distance & max_shift_distance);
}
@@ -4539,7 +4539,7 @@
}
template <typename T>
- T Compute(T value, int32_t distance, int32_t max_shift_distance) const {
+ static T Compute(T value, int32_t distance, int32_t max_shift_distance) {
return value >> (distance & max_shift_distance);
}
@@ -4585,7 +4585,7 @@
}
template <typename T>
- T Compute(T value, int32_t distance, int32_t max_shift_distance) const {
+ static T Compute(T value, int32_t distance, int32_t max_shift_distance) {
typedef typename std::make_unsigned<T>::type V;
V ux = static_cast<V>(value);
return static_cast<T>(ux >> (distance & max_shift_distance));
@@ -4631,7 +4631,7 @@
bool IsCommutative() const OVERRIDE { return true; }
- template <typename T> T Compute(T x, T y) const { return x & y; }
+ template <typename T> static T Compute(T x, T y) { return x & y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
@@ -4668,7 +4668,7 @@
bool IsCommutative() const OVERRIDE { return true; }
- template <typename T> T Compute(T x, T y) const { return x | y; }
+ template <typename T> static T Compute(T x, T y) { return x | y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
@@ -4705,7 +4705,7 @@
bool IsCommutative() const OVERRIDE { return true; }
- template <typename T> T Compute(T x, T y) const { return x ^ y; }
+ template <typename T> static T Compute(T x, T y) { return x ^ y; }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
@@ -4741,7 +4741,7 @@
}
template <typename T>
- T Compute(T value, int32_t distance, int32_t max_shift_value) const {
+ static T Compute(T value, int32_t distance, int32_t max_shift_value) {
typedef typename std::make_unsigned<T>::type V;
V ux = static_cast<V>(value);
if ((distance & max_shift_value) == 0) {
@@ -4837,7 +4837,7 @@
return true;
}
- template <typename T> T Compute(T x) const { return ~x; }
+ template <typename T> static T Compute(T x) { return ~x; }
HConstant* Evaluate(HIntConstant* x) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
@@ -4870,7 +4870,7 @@
return true;
}
- template <typename T> bool Compute(T x) const {
+ template <typename T> static bool Compute(T x) {
DCHECK(IsUint<1>(x)) << x;
return !x;
}
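
Making the `Compute` helpers `static` documents that they are pure functions of their operands and makes them callable without an instruction instance. A minimal sketch of the pattern, with a simplified stand-in class rather than the ART instruction hierarchy:

```cpp
// Why the Compute helpers can be static: they never touch instruction
// state, so they are usable as Class::Compute(x, y) with no instance,
// while instance methods (like the Evaluate overloads above) can
// still share the same helper.
#include <iostream>

class HAddLike {
 public:
  // Pure helper: no access to instance state.
  template <typename T> static T Compute(T x, T y) { return x + y; }

  int EvaluateInt(int x, int y) const { return Compute(x, y); }
};

int main() {
  // Constant folding without constructing an instruction object:
  std::cout << HAddLike::Compute(2, 3) << "\n";  // 5
  // Instance methods share the same helper:
  HAddLike add;
  std::cout << add.EvaluateInt(40, 2) << "\n";   // 42
}
```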
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 6188883..93351e9 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -154,33 +154,44 @@
CHECK(android_root != nullptr);
argv.push_back("--android-root=" + std::string(android_root));
- std::string command_line(Join(argv, ' '));
+ int link[2];
- // We need to fix up the '&' being used for "do not check classpath."
- size_t ampersand = command_line.find(" &");
- CHECK_NE(ampersand, std::string::npos);
- command_line = command_line.replace(ampersand, 2, " \\&");
+ if (pipe(link) == -1) {
+ return false;
+ }
- command_line += " 2>&1";
+ pid_t pid = fork();
+ if (pid == -1) {
+ return false;
+ }
- // We need dex2oat to actually log things.
- setenv("ANDROID_LOG_TAGS", "*:d", 1);
-
- FILE* pipe = popen(command_line.c_str(), "r");
-
- setenv("ANDROID_LOG_TAGS", "*:e", 1);
-
- if (pipe == nullptr) {
- success_ = false;
- } else {
- char buffer[128];
-
- while (fgets(buffer, 128, pipe) != nullptr) {
- output_ += buffer;
+ if (pid == 0) {
+ // We need dex2oat to actually log things.
+ setenv("ANDROID_LOG_TAGS", "*:d", 1);
+ dup2(link[1], STDERR_FILENO);
+ close(link[0]);
+ close(link[1]);
+ std::vector<const char*> c_args;
+ for (const std::string& str : argv) {
+ c_args.push_back(str.c_str());
}
+ c_args.push_back(nullptr);
+ execv(c_args[0], const_cast<char* const*>(c_args.data()));
+ exit(1);
+ } else {
+ close(link[1]);
+ char buffer[128];
+ memset(buffer, 0, 128);
+ ssize_t bytes_read = 0;
- int result = pclose(pipe);
- success_ = result == 0;
+ while (TEMP_FAILURE_RETRY(bytes_read = read(link[0], buffer, 128)) > 0) {
+ output_ += std::string(buffer, bytes_read);
+ }
+ close(link[0]);
+ int status = 0;
+ if (waitpid(pid, &status, 0) != -1) {
+ success_ = (status == 0);
+ }
}
return success_;
}
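
The test now replaces `popen` with an explicit `pipe`/`fork`/`execv`, which avoids shell quoting altogether (note the old `&` fix-up that is deleted above) and captures the child's stderr directly. A minimal standalone sketch of the same pattern, using `/bin/sh -c` as a stand-in for the dex2oat binary and with error handling abbreviated:

```cpp
// pipe/fork/exec sketch: run a child program, redirect its stderr
// into a pipe, read the output in the parent, then reap the child.
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>
#include <string>

int main() {
  int link[2];
  if (pipe(link) == -1) return 1;
  pid_t pid = fork();
  if (pid == -1) return 1;
  if (pid == 0) {                    // child
    dup2(link[1], STDERR_FILENO);    // stderr -> pipe write end
    close(link[0]);
    close(link[1]);
    execl("/bin/sh", "sh", "-c", "echo hello >&2", (char*)nullptr);
    _exit(1);                        // only reached if exec fails
  }
  close(link[1]);                    // parent keeps the read end
  std::string output;
  char buffer[128];
  ssize_t n;
  while ((n = read(link[0], buffer, sizeof(buffer))) > 0) {
    output.append(buffer, n);
  }
  close(link[0]);
  int status = 0;
  waitpid(pid, &status, 0);
  std::printf("child said: %s", output.c_str());
  return WIFEXITED(status) && WEXITSTATUS(status) == 0 ? 0 : 1;
}
```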
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index 754284c..7969a8f 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -44,7 +44,7 @@
uintptr_t* out_return_pc, uintptr_t* out_sp) {
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
- *out_sp = static_cast<uintptr_t>(sc->sc_regs[29]); // SP register
+ *out_sp = static_cast<uintptr_t>(sc->sc_regs[mips::SP]);
VLOG(signals) << "sp: " << *out_sp;
if (*out_sp == 0) {
return;
@@ -56,7 +56,7 @@
uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kMips));
if (overflow_addr == fault_addr) {
- *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[4]); // A0 register
+ *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips::A0]);
} else {
// The method is at the top of the stack.
*out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
@@ -82,12 +82,12 @@
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
- sc->sc_regs[31] = sc->sc_pc + 4; // RA needs to point to gc map location
+ sc->sc_regs[mips::RA] = sc->sc_pc + 4; // RA needs to point to gc map location
sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal);
- sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function
+ sc->sc_regs[mips::T9] = sc->sc_pc; // make sure T9 points to the function
// Pass the faulting address as the first argument of
// art_quick_throw_null_pointer_exception_from_signal.
- sc->sc_regs[0] = reinterpret_cast<uintptr_t>(info->si_addr);
+ sc->sc_regs[mips::A0] = reinterpret_cast<uintptr_t>(info->si_addr);
VLOG(signals) << "Generating null pointer exception";
return true;
}
@@ -116,7 +116,7 @@
VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
VLOG(signals) << "sigcontext: " << std::hex << sc;
- uintptr_t sp = sc->sc_regs[29]; // SP register
+ uintptr_t sp = sc->sc_regs[mips::SP];
VLOG(signals) << "sp: " << std::hex << sp;
uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr); // BVA addr
@@ -139,7 +139,7 @@
// caused this fault. This will be inserted into a callee save frame by
// the function to which this handler returns (art_quick_throw_stack_overflow).
sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
- sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function
+ sc->sc_regs[mips::T9] = sc->sc_pc; // make sure T9 points to the function
// The kernel will now return to the address in sc->arm_pc.
return true;
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index c9a32ad..0bbb6e1 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -44,7 +44,7 @@
uintptr_t* out_return_pc, uintptr_t* out_sp) {
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
- *out_sp = static_cast<uintptr_t>(sc->sc_regs[29]); // SP register
+ *out_sp = static_cast<uintptr_t>(sc->sc_regs[mips64::SP]);
VLOG(signals) << "sp: " << *out_sp;
if (*out_sp == 0) {
return;
@@ -56,7 +56,7 @@
uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kMips64));
if (overflow_addr == fault_addr) {
- *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[4]); // A0 register
+ *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips64::A0]);
} else {
// The method is at the top of the stack.
*out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
@@ -83,12 +83,12 @@
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
- sc->sc_regs[31] = sc->sc_pc + 4; // RA needs to point to gc map location
+ sc->sc_regs[mips64::RA] = sc->sc_pc + 4; // RA needs to point to gc map location
sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal);
- sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function
+ sc->sc_regs[mips64::T9] = sc->sc_pc; // make sure T9 points to the function
// Pass the faulting address as the first argument of
// art_quick_throw_null_pointer_exception_from_signal.
- sc->sc_regs[0] = reinterpret_cast<uintptr_t>(info->si_addr);
+ sc->sc_regs[mips64::A0] = reinterpret_cast<uintptr_t>(info->si_addr);
VLOG(signals) << "Generating null pointer exception";
return true;
}
@@ -117,7 +117,7 @@
VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
VLOG(signals) << "sigcontext: " << std::hex << sc;
- uintptr_t sp = sc->sc_regs[29]; // SP register
+ uintptr_t sp = sc->sc_regs[mips64::SP];
VLOG(signals) << "sp: " << std::hex << sp;
uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr); // BVA addr
@@ -140,7 +140,7 @@
// caused this fault. This will be inserted into a callee save frame by
// the function to which this handler returns (art_quick_throw_stack_overflow).
sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
- sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function
+ sc->sc_regs[mips64::T9] = sc->sc_pc; // make sure T9 points to the function
// The kernel will now return to the address in sc->arm_pc.
return true;
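
Both fault handlers now index `sc_regs` with named enumerators instead of raw numbers; the mapping (A0 = 4, T9 = 25, SP = 29, RA = 31) matches the comments the change removes. A standalone sketch of the readability win, with a hypothetical stand-in for ART's register enum:

```cpp
// Named register indices instead of magic numbers. The enum below is
// a hypothetical stand-in; the values follow the MIPS numbering noted
// in the deleted comments (SP register = 29, A0 register = 4, etc.).
#include <cstdint>
#include <iostream>

namespace mips {
enum Register { ZERO = 0, A0 = 4, T9 = 25, SP = 29, RA = 31 };
}  // namespace mips

int main() {
  uintptr_t regs[32] = {};
  regs[mips::SP] = 0x7fff0000;          // clearer than regs[29]
  regs[mips::RA] = regs[mips::T9] + 4;  // clearer than regs[31] / regs[25]
  std::cout << std::hex << regs[mips::SP] << "\n";
}
```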
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index fc62573..1b55d2f 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -448,23 +448,10 @@
: ClassLinker::kNoICCECheckForCache;
resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
}
+ // Resolution and access check.
if (UNLIKELY(resolved_method == nullptr)) {
DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
return nullptr; // Failure.
- } else if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
- if (UNLIKELY(resolved_method->GetDeclaringClass()->IsStringClass() &&
- resolved_method->IsConstructor())) {
- // Hack for String init:
- //
- // We assume that the input of String.<init> in verified code is always
- // an unitialized reference. If it is a null constant, it must have been
- // optimized out by the compiler. Do not throw NullPointerException.
- } else {
- // Maintain interpreter-like semantics where NullPointerException is thrown
- // after potential NoSuchMethodError from class linker.
- ThrowNullPointerExceptionForMethodAccess(method_idx, type);
- return nullptr; // Failure.
- }
} else if (access_check) {
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
bool can_access_resolved_method =
@@ -482,6 +469,22 @@
return nullptr; // Failure.
}
}
+ // Next, null pointer check.
+ if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
+ if (UNLIKELY(resolved_method->GetDeclaringClass()->IsStringClass() &&
+ resolved_method->IsConstructor())) {
+ // Hack for String init:
+ //
+ // We assume that the input of String.<init> in verified code is always
+ // an uninitialized reference. If it is a null constant, it must have been
+ // optimized out by the compiler. Do not throw NullPointerException.
+ } else {
+ // Maintain interpreter-like semantics where NullPointerException is thrown
+ // after potential NoSuchMethodError from class linker.
+ ThrowNullPointerExceptionForMethodAccess(method_idx, type);
+ return nullptr; // Failure.
+ }
+ }
switch (type) {
case kStatic:
case kDirect:
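
The hunk above moves the null-receiver check after both resolution and the access check, so NoSuchMethodError and IllegalAccessError take precedence over NullPointerException, matching interpreter semantics. A control-flow sketch with simplified stand-ins, not the ART types:

```cpp
// Ordering of failures after the reorder: resolution first, then the
// access check, and only then the null-receiver check.
#include <iostream>

enum class Failure { kNone, kNoSuchMethod, kIllegalAccess, kNullPointer };

Failure FindMethod(bool resolved, bool accessible, bool receiver_null, bool is_static) {
  if (!resolved) return Failure::kNoSuchMethod;                   // 1. resolution
  if (!accessible) return Failure::kIllegalAccess;                // 2. access check
  if (receiver_null && !is_static) return Failure::kNullPointer;  // 3. null check
  return Failure::kNone;
}

int main() {
  // A null receiver on an inaccessible method now reports the access
  // failure first, as the interpreter would.
  std::cout << static_cast<int>(FindMethod(true, false, true, false)) << "\n";  // 2
}
```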
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 64fa434..3011112 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -28,7 +28,7 @@
namespace gc {
namespace collector {
-inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegionOrImmuneSpace(
+inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
// For the Baker-style RB, in a rare case, we could incorrectly change the object from white
// to gray even though the object has already been marked through. This happens if a mutator
@@ -69,6 +69,37 @@
return ref;
}
+template<bool kGrayImmuneObject>
+inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
+ if (kUseBakerReadBarrier) {
+ // The GC-running thread doesn't (need to) gray immune objects except when updating thread roots
+ // in the thread flip on behalf of suspended threads (when gc_grays_immune_objects_ is
+ // true). Also, a mutator doesn't (need to) gray an immune object after GC has updated all
+ // immune space objects (when updated_all_immune_objects_ is true).
+ if (kIsDebugBuild) {
+ if (Thread::Current() == thread_running_gc_) {
+ DCHECK(!kGrayImmuneObject ||
+ updated_all_immune_objects_.LoadRelaxed() ||
+ gc_grays_immune_objects_);
+ } else {
+ DCHECK(kGrayImmuneObject);
+ }
+ }
+ if (!kGrayImmuneObject || updated_all_immune_objects_.LoadRelaxed()) {
+ return ref;
+ }
+ // This may or may not succeed, which is ok because the object may already be gray.
+ bool success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
+ ReadBarrier::GrayPtr());
+ if (success) {
+ MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
+ immune_gray_stack_.push_back(ref);
+ }
+ }
+ return ref;
+}
+
+template<bool kGrayImmuneObject>
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
if (from_ref == nullptr) {
return nullptr;
@@ -109,10 +140,14 @@
return to_ref;
}
case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
- return MarkUnevacFromSpaceRegionOrImmuneSpace(from_ref, region_space_bitmap_);
+ return MarkUnevacFromSpaceRegion(from_ref, region_space_bitmap_);
}
case space::RegionSpace::RegionType::kRegionTypeNone:
- return MarkNonMoving(from_ref);
+ if (immune_spaces_.ContainsObject(from_ref)) {
+ return MarkImmuneSpace<kGrayImmuneObject>(from_ref);
+ } else {
+ return MarkNonMoving(from_ref);
+ }
default:
UNREACHABLE();
}
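
`MarkImmuneSpace` grays a white immune object with an atomic compare-and-swap and only pushes it onto the gray stack when the CAS wins, so concurrent markers never enqueue an object twice. A standalone sketch of that transition, using `std::atomic` as a stand-in for the read-barrier state rather than the ART machinery:

```cpp
// White -> gray transition: the compare-exchange may fail if another
// thread already grayed the object, which is fine because the object
// only needs to be enqueued once.
#include <atomic>
#include <iostream>
#include <vector>

enum Color : int { kWhite = 0, kGray = 1 };

struct Object {
  std::atomic<int> rb_state{kWhite};
};

std::vector<Object*> gray_stack;

void MarkImmune(Object* obj) {
  int expected = kWhite;
  // May or may not succeed; the object may already be gray.
  if (obj->rb_state.compare_exchange_strong(expected, kGray)) {
    gray_stack.push_back(obj);  // first marker wins and enqueues
  }
}

int main() {
  Object o;
  MarkImmune(&o);
  MarkImmune(&o);  // second call is a no-op
  std::cout << gray_stack.size() << "\n";  // 1
}
```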
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index dd75006..b7b5aa0 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -50,14 +50,16 @@
mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
thread_running_gc_(nullptr),
is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
+ region_space_bitmap_(nullptr),
heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
weak_ref_access_enabled_(true),
skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
rb_table_(heap_->GetReadBarrierTable()),
- force_evacuate_all_(false) {
+ force_evacuate_all_(false),
+ immune_gray_stack_lock_("concurrent copying immune gray stack lock",
+ kMarkSweepMarkStackLock) {
static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
"The region space size and the read barrier table region size must match");
- cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -139,19 +141,10 @@
space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
CHECK(space->IsZygoteSpace() || space->IsImageSpace());
immune_spaces_.AddSpace(space);
- const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
- "cc zygote space bitmap";
- // TODO: try avoiding using bitmaps for image/zygote to save space.
- accounting::ContinuousSpaceBitmap* bitmap =
- accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
- cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
- cc_bitmaps_.push_back(bitmap);
} else if (space == region_space_) {
accounting::ContinuousSpaceBitmap* bitmap =
accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
space->Begin(), space->Capacity());
- cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
- cc_bitmaps_.push_back(bitmap);
region_space_bitmap_ = bitmap;
}
}
@@ -179,6 +172,15 @@
} else {
force_evacuate_all_ = false;
}
+ if (kUseBakerReadBarrier) {
+ updated_all_immune_objects_.StoreRelaxed(false);
+ // GC may gray immune objects in the thread flip.
+ gc_grays_immune_objects_ = true;
+ if (kIsDebugBuild) {
+ MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
+ DCHECK(immune_gray_stack_.empty());
+ }
+ }
BindBitmaps();
if (kVerboseMode) {
LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
@@ -303,30 +305,6 @@
live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}
-// Used to visit objects in the immune spaces.
-class ConcurrentCopying::ImmuneSpaceObjVisitor {
- public:
- explicit ImmuneSpaceObjVisitor(ConcurrentCopying* cc) : collector_(cc) {}
-
- void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
- DCHECK(obj != nullptr);
- DCHECK(collector_->immune_spaces_.ContainsObject(obj));
- accounting::ContinuousSpaceBitmap* cc_bitmap =
- collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
- DCHECK(cc_bitmap != nullptr)
- << "An immune space object must have a bitmap";
- if (kIsDebugBuild) {
- DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
- << "Immune space object must be already marked";
- }
- collector_->MarkUnevacFromSpaceRegionOrImmuneSpace(obj, cc_bitmap);
- }
-
- private:
- ConcurrentCopying* const collector_;
-};
-
class EmptyCheckpoint : public Closure {
public:
explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
@@ -347,6 +325,27 @@
ConcurrentCopying* const concurrent_copying_;
};
+// Used to visit objects in the immune spaces.
+inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
+ DCHECK(obj != nullptr);
+ DCHECK(immune_spaces_.ContainsObject(obj));
+ // Update the object's fields without graying it or pushing it onto the mark stack.
+ Scan(obj);
+}
+
+class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
+ public:
+ explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
+ : collector_(cc) {}
+
+ void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ collector_->ScanImmuneObject(obj);
+ }
+
+ private:
+ ConcurrentCopying* const collector_;
+};
+
// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
@@ -354,25 +353,46 @@
LOG(INFO) << "GC MarkingPhase";
}
CHECK(weak_ref_access_enabled_);
- {
- // Mark the image root. The WB-based collectors do not need to
- // scan the image objects from roots by relying on the card table,
- // but it's necessary for the RB to-space invariant to hold.
- TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
- for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
- if (space->IsImageSpace()) {
- gc::space::ImageSpace* image = space->AsImageSpace();
- if (image != nullptr) {
- mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
- mirror::Object* marked_image_root = Mark(image_root);
- CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
- if (ReadBarrier::kEnableToSpaceInvariantChecks) {
- AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
- }
- }
- }
- }
+
+ // Scan immune spaces.
+ // Update all the fields in the immune spaces first without graying the objects so that we
+ // minimize dirty pages in the immune spaces. Note that mutators can concurrently access and
+ // gray some of the objects.
+ if (kUseBakerReadBarrier) {
+ gc_grays_immune_objects_ = false;
}
+ for (auto& space : immune_spaces_.GetSpaces()) {
+ DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ ImmuneSpaceScanObjVisitor visitor(this);
+ live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
+ reinterpret_cast<uintptr_t>(space->Limit()),
+ visitor);
+ }
+ if (kUseBakerReadBarrier) {
+ // This release fence makes the field updates in the above loop visible before allowing
+ // mutators to access immune objects without graying them first.
+ updated_all_immune_objects_.StoreRelease(true);
+ // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
+ // the above loop because we would incorrectly disable the read barrier by whitening an object
+ // which may point to an unscanned, white object, breaking the to-space invariant.
+ //
+ // Make sure no mutators are in the middle of marking an immune object before whitening immune
+ // objects.
+ IssueEmptyCheckpoint();
+ MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
+ if (kVerboseMode) {
+ LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
+ }
+ for (mirror::Object* obj : immune_gray_stack_) {
+ DCHECK(obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
+ bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
+ ReadBarrier::WhitePtr());
+ DCHECK(success);
+ }
+ immune_gray_stack_.clear();
+ }
+
{
TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
@@ -383,16 +403,6 @@
Runtime::Current()->VisitNonThreadRoots(this);
}
- // Immune spaces.
- for (auto& space : immune_spaces_.GetSpaces()) {
- DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
- accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
- ImmuneSpaceObjVisitor visitor(this);
- live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
- reinterpret_cast<uintptr_t>(space->Limit()),
- visitor);
- }
-
Thread* self = Thread::Current();
{
TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
@@ -1239,6 +1249,9 @@
IssueEmptyCheckpoint();
// Disable the check.
is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
+ if (kUseBakerReadBarrier) {
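+ // Reset the flag for the next GC cycle.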
+ updated_all_immune_objects_.StoreSequentiallyConsistent(false);
+ }
CheckEmptyMarkStack();
}
@@ -1288,13 +1301,9 @@
SwapBitmaps();
heap_->UnBindBitmaps();
- // Remove bitmaps for the immune spaces.
- while (!cc_bitmaps_.empty()) {
- accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
- cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
- delete cc_bitmap;
- cc_bitmaps_.pop_back();
- }
+ // Delete the region bitmap.
+ DCHECK(region_space_bitmap_ != nullptr);
+ delete region_space_bitmap_;
region_space_bitmap_ = nullptr;
}
@@ -1410,15 +1419,6 @@
// In a non-moving space.
if (immune_spaces_.ContainsObject(obj)) {
LOG(INFO) << "holder is in an immune image or the zygote space.";
- accounting::ContinuousSpaceBitmap* cc_bitmap =
- cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
- CHECK(cc_bitmap != nullptr)
- << "An immune space object must have a bitmap.";
- if (cc_bitmap->Test(obj)) {
- LOG(INFO) << "holder is marked in the bit map.";
- } else {
- LOG(INFO) << "holder is NOT marked in the bit map.";
- }
} else {
LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
accounting::ContinuousSpaceBitmap* mark_bitmap =
@@ -1449,17 +1449,17 @@
mirror::Object* ref) {
// In a non-moving spaces. Check that the ref is marked.
if (immune_spaces_.ContainsObject(ref)) {
- accounting::ContinuousSpaceBitmap* cc_bitmap =
- cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
- CHECK(cc_bitmap != nullptr)
- << "An immune space ref must have a bitmap. " << ref;
if (kUseBakerReadBarrier) {
- CHECK(cc_bitmap->Test(ref))
+ // Immune object may not be gray if called from the GC.
+ if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
+ return;
+ }
+ bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
+ CHECK(updated_all_immune_objects || ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
<< "Unmarked immune space ref. obj=" << obj << " rb_ptr="
- << obj->GetReadBarrierPointer() << " ref=" << ref;
- } else {
- CHECK(cc_bitmap->Test(ref))
- << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
+ << (obj != nullptr ? obj->GetReadBarrierPointer() : nullptr)
+ << " ref=" << ref << " ref rb_ptr=" << ref->GetReadBarrierPointer()
+ << " updated_all_immune_objects=" << updated_all_immune_objects;
}
} else {
accounting::ContinuousSpaceBitmap* mark_bitmap =
@@ -1510,7 +1510,7 @@
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
SHARED_REQUIRES(Locks::mutator_lock_) {
- collector_->MarkRoot(root);
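+ // kGrayImmuneObject is false because these roots are visited during Scan(), on the GC thread.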
+ collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
}
private:
@@ -1520,6 +1520,7 @@
// Scan ref fields of an object.
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
DCHECK(!region_space_->IsInFromSpace(to_ref));
+ DCHECK_EQ(Thread::Current(), thread_running_gc_);
RefFieldsVisitor visitor(this);
// Disable the read barrier for a performance reason.
to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
@@ -1528,9 +1529,10 @@
// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
+ DCHECK_EQ(Thread::Current(), thread_running_gc_);
mirror::Object* ref = obj->GetFieldObject<
mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
- mirror::Object* to_ref = Mark(ref);
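+ // kGrayImmuneObject is false because Process() runs only on the GC thread (see the DCHECK
+ // above), which does not gray immune objects while it scans their fields.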
+ mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false>(ref);
if (to_ref == ref) {
return;
}
@@ -1569,10 +1571,11 @@
}
}
+template<bool kGrayImmuneObject>
inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
DCHECK(!root->IsNull());
mirror::Object* const ref = root->AsMirrorPtr();
- mirror::Object* to_ref = Mark(ref);
+ mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
if (to_ref != ref) {
auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
@@ -1593,14 +1596,46 @@
for (size_t i = 0; i < count; ++i) {
mirror::CompressedReference<mirror::Object>* const root = roots[i];
if (!root->IsNull()) {
- MarkRoot(root);
+ // kGrayImmuneObject is true because this is used for the thread flip.
+ MarkRoot</*kGrayImmuneObject*/true>(root);
}
}
}
+// Temporarily sets gc_grays_immune_objects_ to true in a scope if the current thread is the GC thread.
+class ConcurrentCopying::ScopedGcGraysImmuneObjects {
+ public:
+ explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
+ : collector_(collector), enabled_(false) {
+ if (kUseBakerReadBarrier &&
+ collector_->thread_running_gc_ == Thread::Current() &&
+ !collector_->gc_grays_immune_objects_) {
+ collector_->gc_grays_immune_objects_ = true;
+ enabled_ = true;
+ }
+ }
+
+ ~ScopedGcGraysImmuneObjects() {
+ if (kUseBakerReadBarrier &&
+ collector_->thread_running_gc_ == Thread::Current() &&
+ enabled_) {
+ DCHECK(collector_->gc_grays_immune_objects_);
+ collector_->gc_grays_immune_objects_ = false;
+ }
+ }
+
+ private:
+ ConcurrentCopying* const collector_;
+ bool enabled_;
+};
+
// Fill the given memory block with a dummy object. Used to fill in a
// copy of objects that was lost in race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
+ // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
+ // barriers here because we need the updated reference to the int array class, etc. Temporarily set
+ // gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
+ ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
CHECK_ALIGNED(byte_size, kObjectAlignment);
memset(dummy_obj, 0, byte_size);
mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
@@ -1836,21 +1871,8 @@
} else {
// from_ref is in a non-moving space.
if (immune_spaces_.ContainsObject(from_ref)) {
- accounting::ContinuousSpaceBitmap* cc_bitmap =
- cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
- DCHECK(cc_bitmap != nullptr)
- << "An immune space object must have a bitmap";
- if (kIsDebugBuild) {
- DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
- << "Immune space object must be already marked";
- }
- if (cc_bitmap->Test(from_ref)) {
- // Already marked.
- to_ref = from_ref;
- } else {
- // Newly marked.
- to_ref = nullptr;
- }
+ // An immune object is alive.
+ to_ref = from_ref;
} else {
// Non-immune non-moving space. Use the mark bitmap.
accounting::ContinuousSpaceBitmap* mark_bitmap =
@@ -1889,85 +1911,74 @@
mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
// ref is in a non-moving space (from_ref == to_ref).
DCHECK(!region_space_->HasAddress(ref)) << ref;
- if (immune_spaces_.ContainsObject(ref)) {
- accounting::ContinuousSpaceBitmap* cc_bitmap =
- cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
- DCHECK(cc_bitmap != nullptr)
- << "An immune space object must have a bitmap";
- if (kIsDebugBuild) {
- DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref))
- << "Immune space object must be already marked";
+ DCHECK(!immune_spaces_.ContainsObject(ref));
+ // Use the mark bitmap.
+ accounting::ContinuousSpaceBitmap* mark_bitmap =
+ heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
+ accounting::LargeObjectBitmap* los_bitmap =
+ heap_mark_bitmap_->GetLargeObjectBitmap(ref);
+ CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
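+ // A null continuous-space bitmap means that ref lives in the large object space.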
+ bool is_los = mark_bitmap == nullptr;
+ if (!is_los && mark_bitmap->Test(ref)) {
+ // Already marked.
+ if (kUseBakerReadBarrier) {
+ DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
+ ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
}
- MarkUnevacFromSpaceRegionOrImmuneSpace(ref, cc_bitmap);
+ } else if (is_los && los_bitmap->Test(ref)) {
+ // Already marked in LOS.
+ if (kUseBakerReadBarrier) {
+ DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
+ ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
+ }
} else {
- // Use the mark bitmap.
- accounting::ContinuousSpaceBitmap* mark_bitmap =
- heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
- accounting::LargeObjectBitmap* los_bitmap =
- heap_mark_bitmap_->GetLargeObjectBitmap(ref);
- CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
- bool is_los = mark_bitmap == nullptr;
- if (!is_los && mark_bitmap->Test(ref)) {
- // Already marked.
- if (kUseBakerReadBarrier) {
- DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
+ // Not marked.
+ if (IsOnAllocStack(ref)) {
+ // If it's on the allocation stack, it's considered marked. Keep it white.
+ // Objects on the allocation stack need not be marked.
+ if (!is_los) {
+ DCHECK(!mark_bitmap->Test(ref));
+ } else {
+ DCHECK(!los_bitmap->Test(ref));
}
- } else if (is_los && los_bitmap->Test(ref)) {
- // Already marked in LOS.
if (kUseBakerReadBarrier) {
- DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
}
} else {
- // Not marked.
- if (IsOnAllocStack(ref)) {
- // If it's on the allocation stack, it's considered marked. Keep it white.
- // Objects on the allocation stack need not be marked.
- if (!is_los) {
- DCHECK(!mark_bitmap->Test(ref));
- } else {
- DCHECK(!los_bitmap->Test(ref));
+ // For the baker-style RB, we need to handle 'false-gray' cases. See the
+ // kRegionTypeUnevacFromSpace-case comment in Mark().
+ if (kUseBakerReadBarrier) {
+ // Test the bitmap first to reduce the chance of false gray cases.
+ if ((!is_los && mark_bitmap->Test(ref)) ||
+ (is_los && los_bitmap->Test(ref))) {
+ return ref;
}
- if (kUseBakerReadBarrier) {
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
+ }
+ // Not marked or on the allocation stack. Try to mark it.
+ // This may or may not succeed, which is ok.
+ bool cas_success = false;
+ if (kUseBakerReadBarrier) {
+ cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
+ ReadBarrier::GrayPtr());
+ }
+ if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
+ // Already marked.
+ if (kUseBakerReadBarrier && cas_success &&
+ ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ PushOntoFalseGrayStack(ref);
+ }
+ } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
+ // Already marked in LOS.
+ if (kUseBakerReadBarrier && cas_success &&
+ ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ PushOntoFalseGrayStack(ref);
}
} else {
- // For the baker-style RB, we need to handle 'false-gray' cases. See the
- // kRegionTypeUnevacFromSpace-case comment in Mark().
+ // Newly marked.
if (kUseBakerReadBarrier) {
- // Test the bitmap first to reduce the chance of false gray cases.
- if ((!is_los && mark_bitmap->Test(ref)) ||
- (is_los && los_bitmap->Test(ref))) {
- return ref;
- }
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
}
- // Not marked or on the allocation stack. Try to mark it.
- // This may or may not succeed, which is ok.
- bool cas_success = false;
- if (kUseBakerReadBarrier) {
- cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
- ReadBarrier::GrayPtr());
- }
- if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
- // Already marked.
- if (kUseBakerReadBarrier && cas_success &&
- ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
- PushOntoFalseGrayStack(ref);
- }
- } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
- // Already marked in LOS.
- if (kUseBakerReadBarrier && cas_success &&
- ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
- PushOntoFalseGrayStack(ref);
- }
- } else {
- // Newly marked.
- if (kUseBakerReadBarrier) {
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
- }
- PushOntoMarkStack(ref);
- }
+ PushOntoMarkStack(ref);
}
}
}
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index a986a7a..166a1f0 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -61,10 +61,12 @@
ConcurrentCopying(Heap* heap, const std::string& name_prefix = "");
~ConcurrentCopying();
- virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
- void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ virtual void RunPhases() OVERRIDE
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
+ void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void FinishPhase() REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
@@ -92,8 +94,9 @@
DCHECK(ref != nullptr);
return IsMarked(ref) == ref;
}
+ template<bool kGrayImmuneObject = true>
ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
bool IsMarking() const {
return is_marking_;
}
@@ -117,16 +120,19 @@
void Scan(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void Process(mirror::Object* obj, MemberOffset offset)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
+ template<bool kGrayImmuneObject>
void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
accounting::ObjectStack* GetAllocationStack();
accounting::ObjectStack* GetLiveStack();
@@ -146,9 +152,11 @@
SHARED_REQUIRES(Locks::mutator_lock_);
void ProcessReferences(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
@@ -182,14 +190,19 @@
void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* MarkNonMoving(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
- ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegionOrImmuneSpace(mirror::Object* from_ref,
+ ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(mirror::Object* from_ref,
accounting::SpaceBitmap<kObjectAlignment>* bitmap)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ template<bool kGrayImmuneObject>
+ ALWAYS_INLINE mirror::Object* MarkImmuneSpace(mirror::Object* from_ref)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
void PushOntoFalseGrayStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void ProcessFalseGrayStack() SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
+ void ScanImmuneObject(mirror::Object* obj)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
@@ -207,8 +220,6 @@
bool is_active_; // True while the collection is ongoing.
bool is_asserting_to_space_invariant_; // True while asserting the to-space invariant.
ImmuneSpaces immune_spaces_;
- std::unique_ptr<accounting::HeapBitmap> cc_heap_bitmap_;
- std::vector<accounting::SpaceBitmap<kObjectAlignment>*> cc_bitmaps_;
accounting::SpaceBitmap<kObjectAlignment>* region_space_bitmap_;
// A cache of Heap::GetMarkBitmap().
accounting::HeapBitmap* heap_mark_bitmap_;
@@ -242,6 +253,10 @@
accounting::ReadBarrierTable* rb_table_;
bool force_evacuate_all_; // True if all regions are evacuated.
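+ // True once the GC has updated all immune-space object fields in the current cycle;
+ // published with a release store in MarkingPhase().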
+ Atomic<bool> updated_all_immune_objects_;
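+ // True while the GC grays immune objects when it marks them (during the thread flip and
+ // within ScopedGcGraysImmuneObjects scopes).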
+ bool gc_grays_immune_objects_;
+ Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);
class AssertToSpaceInvariantFieldVisitor;
class AssertToSpaceInvariantObjectVisitor;
@@ -250,14 +265,15 @@
class ComputeUnevacFromSpaceLiveRatioVisitor;
class DisableMarkingCheckpoint;
class FlipCallback;
- class ImmuneSpaceObjVisitor;
+ class ImmuneSpaceScanObjVisitor;
class LostCopyVisitor;
class RefFieldsVisitor;
class RevokeThreadLocalMarkStackCheckpoint;
+ class ScopedGcGraysImmuneObjects;
+ class ThreadFlipVisitor;
class VerifyNoFromSpaceRefsFieldVisitor;
class VerifyNoFromSpaceRefsObjectVisitor;
class VerifyNoFromSpaceRefsVisitor;
- class ThreadFlipVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};
diff --git a/test/530-checker-loops1/src/Main.java b/test/530-checker-loops1/src/Main.java
index 948a7b7..dde4d62 100644
--- a/test/530-checker-loops1/src/Main.java
+++ b/test/530-checker-loops1/src/Main.java
@@ -562,7 +562,7 @@
//
/// CHECK-START: void Main.linearTriangularOnTwoArrayLengths(int) BCE (after)
/// CHECK-NOT: BoundsCheck
- // TODO: also CHECK-NOT: Deoptimize, see b/27151190
+ /// CHECK-NOT: Deoptimize
private static void linearTriangularOnTwoArrayLengths(int n) {
int[] a = new int[n];
for (int i = 0; i < a.length; i++) {
@@ -604,7 +604,7 @@
//
/// CHECK-START: void Main.linearTriangularOnParameter(int) BCE (after)
/// CHECK-NOT: BoundsCheck
- // TODO: also CHECK-NOT: Deoptimize, see b/27151190
+ /// CHECK-NOT: Deoptimize
private static void linearTriangularOnParameter(int n) {
int[] a = new int[n];
for (int i = 0; i < n; i++) {
@@ -619,56 +619,56 @@
}
}
- /// CHECK-START: void Main.linearTriangularVariationsInnerStrict(int) BCE (before)
+ /// CHECK-START: void Main.linearTriangularStrictLower(int) BCE (before)
/// CHECK-DAG: BoundsCheck
/// CHECK-DAG: BoundsCheck
/// CHECK-DAG: BoundsCheck
/// CHECK-DAG: BoundsCheck
//
- /// CHECK-START: void Main.linearTriangularVariationsInnerStrict(int) BCE (after)
+ /// CHECK-START: void Main.linearTriangularStrictLower(int) BCE (after)
/// CHECK-NOT: BoundsCheck
- // TODO: also CHECK-NOT: Deoptimize, see b/27151190
- private static void linearTriangularVariationsInnerStrict(int n) {
+ /// CHECK-NOT: Deoptimize
+ private static void linearTriangularStrictLower(int n) {
int[] a = new int[n];
for (int i = 0; i < n; i++) {
for (int j = 0; j < i; j++) {
a[j] += 1;
}
- for (int j = i - 1; j > -1; j--) {
+ for (int j = i - 1; j >= 0; j--) {
a[j] += 1;
}
for (int j = i; j < n; j++) {
a[j] += 1;
}
- for (int j = n - 1; j > i - 1; j--) {
+ for (int j = n - 1; j >= i; j--) {
a[j] += 1;
}
}
verifyTriangular(a);
}
- /// CHECK-START: void Main.linearTriangularVariationsInnerNonStrict(int) BCE (before)
+ /// CHECK-START: void Main.linearTriangularStrictUpper(int) BCE (before)
/// CHECK-DAG: BoundsCheck
/// CHECK-DAG: BoundsCheck
/// CHECK-DAG: BoundsCheck
/// CHECK-DAG: BoundsCheck
//
- /// CHECK-START: void Main.linearTriangularVariationsInnerNonStrict(int) BCE (after)
+ /// CHECK-START: void Main.linearTriangularStrictUpper(int) BCE (after)
/// CHECK-NOT: BoundsCheck
- // TODO: also CHECK-NOT: Deoptimize, see b/27151190
- private static void linearTriangularVariationsInnerNonStrict(int n) {
+ /// CHECK-NOT: Deoptimize
+ private static void linearTriangularStrictUpper(int n) {
int[] a = new int[n];
for (int i = 0; i < n; i++) {
- for (int j = 0; j <= i - 1; j++) {
+ for (int j = 0; j <= i; j++) {
a[j] += 1;
}
- for (int j = i - 1; j >= 0; j--) {
+ for (int j = i; j >= 0; j--) {
a[j] += 1;
}
- for (int j = i; j <= n - 1; j++) {
+ for (int j = i + 1; j < n; j++) {
a[j] += 1;
}
- for (int j = n - 1; j >= i; j--) {
+ for (int j = n - 1; j >= i + 1; j--) {
a[j] += 1;
}
}
@@ -802,8 +802,8 @@
linearTriangularOnTwoArrayLengths(10);
linearTriangularOnOneArrayLength(10);
linearTriangularOnParameter(10);
- linearTriangularVariationsInnerStrict(10);
- linearTriangularVariationsInnerNonStrict(10);
+ linearTriangularStrictLower(10);
+ linearTriangularStrictUpper(10);
{
int[] t = linearTriangularOOB();
for (int i = 0; i < 200; i++) {
diff --git a/test/530-checker-loops2/src/Main.java b/test/530-checker-loops2/src/Main.java
index b12fbd6..7acf008 100644
--- a/test/530-checker-loops2/src/Main.java
+++ b/test/530-checker-loops2/src/Main.java
@@ -31,7 +31,7 @@
//
/// CHECK-START: void Main.bubble(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
- // TODO: also CHECK-NOT: Deoptimize, see b/27151190
+ /// CHECK-NOT: Deoptimize
private static void bubble(int[] a) {
for (int i = a.length; --i >= 0;) {
for (int j = 0; j < i; j++) {
@@ -301,6 +301,53 @@
} while (-1 <= i);
}
+ /// CHECK-START: void Main.justRightTriangular1() BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ //
+ /// CHECK-START: void Main.justRightTriangular1() BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ private static void justRightTriangular1() {
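+ // Induction analysis can prove that j - (Integer.MIN_VALUE + 4) stays within the bounds
+ // of a, so the check is removed statically, without a deoptimization.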
+ int[] a = { 1 };
+ for (int i = Integer.MIN_VALUE + 5; i <= Integer.MIN_VALUE + 10; i++) {
+ for (int j = Integer.MIN_VALUE + 4; j < i - 5; j++) {
+ sResult += a[j - (Integer.MIN_VALUE + 4)];
+ }
+ }
+ }
+
+ /// CHECK-START: void Main.justRightTriangular2() BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ //
+ /// CHECK-START: void Main.justRightTriangular2() BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ private static void justRightTriangular2() {
+ int[] a = { 1 };
+ for (int i = Integer.MIN_VALUE + 5; i <= 10; i++) {
+ for (int j = 4; j < i - 5; j++) {
+ sResult += a[j - 4];
+ }
+ }
+ }
+
+ /// CHECK-START: void Main.justOOBTriangular() BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ //
+ /// CHECK-START: void Main.justOOBTriangular() BCE (after)
+ /// CHECK-DAG: Deoptimize
+ //
+ /// CHECK-START: void Main.justOOBTriangular() BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static void justOOBTriangular() {
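+ // With the outer loop starting at Integer.MIN_VALUE + 4, i - 5 wraps around to a large
+ // positive value on the first iteration, so the access goes out of bounds and BCE must
+ // retain a deoptimization.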
+ int[] a = { 1 };
+ for (int i = Integer.MIN_VALUE + 4; i <= 10; i++) {
+ for (int j = 4; j < i - 5; j++) {
+ sResult += a[j - 4];
+ }
+ }
+ }
+
/// CHECK-START: void Main.hiddenOOB1(int) BCE (before)
/// CHECK-DAG: BoundsCheck
//
@@ -315,7 +362,6 @@
// Dangerous loop where careless static range analysis would yield strict upper bound
// on index j of 5. When, for instance, lo and thus i = -2147483648, the upper bound
// becomes really positive due to arithmetic wrap-around, causing OOB.
- // Dynamic BCE is feasible though, since it checks the range.
for (int j = 4; j < i - 5; j++) {
sResult += a[j - 4];
}
@@ -336,13 +382,32 @@
// Dangerous loop where careless static range analysis would yield strict lower bound
// on index j of 5. When, for instance, hi and thus i = 2147483647, the upper bound
// becomes really negative due to arithmetic wrap-around, causing OOB.
- // Dynamic BCE is feasible though, since it checks the range.
for (int j = 6; j > i + 5; j--) {
sResult += a[j - 6];
}
}
}
+ /// CHECK-START: void Main.hiddenOOB3(int) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ //
+ /// CHECK-START: void Main.hiddenOOB3(int) BCE (after)
+ /// CHECK-DAG: Deoptimize
+ //
+ /// CHECK-START: void Main.hiddenOOB3(int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static void hiddenOOB3(int hi) {
+ int[] a = { 11 };
+ for (int i = -1; i <= hi; i++) {
+ // Dangerous loop where careless static range analysis would yield strict lower bound
+ // on index j of 0. For large i, the initial value of j becomes really negative due
+ // to arithmetic wrap-around, causing OOB.
+ for (int j = i + 1; j < 1; j++) {
+ sResult += a[j];
+ }
+ }
+ }
+
/// CHECK-START: void Main.hiddenInfiniteOOB() BCE (before)
/// CHECK-DAG: BoundsCheck
//
@@ -376,7 +441,6 @@
for (int i = -1; i <= 0; i++) {
// Dangerous loop similar as above where the loop is now finite, but the
// loop still goes out of bounds for i = -1 due to the large upper bound.
- // Dynamic BCE is feasible though, since it checks the range.
for (int j = -4; j < 2147483646 * i - 3; j++) {
sResult += a[j + 4];
}
@@ -432,6 +496,25 @@
}
}
+ /// CHECK-START: int Main.doNotHoist(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ //
+ /// CHECK-START: int Main.doNotHoist(int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ public static int doNotHoist(int[] a) {
+ int n = a.length;
+ int x = 0;
+ // BCE applies because the access is guarded, but hoisting the check out of
+ // the loop would crash for i outside [0, n).
+ for (int i = -10000; i < 10000; i++) {
+ for (int j = 0; j <= 1; j++) {
+ if (0 <= i && i < n)
+ x += a[i];
+ }
+ }
+ return x;
+ }
+
/// CHECK-START: int[] Main.add() BCE (before)
/// CHECK-DAG: BoundsCheck
//
@@ -687,7 +770,7 @@
/// CHECK-START: int Main.dynamicBCEAndConstantIndices(int[], int[][], int, int) BCE (after)
// Order matters:
/// CHECK: Deoptimize loop:<<Loop:B\d+>>
- // CHECK-NOT: Goto loop:<<Loop>>
+ /// CHECK-NOT: Goto loop:<<Loop>>
/// CHECK-DAG: {{l\d+}} ArrayGet loop:<<Loop>>
/// CHECK-DAG: {{l\d+}} ArrayGet loop:<<Loop>>
/// CHECK-DAG: {{l\d+}} ArrayGet loop:<<Loop>>
@@ -839,6 +922,8 @@
expectEquals(55, justRightDown1());
expectEquals(55, justRightDown2());
expectEquals(55, justRightDown3());
+
+ // Large bounds OOB.
sResult = 0;
try {
justOOBUp();
@@ -890,6 +975,23 @@
}
expectEquals(1055, sResult);
+ // Triangular.
+ sResult = 0;
+ justRightTriangular1();
+ expectEquals(1, sResult);
+ if (HEAVY) {
+ sResult = 0;
+ justRightTriangular2();
+ expectEquals(1, sResult);
+ }
+ sResult = 0;
+ try {
+ justOOBTriangular();
+ } catch (ArrayIndexOutOfBoundsException e) {
+ sResult += 1000;
+ }
+ expectEquals(1001, sResult);
+
// Hidden OOB.
sResult = 0;
try {
@@ -912,6 +1014,15 @@
sResult += 1000;
}
expectEquals(1, sResult);
+ sResult = 0;
+ try {
+ hiddenOOB3(-1); // no OOB
+ } catch (ArrayIndexOutOfBoundsException e) {
+ sResult += 1000;
+ }
+ expectEquals(11, sResult);
+
+ // Expensive hidden OOB test.
if (HEAVY) {
sResult = 0;
try {
@@ -920,7 +1031,16 @@
sResult += 1000;
}
expectEquals(1002, sResult);
+ sResult = 0;
+ try {
+ hiddenOOB3(2147483647); // OOB
+ } catch (ArrayIndexOutOfBoundsException e) {
+ sResult += 1000;
+ }
+ expectEquals(1011, sResult);
}
+
+ // More hidden OOB.
sResult = 0;
try {
hiddenInfiniteOOB();
@@ -966,6 +1086,9 @@
expectEquals(i < 128 ? i : 0, a200[i]);
}
+ // No hoisting after BCE.
+ expectEquals(110, doNotHoist(x));
+
// Addition.
{
int[] e1 ={ 1, 2, 3, 4, 4, 4, 4, 3, 2, 1 };
diff --git a/test/600-verifier-fails/expected.txt b/test/600-verifier-fails/expected.txt
index 8399969..eaa0c93 100644
--- a/test/600-verifier-fails/expected.txt
+++ b/test/600-verifier-fails/expected.txt
@@ -2,3 +2,4 @@
passed B
passed C
passed D
+passed E
diff --git a/test/600-verifier-fails/src/Main.java b/test/600-verifier-fails/src/Main.java
index 64c3d5c..fa25d58 100644
--- a/test/600-verifier-fails/src/Main.java
+++ b/test/600-verifier-fails/src/Main.java
@@ -38,7 +38,6 @@
test("B");
test("C");
test("D");
- // TODO: enable again
- // test("E");
+ test("E");
}
}
diff --git a/tools/ahat/src/AhatSnapshot.java b/tools/ahat/src/AhatSnapshot.java
index d088e8c..e6f8411 100644
--- a/tools/ahat/src/AhatSnapshot.java
+++ b/tools/ahat/src/AhatSnapshot.java
@@ -16,6 +16,7 @@
package com.android.ahat;
+import com.android.tools.perflib.captures.MemoryMappedFileBuffer;
import com.android.tools.perflib.heap.ClassObj;
import com.android.tools.perflib.heap.Heap;
import com.android.tools.perflib.heap.Instance;
@@ -24,9 +25,11 @@
import com.android.tools.perflib.heap.Snapshot;
import com.android.tools.perflib.heap.StackFrame;
import com.android.tools.perflib.heap.StackTrace;
-import com.android.tools.perflib.captures.MemoryMappedFileBuffer;
+
import com.google.common.collect.Lists;
+
import gnu.trove.TObjectProcedure;
+
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
diff --git a/tools/ahat/src/InstanceUtils.java b/tools/ahat/src/InstanceUtils.java
index 8defba2..3cdb40c 100644
--- a/tools/ahat/src/InstanceUtils.java
+++ b/tools/ahat/src/InstanceUtils.java
@@ -19,9 +19,10 @@
import com.android.tools.perflib.heap.ArrayInstance;
import com.android.tools.perflib.heap.ClassInstance;
import com.android.tools.perflib.heap.ClassObj;
-import com.android.tools.perflib.heap.Instance;
import com.android.tools.perflib.heap.Heap;
+import com.android.tools.perflib.heap.Instance;
import com.android.tools.perflib.heap.Type;
+
import java.awt.image.BufferedImage;
/**
@@ -42,11 +43,11 @@
* Returns null if the instance is not a byte array.
*/
private static byte[] asByteArray(Instance inst) {
- if (! (inst instanceof ArrayInstance)) {
+ if (!(inst instanceof ArrayInstance)) {
return null;
}
- ArrayInstance array = (ArrayInstance)inst;
+ ArrayInstance array = (ArrayInstance) inst;
if (array.getArrayType() != Type.BYTE) {
return null;
}
@@ -54,7 +55,7 @@
Object[] objs = array.getValues();
byte[] bytes = new byte[objs.length];
for (int i = 0; i < objs.length; i++) {
- Byte b = (Byte)objs[i];
+ Byte b = (Byte) objs[i];
bytes[i] = b.byteValue();
}
return bytes;
@@ -143,10 +144,10 @@
int[] abgr = new int[height * width];
for (int i = 0; i < abgr.length; i++) {
abgr[i] = (
- (((int)buffer[i * 4 + 3] & 0xFF) << 24) +
- (((int)buffer[i * 4 + 0] & 0xFF) << 16) +
- (((int)buffer[i * 4 + 1] & 0xFF) << 8) +
- ((int)buffer[i * 4 + 2] & 0xFF));
+ (((int) buffer[i * 4 + 3] & 0xFF) << 24)
+ + (((int) buffer[i * 4 + 0] & 0xFF) << 16)
+ + (((int) buffer[i * 4 + 1] & 0xFF) << 8)
+ + ((int) buffer[i * 4 + 2] & 0xFF));
}
BufferedImage bitmap = new BufferedImage(
@@ -185,7 +186,7 @@
if (!(value instanceof Instance)) {
return null;
}
- return (Instance)value;
+ return (Instance) value;
}
/**
@@ -199,7 +200,7 @@
if (!(value instanceof Integer)) {
return def;
}
- return (Integer)value;
+ return (Integer) value;
}
/**
@@ -213,7 +214,7 @@
if (!(value instanceof Long)) {
return def;
}
- return (Long)value;
+ return (Long) value;
}
/**
@@ -226,7 +227,7 @@
if (!(value instanceof Instance)) {
return null;
}
- return asByteArray((Instance)value);
+ return asByteArray((Instance) value);
}
// Return the bitmap instance associated with this object, or null if there
@@ -243,7 +244,7 @@
}
if (inst instanceof ArrayInstance) {
- ArrayInstance array = (ArrayInstance)inst;
+ ArrayInstance array = (ArrayInstance) inst;
if (array.getArrayType() == Type.BYTE && inst.getHardReverseReferences().size() == 1) {
Instance ref = inst.getHardReverseReferences().get(0);
ClassObj clsref = ref.getClassObj();
@@ -323,10 +324,10 @@
// Note: We know inst as an instance of ClassInstance because we already
// read the nativePtr field from it.
Instance registry = null;
- for (ClassInstance.FieldValue field : ((ClassInstance)inst).getValues()) {
+ for (ClassInstance.FieldValue field : ((ClassInstance) inst).getValues()) {
Object fieldValue = field.getValue();
if (fieldValue instanceof Instance) {
- Instance fieldInst = (Instance)fieldValue;
+ Instance fieldInst = (Instance) fieldValue;
if (isInstanceOfClass(fieldInst, "libcore.util.NativeAllocationRegistry")) {
registry = fieldInst;
break;
diff --git a/tools/ahat/src/Site.java b/tools/ahat/src/Site.java
index d504096..dbb84f6 100644
--- a/tools/ahat/src/Site.java
+++ b/tools/ahat/src/Site.java
@@ -20,6 +20,7 @@
import com.android.tools.perflib.heap.Heap;
import com.android.tools.perflib.heap.Instance;
import com.android.tools.perflib.heap.StackFrame;
+
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
diff --git a/tools/ahat/src/Sort.java b/tools/ahat/src/Sort.java
index c5f89c3..8a3d9f2 100644
--- a/tools/ahat/src/Sort.java
+++ b/tools/ahat/src/Sort.java
@@ -16,13 +16,14 @@
package com.android.ahat;
-import com.android.tools.perflib.heap.Instance;
import com.android.tools.perflib.heap.Heap;
+import com.android.tools.perflib.heap.Instance;
+
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
-import java.util.List;
import java.util.Iterator;
+import java.util.List;
/**
* Provides Comparators and helper functions for sorting Instances, Sites, and