Make ART compile with GCC -O0 again.
Tidy up InstructionSetFeatures so that it has a type hierarchy dependent on
architecture.
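
The ARM-specific queries move behind a per-architecture subclass, roughly
along these lines (a sketch only; AsArmInstructionSetFeatures(), HasLpae()
and HasDivideInstruction() match the diff below, the rest is illustrative):

  class ArmInstructionSetFeatures;

  class InstructionSetFeatures {
   public:
    virtual ~InstructionSetFeatures() {}
    // Down-cast helper; callers are expected to have checked the ISA first.
    const ArmInstructionSetFeatures* AsArmInstructionSetFeatures() const;
  };

  class ArmInstructionSetFeatures : public InstructionSetFeatures {
   public:
    ArmInstructionSetFeatures(bool has_div, bool has_lpae)
        : has_div_(has_div), has_lpae_(has_lpae) {}
    bool HasDivideInstruction() const { return has_div_; }
    bool HasLpae() const { return has_lpae_; }
   private:
    const bool has_div_;
    const bool has_lpae_;
  };

  inline const ArmInstructionSetFeatures*
  InstructionSetFeatures::AsArmInstructionSetFeatures() const {
    return static_cast<const ArmInstructionSetFeatures*>(this);
  }
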
Add a check to instruction_set_test that warns when InstructionSetFeatures
don't agree with those derived from system properties, AT_HWCAP and
/proc/cpuinfo.
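
For reference, the kind of cross-check involved, sketched with getauxval()
(Linux-specific; kHwcapIdiva is the 32-bit ARM HWCAP_IDIVA bit, and the
helper name is illustrative rather than the actual test code):

  #include <stdint.h>
  #include <stdio.h>
  #include <sys/auxv.h>  // getauxval(), Linux-specific.

  // HWCAP_IDIVA as defined for 32-bit ARM in <asm/hwcap.h>.
  static constexpr uint64_t kHwcapIdiva = 1u << 17;

  // Warn, rather than fail, when the build-time notion of sdiv/udiv support
  // disagrees with what the kernel reports for the running CPU.
  static void WarnIfDivDisagrees(bool build_time_has_div) {
    const bool hwcap_has_div = (getauxval(AT_HWCAP) & kHwcapIdiva) != 0;
    if (hwcap_has_div != build_time_has_div) {
      fprintf(stderr, "warning: build-time div=%d but AT_HWCAP div=%d\n",
              build_time_has_div, hwcap_has_div);
    }
  }
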
Clean up the class linker entry point logic so that it no longer returns
entry points but instead tests whether the passed code is the particular
entrypoint. This works around image trampolines that replicate entrypoints.
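
A minimal sketch of the predicate-style interface this moves to (names and
fields here are illustrative; the point is that the query also accepts the
boot-image copy of a trampoline, not just the runtime-allocated one):

  class ClassLinker {
   public:
    bool IsQuickGenericJniStub(const void* code) const {
      return code == quick_generic_jni_trampoline_ ||
             code == quick_generic_jni_stub_in_image_;
    }
   private:
    const void* quick_generic_jni_trampoline_ = nullptr;
    const void* quick_generic_jni_stub_in_image_ = nullptr;
  };

Callers then ask "is this code the stub?" instead of comparing against a
single entry point returned by the class linker.
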
Bug: 17993736
Change-Id: I5f4b49e88c3b02a79f9bee04f83395146ed7be23
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index 37e3a7a..34585c1 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -68,7 +68,7 @@
InstructionSet instruction_set;
bool target64;
- InstructionSetFeatures GetInstructionSetFeatures() {
+ const InstructionSetFeatures* GetInstructionSetFeatures() {
return compiler_driver->GetInstructionSetFeatures();
}
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index e833c9a..09acf4c 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -969,9 +969,9 @@
size = k32;
}
LIR* load;
- if (UNLIKELY(is_volatile == kVolatile &&
- (size == k64 || size == kDouble) &&
- !cu_->compiler_driver->GetInstructionSetFeatures().HasLpae())) {
+ if (is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+ !cu_->compiler_driver->GetInstructionSetFeatures()->
+ AsArmInstructionSetFeatures()->HasLpae()) {
// Only 64-bit load needs special handling.
// If the cpu supports LPAE, aligned LDRD is atomic - fall through to LoadBaseDisp().
DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadSave().
@@ -1093,9 +1093,9 @@
}
LIR* store;
- if (UNLIKELY(is_volatile == kVolatile &&
- (size == k64 || size == kDouble) &&
- !cu_->compiler_driver->GetInstructionSetFeatures().HasLpae())) {
+ if (is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+ !cu_->compiler_driver->GetInstructionSetFeatures()->
+ AsArmInstructionSetFeatures()->HasLpae()) {
// Only 64-bit store needs special handling.
// If the cpu supports LPAE, aligned STRD is atomic - fall through to StoreBaseDisp().
// Use STREXD for the atomic store. (Expect displacement > 0, don't optimize for == 0.)
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 12ca065..a33d15f 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1606,7 +1606,8 @@
rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
done = true;
} else if (cu_->instruction_set == kThumb2) {
- if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ if (cu_->GetInstructionSetFeatures()->AsArmInstructionSetFeatures()->
+ HasDivideInstruction()) {
// Use ARM SDIV instruction for division. For remainder we also need to
// calculate using a MUL and subtract.
rl_src1 = LoadValue(rl_src1, kCoreReg);
@@ -1875,7 +1876,8 @@
rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
done = true;
} else if (cu_->instruction_set == kThumb2) {
- if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ if (cu_->GetInstructionSetFeatures()->AsArmInstructionSetFeatures()->
+ HasDivideInstruction()) {
// Use ARM SDIV instruction for division. For remainder we also need to
// calculate using a MUL and subtract.
rl_src = LoadValue(rl_src, kCoreReg);