Revert "X86: Use the constant area for more operations."
Hits a DCHECK in the x86 pc-relative fixups pass, which requires that an invoke being handed the X86ComputeBaseMethodAddress special input does not already carry the current-method input:
dex2oatd F 19461 20411 art/compiler/optimizing/pc_relative_fixups_x86.cc:196] Check failed: !invoke_static_or_direct->HasCurrentMethodInput()
This reverts commit dc00454f0b9a134f01f79b419200f4044c2af5c6.
Change-Id: Idfcacf12eb9e1dd7e68d95e880fda0f76f90e9ed
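For reference, here is a standalone sketch (plain C++, not ART code; all names
below are ours) of the bit patterns the restored stack-based paths materialize:
0x7FFFFFFF / 0x7FFFFFFFFFFFFFFF clear the IEEE-754 sign bit for abs, and
kDoubleNaNHigh:kDoubleNaNLow assemble the canonical quiet double NaN.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Same effect as the andps path in MathAbsFP: clear the sign bit.
    static float AbsViaSignMask(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits &= UINT32_C(0x7FFFFFFF);
      std::memcpy(&x, &bits, sizeof(x));
      return x;
    }

    // pushl kDoubleNaNHigh; pushl kDoubleNaNLow leaves the low word at ESP+0
    // and the high word at ESP+4, i.e. the little-endian layout of this value.
    static double CanonicalQuietNaN() {
      uint64_t bits = (uint64_t{0x7FF80000} << 32) | uint64_t{0x00000000};
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d;
    }

    int main() {
      assert(AbsViaSignMask(-2.5f) == 2.5f);
      double nan = CanonicalQuietNaN();
      assert(nan != nan);  // NaN compares unequal to itself.
      return 0;
    }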
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index aad9378..3f5688b 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -37,12 +37,10 @@
static constexpr int kDoubleNaNHigh = 0x7FF80000;
static constexpr int kDoubleNaNLow = 0x00000000;
-static constexpr int64_t kDoubleNaN = INT64_C(0x7FF8000000000000);
-static constexpr int32_t kFloatNaN = INT32_C(0x7FC00000);
+static constexpr int kFloatNaN = 0x7FC00000;
IntrinsicLocationsBuilderX86::IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen)
- : arena_(codegen->GetGraph()->GetArena()),
- codegen_(codegen) {
+ : arena_(codegen->GetGraph()->GetArena()), codegen_(codegen) {
}
@@ -258,36 +256,15 @@
LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
+ // TODO: Allow x86 to work with memory. This requires assembler support, see below.
+ // locations->SetInAt(0, Location::Any()); // X86 can work on memory directly.
locations->SetOut(Location::SameAsFirstInput());
- HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
- DCHECK(static_or_direct != nullptr);
- if (invoke->InputAt(static_or_direct->GetSpecialInputIndex())->IsX86ComputeBaseMethodAddress()) {
- // We need addressability for the constant area.
- locations->SetInAt(1, Location::RequiresRegister());
- // We need a temporary to hold the constant.
- locations->AddTemp(Location::RequiresFpuRegister());
- }
}
-static void MathAbsFP(LocationSummary* locations,
- bool is64bit,
- X86Assembler* assembler,
- CodeGeneratorX86* codegen) {
+static void MathAbsFP(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {
Location output = locations->Out();
- DCHECK(output.IsFpuRegister());
- if (locations->InAt(1).IsRegister()) {
- // We also have a constant area pointer.
- Register constant_area = locations->InAt(1).AsRegister<Register>();
- XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- if (is64bit) {
- __ movsd(temp, codegen->LiteralInt64Address(INT64_C(0x7FFFFFFFFFFFFFFF), constant_area));
- __ andpd(output.AsFpuRegister<XmmRegister>(), temp);
- } else {
- __ movss(temp, codegen->LiteralInt32Address(INT32_C(0x7FFFFFFF), constant_area));
- __ andps(output.AsFpuRegister<XmmRegister>(), temp);
- }
- } else {
+ if (output.IsFpuRegister()) {
// Create the right constant on an aligned stack.
if (is64bit) {
__ subl(ESP, Immediate(8));
@@ -300,6 +277,19 @@
__ andps(output.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
}
__ addl(ESP, Immediate(16));
+ } else {
+ // TODO: update when assembler support is available.
+ UNIMPLEMENTED(FATAL) << "Needs assembler support.";
+// Once assembler support is available, in-memory operations look like this:
+// if (is64bit) {
+//   DCHECK(output.IsDoubleStackSlot());
+//   __ andl(Address(ESP, output.GetHighStackIndex(kX86WordSize)),
+//           Immediate(0x7FFFFFFF));
+// } else {
+//   DCHECK(output.IsStackSlot());
+//   // Can use andl with an immediate directly.
+//   __ andl(Address(ESP, output.GetStackIndex()), Immediate(0x7FFFFFFF));
+// }
}
}
@@ -308,7 +298,7 @@
}
void IntrinsicCodeGeneratorX86::VisitMathAbsDouble(HInvoke* invoke) {
- MathAbsFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler(), codegen_);
+ MathAbsFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}
void IntrinsicLocationsBuilderX86::VisitMathAbsFloat(HInvoke* invoke) {
@@ -316,7 +306,7 @@
}
void IntrinsicCodeGeneratorX86::VisitMathAbsFloat(HInvoke* invoke) {
- MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler(), codegen_);
+ MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
static void CreateAbsIntLocation(ArenaAllocator* arena, HInvoke* invoke) {
@@ -398,11 +388,8 @@
GenAbsLong(invoke->GetLocations(), GetAssembler());
}
-static void GenMinMaxFP(LocationSummary* locations,
- bool is_min,
- bool is_double,
- X86Assembler* assembler,
- CodeGeneratorX86* codegen) {
+static void GenMinMaxFP(LocationSummary* locations, bool is_min, bool is_double,
+ X86Assembler* assembler) {
Location op1_loc = locations->InAt(0);
Location op2_loc = locations->InAt(1);
Location out_loc = locations->Out();
@@ -463,25 +450,15 @@
// NaN handling.
__ Bind(&nan);
- // Do we have a constant area pointer?
- if (locations->InAt(2).IsRegister()) {
- Register constant_area = locations->InAt(2).AsRegister<Register>();
- if (is_double) {
- __ movsd(out, codegen->LiteralInt64Address(kDoubleNaN, constant_area));
- } else {
- __ movss(out, codegen->LiteralInt32Address(kFloatNaN, constant_area));
- }
+ if (is_double) {
+ __ pushl(Immediate(kDoubleNaNHigh));
+ __ pushl(Immediate(kDoubleNaNLow));
+ __ movsd(out, Address(ESP, 0));
+ __ addl(ESP, Immediate(8));
} else {
- if (is_double) {
- __ pushl(Immediate(kDoubleNaNHigh));
- __ pushl(Immediate(kDoubleNaNLow));
- __ movsd(out, Address(ESP, 0));
- __ addl(ESP, Immediate(8));
- } else {
- __ pushl(Immediate(kFloatNaN));
- __ movss(out, Address(ESP, 0));
- __ addl(ESP, Immediate(4));
- }
+ __ pushl(Immediate(kFloatNaN));
+ __ movss(out, Address(ESP, 0));
+ __ addl(ESP, Immediate(4));
}
__ jmp(&done);
@@ -506,11 +483,6 @@
// The following is sub-optimal, but all we can do for now. It would be fine to also accept
// the second input to be the output (we can simply swap inputs).
locations->SetOut(Location::SameAsFirstInput());
- HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
- DCHECK(static_or_direct != nullptr);
- if (invoke->InputAt(static_or_direct->GetSpecialInputIndex())->IsX86ComputeBaseMethodAddress()) {
- locations->SetInAt(2, Location::RequiresRegister());
- }
}
void IntrinsicLocationsBuilderX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -518,11 +490,7 @@
}
void IntrinsicCodeGeneratorX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
- GenMinMaxFP(invoke->GetLocations(),
- /* is_min */ true,
- /* is_double */ true,
- GetAssembler(),
- codegen_);
+ GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, /* is_double */ true, GetAssembler());
}
void IntrinsicLocationsBuilderX86::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -530,11 +498,7 @@
}
void IntrinsicCodeGeneratorX86::VisitMathMinFloatFloat(HInvoke* invoke) {
- GenMinMaxFP(invoke->GetLocations(),
- /* is_min */ true,
- /* is_double */ false,
- GetAssembler(),
- codegen_);
+ GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, /* is_double */ false, GetAssembler());
}
void IntrinsicLocationsBuilderX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -542,11 +506,7 @@
}
void IntrinsicCodeGeneratorX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- GenMinMaxFP(invoke->GetLocations(),
- /* is_min */ false,
- /* is_double */ true,
- GetAssembler(),
- codegen_);
+ GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, /* is_double */ true, GetAssembler());
}
void IntrinsicLocationsBuilderX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -554,11 +514,7 @@
}
void IntrinsicCodeGeneratorX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
- GenMinMaxFP(invoke->GetLocations(),
- /* is_min */ false,
- /* is_double */ false,
- GetAssembler(),
- codegen_);
+ GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, /* is_double */ false, GetAssembler());
}
static void GenMinMax(LocationSummary* locations, bool is_min, bool is_long,