Revert "Intrinsify Integer.valueOf."
Heap poisoning missing
jit-gcstress not optimizing it.
bug:30933338
This reverts commit cd0b27287843cfd904dd163056322579ab4bbf27.
Change-Id: I5ece1818afbca5214babb6803f62614a649aedeb
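
For context, the reverted intrinsic fast-paths the boxing cache that
Integer.valueOf is specified to use, and it bails out unless
java.lang.Integer$IntegerCache is initialized and located in the boot
image. A simplified sketch of the reference semantics the removed code
generators mirror (an illustration only, not the libcore source; names
as in java.lang.Integer$IntegerCache):

    // Sketch: values in [IntegerCache.low, IntegerCache.high] (low is
    // -128, high is at least 127) resolve to a pre-allocated Integer
    // from the cache array; anything else allocates a fresh Integer.
    public static Integer valueOf(int i) {
        if (i >= IntegerCache.low && i <= IntegerCache.high) {
            return IntegerCache.cache[i + (-IntegerCache.low)];
        }
        return new Integer(i);
    }

The removed compiler paths follow the same shape: an in-bounds constant
compiles to a load of the cached object's boot-image address, and the
out-of-bounds path calls the runtime to allocate and initialize a new
j.l.Integer.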
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 18c95b3..edccbd4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4094,7 +4094,7 @@
}
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
+ IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
if (intrinsic.TryDispatch(invoke)) {
return;
}
@@ -4107,7 +4107,7 @@
// art::PrepareForRegisterAllocation.
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
- IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
+ IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
if (intrinsic.TryDispatch(invoke)) {
return;
}
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index a85dd84..17d683f 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -19,7 +19,6 @@
#include "art_method.h"
#include "class_linker.h"
#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
#include "invoke_type.h"
#include "mirror/dex_cache-inl.h"
#include "nodes.h"
@@ -179,110 +178,4 @@
return os;
}
-void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
- CodeGenerator* codegen,
- Location return_location,
- Location first_argument_location) {
- if (Runtime::Current()->IsAotCompiler()) {
- if (codegen->GetCompilerOptions().IsBootImage() ||
- codegen->GetCompilerOptions().GetCompilePic()) {
- // TODO(ngeoffray): Support boot image compilation.
- return;
- }
- }
-
- IntegerValueOfInfo info = ComputeIntegerValueOfInfo();
-
- if (info.integer_cache == nullptr ||
- info.integer == nullptr ||
- info.cache == nullptr ||
- info.value_offset == 0 ||
- // low and high cannot be 0, per the spec.
- info.low == 0 ||
- info.high == 0) {
- LOG(ERROR) << "Integer.valueOf will not be optimized";
- return;
- }
-
- // The intrinsic will call the runtime if it needs to allocate a j.l.Integer.
- LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetArena()) LocationSummary(
- invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
- if (!invoke->InputAt(0)->IsConstant()) {
- locations->SetInAt(0, Location::RequiresRegister());
- }
- locations->AddTemp(first_argument_location);
- locations->SetOut(return_location);
-}
-
-IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo() {
- // Note that we could cache all of the data looked up here, but there's no good
- // location for it. We don't want to add it to WellKnownClasses, to avoid creating
- // global JNI values. Adding it as state to the compiler singleton seems like the
- // wrong separation of concerns.
- // The need for this data should be pretty rare, though.
-
- // The most common case is that the classes are in the boot image and initialized,
- // which is easy to generate code for. We bail if not.
- Thread* self = Thread::Current();
- ScopedObjectAccess soa(self);
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- gc::Heap* heap = runtime->GetHeap();
- IntegerValueOfInfo info;
- info.integer_cache = class_linker->FindSystemClass(self, "Ljava/lang/Integer$IntegerCache;");
- if (info.integer_cache == nullptr) {
- self->ClearException();
- return info;
- }
- if (!heap->ObjectIsInBootImageSpace(info.integer_cache) || !info.integer_cache->IsInitialized()) {
- // Optimization only works if the class is initialized and in the boot image.
- return info;
- }
- info.integer = class_linker->FindSystemClass(self, "Ljava/lang/Integer;");
- if (info.integer == nullptr) {
- self->ClearException();
- return info;
- }
- if (!heap->ObjectIsInBootImageSpace(info.integer) || !info.integer->IsInitialized()) {
- // Optimization only works if the class is initialized and in the boot image.
- return info;
- }
-
- ArtField* field = info.integer_cache->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
- if (field == nullptr) {
- return info;
- }
- info.cache = static_cast<mirror::ObjectArray<mirror::Object>*>(
- field->GetObject(info.integer_cache).Ptr());
- if (info.cache == nullptr) {
- return info;
- }
-
- if (!heap->ObjectIsInBootImageSpace(info.cache)) {
- // Optimization only works if the object is in the boot image.
- return info;
- }
-
- field = info.integer->FindDeclaredInstanceField("value", "I");
- if (field == nullptr) {
- return info;
- }
- info.value_offset = field->GetOffset().Int32Value();
-
- field = info.integer_cache->FindDeclaredStaticField("low", "I");
- if (field == nullptr) {
- return info;
- }
- info.low = field->GetInt(info.integer_cache);
-
- field = info.integer_cache->FindDeclaredStaticField("high", "I");
- if (field == nullptr) {
- return info;
- }
- info.high = field->GetInt(info.integer_cache);
-
- DCHECK_EQ(info.cache->GetLength(), info.high - info.low + 1);
- return info;
-}
-
} // namespace art
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 9da5a7f..6425e13 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -113,39 +113,6 @@
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
}
- static void ComputeIntegerValueOfLocations(HInvoke* invoke,
- CodeGenerator* codegen,
- Location return_location,
- Location first_argument_location);
-
- // Temporary data structure for holding Integer.valueOf useful data. We only
- // use it if the mirror::Class* are in the boot image, so it is fine to keep raw
- // mirror::Class pointers in this structure.
- struct IntegerValueOfInfo {
- IntegerValueOfInfo()
- : integer_cache(nullptr),
- integer(nullptr),
- cache(nullptr),
- low(0),
- high(0),
- value_offset(0) {}
-
- // The java.lang.Integer$IntegerCache class.
- mirror::Class* integer_cache;
- // The java.lang.Integer class.
- mirror::Class* integer;
- // Value of java.lang.Integer$IntegerCache#cache.
- mirror::ObjectArray<mirror::Object>* cache;
- // Value of java.lang.Integer$IntegerCache#low.
- int32_t low;
- // Value of java.lang.Integer$IntegerCache#high.
- int32_t high;
- // The offset of java.lang.Integer.value.
- int32_t value_offset;
- };
-
- static IntegerValueOfInfo ComputeIntegerValueOfInfo();
-
protected:
IntrinsicVisitor() {}
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 1808a99..c262cf9 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -129,7 +129,6 @@
IntrinsicLocationsBuilderARM::IntrinsicLocationsBuilderARM(CodeGeneratorARM* codegen)
: arena_(codegen->GetGraph()->GetArena()),
- codegen_(codegen),
assembler_(codegen->GetAssembler()),
features_(codegen->GetInstructionSetFeatures()) {}
@@ -2645,74 +2644,6 @@
__ Bind(slow_path->GetExitLabel());
}
-void IntrinsicLocationsBuilderARM::VisitIntegerValueOf(HInvoke* invoke) {
- InvokeRuntimeCallingConvention calling_convention;
- IntrinsicVisitor::ComputeIntegerValueOfLocations(
- invoke,
- codegen_,
- Location::RegisterLocation(R0),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-}
-
-void IntrinsicCodeGeneratorARM::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
- LocationSummary* locations = invoke->GetLocations();
- ArmAssembler* const assembler = GetAssembler();
-
- Register out = locations->Out().AsRegister<Register>();
- InvokeRuntimeCallingConvention calling_convention;
- Register argument = calling_convention.GetRegisterAt(0);
- if (invoke->InputAt(0)->IsConstant()) {
- int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
- // Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
- } else {
- // Allocate and initialize a new j.l.Integer.
- // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
- // JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadLiteral(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- __ LoadImmediate(IP, value);
- __ StoreToOffset(kStoreWord, IP, out, info.value_offset);
- // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
- // one.
- codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
- }
- } else {
- Register in = locations->InAt(0).AsRegister<Register>();
- // Check bounds of our cache.
- __ AddConstant(out, in, -info.low);
- __ CmpConstant(out, info.high - info.low + 1);
- Label allocate, done;
- __ b(&allocate, HS);
- // If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ LoadLiteral(IP, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
- codegen_->LoadFromShiftedRegOffset(Primitive::kPrimNot, locations->Out(), IP, out);
- __ b(&done);
- __ Bind(&allocate);
- // Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadLiteral(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- __ StoreToOffset(kStoreWord, in, out, info.value_offset);
- // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
- // one.
- codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
- __ Bind(&done);
- }
-}
-
UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble)
UNIMPLEMENTED_INTRINSIC(ARM, MathMinFloatFloat)
UNIMPLEMENTED_INTRINSIC(ARM, MathMaxDoubleDouble)
diff --git a/compiler/optimizing/intrinsics_arm.h b/compiler/optimizing/intrinsics_arm.h
index 2840863..7f20ea4 100644
--- a/compiler/optimizing/intrinsics_arm.h
+++ b/compiler/optimizing/intrinsics_arm.h
@@ -51,7 +51,6 @@
private:
ArenaAllocator* arena_;
- CodeGenerator* codegen_;
ArmAssembler* assembler_;
const ArmInstructionSetFeatures& features_;
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index abb5c0a..86e5429 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2924,78 +2924,6 @@
__ Bind(slow_path->GetExitLabel());
}
-void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
- InvokeRuntimeCallingConvention calling_convention;
- IntrinsicVisitor::ComputeIntegerValueOfLocations(
- invoke,
- codegen_,
- calling_convention.GetReturnLocation(Primitive::kPrimNot),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
-}
-
-void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
- LocationSummary* locations = invoke->GetLocations();
- MacroAssembler* masm = GetVIXLAssembler();
-
- Register out = RegisterFrom(locations->Out(), Primitive::kPrimNot);
- UseScratchRegisterScope temps(masm);
- Register temp = temps.AcquireW();
- InvokeRuntimeCallingConvention calling_convention;
- Register argument = calling_convention.GetRegisterAt(0);
- if (invoke->InputAt(0)->IsConstant()) {
- int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
- // Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
- } else {
- // Allocate and initialize a new j.l.Integer.
- // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
- // JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- __ Mov(temp.W(), value);
- __ Str(temp.W(), HeapOperand(out.W(), info.value_offset));
- // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
- // one.
- codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
- }
- } else {
- Register in = RegisterFrom(locations->InAt(0), Primitive::kPrimInt);
- // Check bounds of our cache.
- __ Add(out.W(), in.W(), -info.low);
- __ Cmp(out.W(), info.high - info.low + 1);
- vixl::aarch64::Label allocate, done;
- __ B(&allocate, hs);
- // If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ Ldr(temp.W(), codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
- MemOperand source = HeapOperand(
- temp, out.X(), LSL, Primitive::ComponentSizeShift(Primitive::kPrimNot));
- codegen_->Load(Primitive::kPrimNot, out, source);
- __ B(&done);
- __ Bind(&allocate);
- // Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- __ Str(in.W(), HeapOperand(out.W(), info.value_offset));
- // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
- // one.
- codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
- __ Bind(&done);
- }
-}
-
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 3c53517..28e41cb 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -38,8 +38,7 @@
class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
public:
- explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena, CodeGeneratorARM64* codegen)
- : arena_(arena), codegen_(codegen) {}
+ explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena) : arena_(arena) {}
// Define visitor methods.
@@ -57,7 +56,6 @@
private:
ArenaAllocator* arena_;
- CodeGeneratorARM64* codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
};
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index fe05edd..70a3d38 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -203,7 +203,6 @@
IntrinsicLocationsBuilderARMVIXL::IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen)
: arena_(codegen->GetGraph()->GetArena()),
- codegen_(codegen),
assembler_(codegen->GetAssembler()),
features_(codegen->GetInstructionSetFeatures()) {}
@@ -2989,76 +2988,6 @@
__ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
-void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
- InvokeRuntimeCallingConventionARMVIXL calling_convention;
- IntrinsicVisitor::ComputeIntegerValueOfLocations(
- invoke,
- codegen_,
- LocationFrom(r0),
- LocationFrom(calling_convention.GetRegisterAt(0)));
-}
-
-void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
- LocationSummary* locations = invoke->GetLocations();
- ArmVIXLAssembler* const assembler = GetAssembler();
-
- vixl32::Register out = RegisterFrom(locations->Out());
- UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- InvokeRuntimeCallingConventionARMVIXL calling_convention;
- vixl32::Register argument = calling_convention.GetRegisterAt(0);
- if (invoke->InputAt(0)->IsConstant()) {
- int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
- // Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
- } else {
- // Allocate and initialize a new j.l.Integer.
- // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
- // JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- __ Mov(temp, value);
- assembler->StoreToOffset(kStoreWord, temp, out, info.value_offset);
- // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
- // one.
- codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
- }
- } else {
- vixl32::Register in = RegisterFrom(locations->InAt(0));
- // Check bounds of our cache.
- __ Add(out, in, -info.low);
- __ Cmp(out, info.high - info.low + 1);
- vixl32::Label allocate, done;
- __ B(hs, &allocate);
- // If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ Ldr(temp, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
- codegen_->LoadFromShiftedRegOffset(Primitive::kPrimNot, locations->Out(), temp, out);
- __ B(&done);
- __ Bind(&allocate);
- // Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- assembler->StoreToOffset(kStoreWord, in, out, info.value_offset);
- // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
- // one.
- codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
- __ Bind(&done);
- }
-}
-
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index 023cba1..6e79cb7 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -47,7 +47,6 @@
private:
ArenaAllocator* arena_;
- CodeGenerator* codegen_;
ArmVIXLAssembler* assembler_;
const ArmInstructionSetFeatures& features_;
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 536b7f6..64a6840 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2682,8 +2682,6 @@
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(MIPS, IntegerValueOf)
-
UNREACHABLE_INTRINSICS(MIPS)
#undef __
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 1112eed..3888828 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2075,8 +2075,6 @@
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)
-UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerValueOf)
-
UNREACHABLE_INTRINSICS(MIPS64)
#undef __
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 432984e..e1b7ea5 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3335,64 +3335,6 @@
__ Bind(intrinsic_slow_path->GetExitLabel());
}
-void IntrinsicLocationsBuilderX86::VisitIntegerValueOf(HInvoke* invoke) {
- InvokeRuntimeCallingConvention calling_convention;
- IntrinsicVisitor::ComputeIntegerValueOfLocations(
- invoke,
- codegen_,
- Location::RegisterLocation(EAX),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-}
-
-void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
- LocationSummary* locations = invoke->GetLocations();
- X86Assembler* assembler = GetAssembler();
-
- Register out = locations->Out().AsRegister<Register>();
- InvokeRuntimeCallingConvention calling_convention;
- if (invoke->InputAt(0)->IsConstant()) {
- int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
- // Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ movl(out, Immediate(address));
- } else {
- // Allocate and initialize a new j.l.Integer.
- // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
- // JIT object table.
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- __ movl(Address(out, info.value_offset), Immediate(value));
- }
- } else {
- Register in = locations->InAt(0).AsRegister<Register>();
- // Check bounds of our cache.
- __ leal(out, Address(in, -info.low));
- __ cmpl(out, Immediate(info.high - info.low + 1));
- NearLabel allocate, done;
- __ j(kAboveEqual, &allocate);
- // If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ movl(out, Address(out, TIMES_4, data_offset + address));
- __ jmp(&done);
- __ Bind(&allocate);
- // Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- __ movl(Address(out, info.value_offset), in);
- __ Bind(&done);
- }
-}
-
UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 57992a9..05d270a 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2995,64 +2995,6 @@
__ Bind(slow_path->GetExitLabel());
}
-void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
- InvokeRuntimeCallingConvention calling_convention;
- IntrinsicVisitor::ComputeIntegerValueOfLocations(
- invoke,
- codegen_,
- Location::RegisterLocation(RAX),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
- LocationSummary* locations = invoke->GetLocations();
- X86_64Assembler* assembler = GetAssembler();
-
- CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- InvokeRuntimeCallingConvention calling_convention;
- if (invoke->InputAt(0)->IsConstant()) {
- int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
- // Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ movl(out, Immediate(address));
- } else {
- // Allocate and initialize a new j.l.Integer.
- // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
- // JIT object table.
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- __ movl(Address(out, info.value_offset), Immediate(value));
- }
- } else {
- CpuRegister in = locations->InAt(0).AsRegister<CpuRegister>();
- // Check bounds of our cache.
- __ leal(out, Address(in, -info.low));
- __ cmpl(out, Immediate(info.high - info.low + 1));
- NearLabel allocate, done;
- __ j(kAboveEqual, &allocate);
- // If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ movl(out, Address(out, TIMES_4, data_offset + address));
- __ jmp(&done);
- __ Bind(&allocate);
- // Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- __ movl(Address(out, info.value_offset), in);
- __ Bind(&done);
- }
-}
-
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index c39aed2..8a9e618 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1914,9 +1914,6 @@
virtual bool IsControlFlow() const { return false; }
- // Can the instruction throw?
- // TODO: We should rename to CanVisiblyThrow, as some instructions (like HNewInstance),
- // could throw OOME, but it is still OK to remove them if they are unused.
virtual bool CanThrow() const { return false; }
bool CanThrowIntoCatchBlock() const { return CanThrow() && block_->IsTryBlock(); }