ARM: VIXL32: Fix breaking changes from recent VIXL update.
Test: m test-art-host
Test: m test-art-target
Change-Id: I02a608bf51b889a2bfff43272a3619582bf9cf20
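
Note for reviewers (not part of the patch): most of the churn below tracks two VIXL interface changes. CodeBufferCheckScope is now referenced from the top-level vixl namespace rather than vixl::aarch64, with the check policy spelled kReserveBufferSpace instead of kCheck, and the aarch32 macro assembler appears to want explicit MemOperand / Operand::From() wrappers where bare registers and raw integers used to be accepted. A minimal sketch of the new scope usage, with masm and size as placeholders (header path as in the VIXL source tree):

    #include <cstddef>
    #include "aarch64/macro-assembler-aarch64.h"

    void EmitExactlyNBytes(vixl::aarch64::MacroAssembler* masm, size_t size) {
      // Reserve `size` bytes of code buffer up front; the scope is meant to
      // assert (in debug builds) that exactly that much code gets emitted.
      vixl::CodeBufferCheckScope scope(masm,
                                       size,
                                       vixl::CodeBufferCheckScope::kReserveBufferSpace,
                                       vixl::CodeBufferCheckScope::kExactSize);
      // ... emit exactly `size` bytes here ...
    }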
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 59e1784..a78b3da 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -572,8 +572,10 @@
// We are about to use the assembler to place literals directly. Make sure we have enough
// underlying code buffer and we have generated the jump table with right size.
- CodeBufferCheckScope scope(codegen->GetVIXLAssembler(), num_entries * sizeof(int32_t),
- CodeBufferCheckScope::kCheck, CodeBufferCheckScope::kExactSize);
+ vixl::CodeBufferCheckScope scope(codegen->GetVIXLAssembler(),
+ num_entries * sizeof(int32_t),
+ vixl::CodeBufferCheckScope::kReserveBufferSpace,
+ vixl::CodeBufferCheckScope::kExactSize);
__ Bind(&table_start_);
const ArenaVector<HBasicBlock*>& successors = switch_instr_->GetBlock()->GetSuccessors();
@@ -2260,10 +2262,10 @@
masm->GetCursorAddress<vixl::aarch64::Instruction*>() - kInstructionSize;
if (prev->IsLoadOrStore()) {
// Make sure we emit only exactly one nop.
- vixl::aarch64::CodeBufferCheckScope scope(masm,
- kInstructionSize,
- vixl::aarch64::CodeBufferCheckScope::kCheck,
- vixl::aarch64::CodeBufferCheckScope::kExactSize);
+ vixl::CodeBufferCheckScope scope(masm,
+ kInstructionSize,
+ vixl::CodeBufferCheckScope::kReserveBufferSpace,
+ vixl::CodeBufferCheckScope::kExactSize);
__ nop();
}
}
@@ -4036,7 +4038,8 @@
vixl::aarch64::Label* label = &relative_call_patches_.back().label;
SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(label);
- __ bl(0); // Branch and link to itself. This will be overriden at link time.
+ // Branch and link to itself. This will be overridden at link time.
+ __ bl(static_cast<int64_t>(0));
break;
}
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
@@ -4167,7 +4170,7 @@
DCHECK(reg.IsX());
SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(fixup_label);
- __ adrp(reg, /* offset placeholder */ 0);
+ __ adrp(reg, /* offset placeholder */ static_cast<int64_t>(0));
}
void CodeGeneratorARM64::EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
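
Note on the static_cast<int64_t>(0) additions above (my reading of why they are needed, not something quoted from the VIXL changelog): bl() and adrp() have both an immediate (int64_t) and a Label* overload, and a literal 0 is simultaneously an integer and a null pointer constant, so the bare call is ambiguous; the cast picks the immediate encoding. A stand-alone illustration with stand-in declarations:

    #include <cstdint>

    struct Label {};                      // Stand-in for vixl::aarch64::Label.
    void bl(int64_t /*imm*/) {}           // Stand-in for the immediate overload.
    void bl(Label* /*label*/) {}          // Stand-in for the label overload.

    void Demo() {
      // bl(0);                           // Error: ambiguous, 0 also converts to Label*.
      bl(static_cast<int64_t>(0));        // OK: explicitly selects the immediate form.
    }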
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 7aea616..e399f32 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -47,6 +47,7 @@
using helpers::InputRegisterAt;
using helpers::InputSRegisterAt;
using helpers::InputVRegisterAt;
+using helpers::Int32ConstantFrom;
using helpers::LocationFrom;
using helpers::LowRegisterFrom;
using helpers::LowSRegisterFrom;
@@ -132,7 +133,7 @@
vixl32::Register base = sp;
if (stack_offset != 0) {
base = temps.Acquire();
- __ Add(base, sp, stack_offset);
+ __ Add(base, sp, Operand::From(stack_offset));
}
__ Vstm(F64, base, NO_WRITE_BACK, DRegisterList(d_reg, number_of_d_regs));
}
@@ -180,7 +181,7 @@
vixl32::Register base = sp;
if (stack_offset != 0) {
base = temps.Acquire();
- __ Add(base, sp, stack_offset);
+ __ Add(base, sp, Operand::From(stack_offset));
}
__ Vldm(F64, base, NO_WRITE_BACK, DRegisterList(d_reg, number_of_d_regs));
}
@@ -673,8 +674,8 @@
DCHECK_GE(num_entries, kPackedSwitchCompareJumpThreshold);
// We are about to use the assembler to place literals directly. Make sure we have enough
- // underlying code buffer and we have generated the jump table with right size.
- codegen->GetVIXLAssembler()->GetBuffer().Align();
+ // underlying code buffer and we have generated a jump table of the right size, using
+ // CodeBufferCheckScope::kMaximumSize.
AssemblerAccurateScope aas(codegen->GetVIXLAssembler(),
num_entries * sizeof(int32_t),
CodeBufferCheckScope::kMaximumSize);
@@ -701,7 +702,7 @@
DCHECK_GT(jump_offset, std::numeric_limits<int32_t>::min());
DCHECK_LE(jump_offset, std::numeric_limits<int32_t>::max());
- bb_addresses_[i].get()->UpdateValue(jump_offset, &codegen->GetVIXLAssembler()->GetBuffer());
+ bb_addresses_[i].get()->UpdateValue(jump_offset, codegen->GetVIXLAssembler()->GetBuffer());
}
}
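
The UpdateValue() hunk above drops the address-of because GetBuffer() now appears to hand out the underlying CodeBuffer by pointer rather than by reference. Sketch of the resulting call pattern (AlignCodeBuffer is a hypothetical helper):

    #include "aarch32/macro-assembler-aarch32.h"

    void AlignCodeBuffer(vixl::aarch32::MacroAssembler* masm) {
      // Previously: masm->GetBuffer().Align(), and &masm->GetBuffer() at call
      // sites that needed the buffer itself.
      vixl::CodeBuffer* buffer = masm->GetBuffer();
      buffer->Align();
    }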
@@ -1667,7 +1668,20 @@
// Set the hidden (in r12) argument. It is done here, right before a BLX to prevent other
// instruction from clobbering it as they might use r12 as a scratch register.
DCHECK(hidden_reg.Is(r12));
- __ Mov(hidden_reg, invoke->GetDexMethodIndex());
+
+ {
+ // The VIXL macro assembler may clobber any of the scratch registers that are available to it,
+ // so it checks if the application is using them (by passing them to the macro assembler
+ // methods). The following application of UseScratchRegisterScope corrects VIXL's notion of
+ // what is available, and is the opposite of the standard usage: Instead of requesting a
+ // temporary location, it imposes an external constraint (i.e. a specific register is reserved
+ // for the hidden argument). Note that this works even if VIXL needs a scratch register itself
+ // (to materialize the constant), since the destination register becomes available for such use
+ // internally for the duration of the macro instruction.
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ temps.Exclude(hidden_reg);
+ __ Mov(hidden_reg, invoke->GetDexMethodIndex());
+ }
{
AssemblerAccurateScope aas(GetVIXLAssembler(),
@@ -2458,13 +2472,13 @@
vixl32::Register dividend = InputRegisterAt(instruction, 0);
vixl32::Register temp1 = RegisterFrom(locations->GetTemp(0));
vixl32::Register temp2 = RegisterFrom(locations->GetTemp(1));
- int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ int32_t imm = Int32ConstantFrom(second);
int64_t magic;
int shift;
CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
- __ Mov(temp1, magic);
+ __ Mov(temp1, Operand::From(magic));
__ Smull(temp2, temp1, dividend, temp1);
if (imm > 0 && magic < 0) {
@@ -2857,9 +2871,9 @@
}
// Rotate, or mov to out for zero or word size rotations.
if (rot != 0u) {
- __ Lsr(out_reg_hi, in_reg_hi, rot);
+ __ Lsr(out_reg_hi, in_reg_hi, Operand::From(rot));
__ Orr(out_reg_hi, out_reg_hi, Operand(in_reg_lo, ShiftType::LSL, kArmBitsPerWord - rot));
- __ Lsr(out_reg_lo, in_reg_lo, rot);
+ __ Lsr(out_reg_lo, in_reg_lo, Operand::From(rot));
__ Orr(out_reg_lo, out_reg_lo, Operand(in_reg_hi, ShiftType::LSL, kArmBitsPerWord - rot));
} else {
__ Mov(out_reg_lo, in_reg_lo);
@@ -2874,7 +2888,7 @@
__ And(shift_right, RegisterFrom(rhs), 0x1F);
__ Lsrs(shift_left, RegisterFrom(rhs), 6);
// TODO(VIXL): Check that flags are kept after "vixl32::LeaveFlags" enabled.
- __ Rsb(shift_left, shift_right, kArmBitsPerWord);
+ __ Rsb(shift_left, shift_right, Operand::From(kArmBitsPerWord));
__ B(cc, &shift_by_32_plus_shift_right);
// out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
@@ -3040,11 +3054,11 @@
// Shift the high part
__ Lsl(o_h, high, o_l);
// Shift the low part and `or` what overflew on the high part
- __ Rsb(temp, o_l, kArmBitsPerWord);
+ __ Rsb(temp, o_l, Operand::From(kArmBitsPerWord));
__ Lsr(temp, low, temp);
__ Orr(o_h, o_h, temp);
// If the shift is > 32 bits, override the high part
- __ Subs(temp, o_l, kArmBitsPerWord);
+ __ Subs(temp, o_l, Operand::From(kArmBitsPerWord));
{
AssemblerAccurateScope guard(GetVIXLAssembler(),
3 * kArmInstrMaxSizeInBytes,
@@ -3059,11 +3073,11 @@
// Shift the low part
__ Lsr(o_l, low, o_h);
// Shift the high part and `or` what underflew on the low part
- __ Rsb(temp, o_h, kArmBitsPerWord);
+ __ Rsb(temp, o_h, Operand::From(kArmBitsPerWord));
__ Lsl(temp, high, temp);
__ Orr(o_l, o_l, temp);
// If the shift is > 32 bits, override the low part
- __ Subs(temp, o_h, kArmBitsPerWord);
+ __ Subs(temp, o_h, Operand::From(kArmBitsPerWord));
{
AssemblerAccurateScope guard(GetVIXLAssembler(),
3 * kArmInstrMaxSizeInBytes,
@@ -3077,10 +3091,10 @@
__ And(o_h, second_reg, kMaxLongShiftDistance);
// same as Shr except we use `Lsr`s and not `Asr`s
__ Lsr(o_l, low, o_h);
- __ Rsb(temp, o_h, kArmBitsPerWord);
+ __ Rsb(temp, o_h, Operand::From(kArmBitsPerWord));
__ Lsl(temp, high, temp);
__ Orr(o_l, o_l, temp);
- __ Subs(temp, o_h, kArmBitsPerWord);
+ __ Subs(temp, o_h, Operand::From(kArmBitsPerWord));
{
AssemblerAccurateScope guard(GetVIXLAssembler(),
3 * kArmInstrMaxSizeInBytes,
@@ -3424,7 +3438,7 @@
__ Add(temp, addr, offset);
addr = temp;
}
- __ Ldrexd(out_lo, out_hi, addr);
+ __ Ldrexd(out_lo, out_hi, MemOperand(addr));
}
void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicStore(vixl32::Register addr,
@@ -3444,9 +3458,9 @@
__ Bind(&fail);
// We need a load followed by store. (The address used in a STREX instruction must
// be the same as the address in the most recently executed LDREX instruction.)
- __ Ldrexd(temp1, temp2, addr);
+ __ Ldrexd(temp1, temp2, MemOperand(addr));
codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ Strexd(temp1, value_lo, value_hi, addr);
+ __ Strexd(temp1, value_lo, value_hi, MemOperand(addr));
__ CompareAndBranchIfNonZero(temp1, &fail);
}
@@ -4648,7 +4662,7 @@
}
GetAssembler()->LoadFromOffset(
kLoadWord, card, tr, Thread::CardTableOffset<kArmPointerSize>().Int32Value());
- __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
+ __ Lsr(temp, object, Operand::From(gc::accounting::CardTable::kCardShift));
__ Strb(card, MemOperand(card, temp));
if (can_be_null) {
__ Bind(&is_null);
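
Pulling the r12 reservation trick out of the interface-call hunk above into a self-contained sketch (MaterializeHiddenArgument is hypothetical; method_index stands in for invoke->GetDexMethodIndex()):

    #include "aarch32/macro-assembler-aarch32.h"

    void MaterializeHiddenArgument(vixl::aarch32::MacroAssembler* masm,
                                   uint32_t method_index) {
      using vixl::aarch32::r12;
      vixl::aarch32::UseScratchRegisterScope temps(masm);
      // r12 normally sits in VIXL's scratch pool; excluding it guarantees the
      // macro assembler will not clobber it between this Mov and the BLX that
      // consumes the hidden argument, while r12 itself can still serve as the
      // scratch register used to materialize the constant.
      temps.Exclude(r12);
      masm->Mov(r12, method_index);
    }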
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index 5129daf..d3623f1 100644
--- a/compiler/optimizing/common_arm.h
+++ b/compiler/optimizing/common_arm.h
@@ -139,9 +139,14 @@
HConstant* instr = location.GetConstant();
if (instr->IsIntConstant()) {
return instr->AsIntConstant()->GetValue();
- } else {
- DCHECK(instr->IsNullConstant()) << instr->DebugName();
+ } else if (instr->IsNullConstant()) {
return 0;
+ } else {
+ DCHECK(instr->IsLongConstant()) << instr->DebugName();
+ const int64_t ret = instr->AsLongConstant()->GetValue();
+ DCHECK_GE(ret, std::numeric_limits<int32_t>::min());
+ DCHECK_LE(ret, std::numeric_limits<int32_t>::max());
+ return ret;
}
}
@@ -161,7 +166,7 @@
if (location.IsRegister()) {
return vixl::aarch32::Operand(RegisterFrom(location, type));
} else {
- return vixl::aarch32::Operand(Int64ConstantFrom(location));
+ return vixl::aarch32::Operand(Int32ConstantFrom(location));
}
}
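
The practical effect of the common_arm.h change: OperandFrom() now builds a 32-bit immediate from int constants, null constants, and long constants whose value fits in 32 bits (the DCHECKs above guard the range), instead of always widening through Int64ConstantFrom(). A sketch of the call shape, assuming the surrounding visitor provides `locations`:

    // InAt(1) may hold a register or any of the three constant kinds above;
    // non-register locations go through Int32ConstantFrom().
    vixl::aarch32::Operand rhs = OperandFrom(locations->InAt(1), Primitive::kPrimInt);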
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 7a1ec9f..c8e3534 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -518,7 +518,7 @@
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
ArmVIXLAssembler* assembler = GetAssembler();
// Ignore upper 4B of long address.
- __ Ldrsb(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+ __ Ldrsb(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -528,7 +528,7 @@
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
ArmVIXLAssembler* assembler = GetAssembler();
// Ignore upper 4B of long address.
- __ Ldr(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+ __ Ldr(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -545,9 +545,9 @@
vixl32::Register hi = HighRegisterFrom(invoke->GetLocations()->Out());
if (addr.Is(lo)) {
__ Ldr(hi, MemOperand(addr, 4));
- __ Ldr(lo, addr);
+ __ Ldr(lo, MemOperand(addr));
} else {
- __ Ldr(lo, addr);
+ __ Ldr(lo, MemOperand(addr));
__ Ldr(hi, MemOperand(addr, 4));
}
}
@@ -559,7 +559,7 @@
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
ArmVIXLAssembler* assembler = GetAssembler();
// Ignore upper 4B of long address.
- __ Ldrsh(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+ __ Ldrsh(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}
static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
@@ -576,7 +576,7 @@
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
ArmVIXLAssembler* assembler = GetAssembler();
- __ Strb(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+ __ Strb(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -585,7 +585,7 @@
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
ArmVIXLAssembler* assembler = GetAssembler();
- __ Str(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+ __ Str(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -598,7 +598,7 @@
vixl32::Register addr = LowRegisterFrom(invoke->GetLocations()->InAt(0));
// Worst case: Control register bit SCTLR.A = 0. Then unaligned accesses throw a processor
// exception. So we can't use ldrd as addr may be unaligned.
- __ Str(LowRegisterFrom(invoke->GetLocations()->InAt(1)), addr);
+ __ Str(LowRegisterFrom(invoke->GetLocations()->InAt(1)), MemOperand(addr));
__ Str(HighRegisterFrom(invoke->GetLocations()->InAt(1)), MemOperand(addr, 4));
}
@@ -608,7 +608,7 @@
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
ArmVIXLAssembler* assembler = GetAssembler();
- __ Strh(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+ __ Strh(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}
void IntrinsicLocationsBuilderARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) {
@@ -842,8 +842,8 @@
__ Add(temp_reg, base, offset);
vixl32::Label loop_head;
__ Bind(&loop_head);
- __ Ldrexd(temp_lo, temp_hi, temp_reg);
- __ Strexd(temp_lo, value_lo, value_hi, temp_reg);
+ __ Ldrexd(temp_lo, temp_hi, MemOperand(temp_reg));
+ __ Strexd(temp_lo, value_lo, value_hi, MemOperand(temp_reg));
__ Cmp(temp_lo, 0);
__ B(ne, &loop_head);
} else {
@@ -1042,7 +1042,7 @@
vixl32::Label loop_head;
__ Bind(&loop_head);
- __ Ldrex(tmp, tmp_ptr);
+ __ Ldrex(tmp, MemOperand(tmp_ptr));
__ Subs(tmp, tmp, expected);
@@ -1052,7 +1052,7 @@
CodeBufferCheckScope::kMaximumSize);
__ itt(eq);
- __ strex(eq, tmp, value, tmp_ptr);
+ __ strex(eq, tmp, value, MemOperand(tmp_ptr));
__ cmp(eq, tmp, 1);
}
@@ -1220,7 +1220,7 @@
static_assert(IsAligned<8>(kObjectAlignment),
"String data must be 8-byte aligned for unrolled CompareTo loop.");
- const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ const unsigned char_size = Primitive::ComponentSize(Primitive::kPrimChar);
DCHECK_EQ(char_size, 2u);
UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
@@ -1469,7 +1469,7 @@
__ Bind(&loop);
__ Ldr(out, MemOperand(str, temp1));
__ Ldr(temp2, MemOperand(arg, temp1));
- __ Add(temp1, temp1, sizeof(uint32_t));
+ __ Add(temp1, temp1, Operand::From(sizeof(uint32_t)));
__ Cmp(out, temp2);
__ B(ne, &return_false);
// With string compression, we have compared 4 bytes, otherwise 2 chars.
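
Last, the pattern behind the bulk of the intrinsics changes: loads and stores now take an explicit MemOperand even for a plain base register, and raw integral immediates go through Operand::From(), which accepts any integral type with a fit check. Sketch (LoadWordAndAdvance, out, and addr are placeholders):

    #include "aarch32/macro-assembler-aarch32.h"

    void LoadWordAndAdvance(vixl::aarch32::MacroAssembler* masm,
                            vixl::aarch32::Register out,
                            vixl::aarch32::Register addr) {
      // A bare register is no longer accepted where a memory operand is expected.
      masm->Ldr(out, vixl::aarch32::MemOperand(addr));
      // Operand::From() converts the size_t immediate with a range check instead
      // of relying on an implicit conversion.
      masm->Add(addr, addr, vixl::aarch32::Operand::From(sizeof(uint32_t)));
    }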