Revert "ART: Split out more cases of Load/StoreRef, volatile as parameter"
This reverts commit 2689fbad6b5ec1ae8f8c8791a80c6fd3cf24144d.
Reverting because the original change breaks the build.
Change-Id: I9faad4e9a83b32f5f38b2ef95d6f9a33345efa33
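
Note on the interface being restored: this revert drops the single
LoadBaseDisp/StoreBaseDisp entry points that took a VolatileKind parameter and
brings back the separate LoadBaseDispVolatile/StoreBaseDispVolatile methods in
the x86 backend (see the codegen_x86.h and utility_x86.cc hunks below). For
orientation only, the following is a minimal standalone C++ sketch of the
barrier placement that the reverted utility_x86.cc code described
(kLoadLoad/kLoadStore after a volatile load, kStoreStore before and kStoreLoad
after a volatile store). It uses standard C++ fences as stand-ins for
GenMemBarrier(), and the function names are hypothetical; it is not ART
Mir2Lir code.

    #include <atomic>
    #include <cstdint>

    // Illustration only: barrier placement around a "volatile" field access,
    // mirroring the comments in the reverted X86Mir2Lir bodies.
    int32_t VolatileLoad32(const std::atomic<int32_t>* addr) {
      int32_t value = addr->load(std::memory_order_relaxed);  // the load itself
      std::atomic_thread_fence(std::memory_order_acquire);    // ~ kLoadLoad + kLoadStore
      return value;
    }

    void VolatileStore32(std::atomic<int32_t>* addr, int32_t value) {
      std::atomic_thread_fence(std::memory_order_release);    // ~ kStoreStore (conservative)
      addr->store(value, std::memory_order_relaxed);          // the store itself
      std::atomic_thread_fence(std::memory_order_seq_cst);    // ~ kStoreLoad (full barrier)
    }
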
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 425caec..28195ab 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -295,8 +295,7 @@
setup_method_address_[0] = NewLIR1(kX86StartOfMethod, rs_rX86_ARG0.GetReg());
int displacement = SRegOffset(base_of_code_->s_reg_low);
// Native pointer - must be natural word size.
- setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, rs_rX86_ARG0,
- Gen64Bit() ? k64 : k32, kNotVolatile);
+ setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, rs_rX86_ARG0, Gen64Bit() ? k64 : k32);
}
FreeTemp(rs_rX86_ARG0);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 70382c7..d482e58 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -68,16 +68,20 @@
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
+ LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
+ OpSize size) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) OVERRIDE;
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
+ OpSize size) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index f854adb..5082d60 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -144,7 +144,7 @@
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(TargetReg(kSp), src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), src_v_reg_offset, rl_src.reg, k64);
}
}
@@ -178,7 +178,7 @@
*/
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
- LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64);
StoreFinalValueWide(rl_dest, rl_result);
} else {
@@ -363,8 +363,7 @@
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(TargetReg(kSp), src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
- kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32);
}
}
@@ -374,8 +373,7 @@
FlushSpecificReg(reg_info);
ResetDef(rl_src2.reg);
} else {
- StoreBaseDisp(TargetReg(kSp), src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
- kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32);
}
}
@@ -435,7 +433,7 @@
if (rl_result.location == kLocPhysReg) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
- LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64);
StoreFinalValueWide(rl_dest, rl_result);
} else {
Load32Disp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 350cfb8..2f914c1 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -754,7 +754,7 @@
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Unaligned access is allowed on x86.
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
if (size == k64) {
StoreValueWide(rl_dest, rl_result);
} else {
@@ -772,12 +772,12 @@
if (size == k64) {
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
}
return true;
}
@@ -1138,7 +1138,7 @@
NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
break;
case 1:
- LoadBaseDisp(rs_rX86_SP, displacement, dest, k32, kNotVolatile);
+ LoadBaseDisp(rs_rX86_SP, displacement, dest, k32);
break;
default:
m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(),
@@ -1294,8 +1294,7 @@
if (src1_in_reg) {
NewLIR2(kX86Mov32RR, rs_r1.GetReg(), rl_src1.reg.GetHighReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32,
- kNotVolatile);
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32);
}
if (is_square) {
@@ -1318,8 +1317,7 @@
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetHighReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32,
- kNotVolatile);
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32);
}
// EAX <- EAX * 1L (2H * 1L)
@@ -1352,8 +1350,7 @@
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetLowReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32,
- kNotVolatile);
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32);
}
// EDX:EAX <- 2L * 1L (double precision)
@@ -2292,21 +2289,21 @@
if (rl_method.location == kLocPhysReg) {
if (use_declaring_class) {
LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- check_class, kNotVolatile);
+ check_class);
} else {
LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class, kNotVolatile);
- LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
+ check_class);
+ LoadRefDisp(check_class, offset_of_type, check_class);
}
} else {
LoadCurrMethodDirect(check_class);
if (use_declaring_class) {
LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- check_class, kNotVolatile);
+ check_class);
} else {
LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class, kNotVolatile);
- LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
+ check_class);
+ LoadRefDisp(check_class, offset_of_type, check_class);
}
}
@@ -2353,16 +2350,16 @@
} else if (use_declaring_class) {
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- class_reg, kNotVolatile);
+ class_reg);
} else {
// Load dex cache entry into class_reg (kArg2).
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg, kNotVolatile);
+ class_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
(sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);
- LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
+ LoadRefDisp(class_reg, offset_of_type, class_reg);
if (!can_assume_type_is_in_dex_cache) {
// Need to test presence of type in dex cache at runtime.
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
@@ -2395,8 +2392,7 @@
/* Load object->klass_. */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
- kNotVolatile);
+ LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class. */
LIR* branchover = nullptr;
if (type_known_final) {
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index e369d26..078dd5a 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1866,7 +1866,7 @@
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
+ StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0));
}
if (cu_->num_ins == 0) {
@@ -1916,11 +1916,11 @@
}
if (need_flush) {
if (t_loc->wide && t_loc->fp) {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64);
// Increment i to skip the next one
i++;
} else if (t_loc->wide && !t_loc->fp) {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, k64);
// Increment i to skip the next one
i++;
} else {
@@ -2018,14 +2018,14 @@
loc = UpdateLocWide(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32);
}
next_arg++;
}
@@ -2161,18 +2161,18 @@
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
} else {
LoadValueDirectWideFixed(rl_arg, regWide);
- StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
}
i++;
} else {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
} else {
LoadValueDirectFixed(rl_arg, regSingle);
- StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32, kNotVolatile);
+ StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
}
}
}
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 0352808..ac5162e 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -585,7 +585,7 @@
// value.
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::FloatSolo64(low_reg_val),
- kDouble, kNotVolatile);
+ kDouble);
res->target = data_target;
res->flags.fixup = kFixupLoad;
store_method_addr_used_ = true;
@@ -756,22 +756,17 @@
return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size);
}
-LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) {
+LIR* X86Mir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
// LoadBaseDisp() will emit correct insn for atomic load on x86
// assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
+ return LoadBaseDisp(r_base, displacement, r_dest, size);
+}
- LIR* load = LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
- size);
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- // Without context sensitive analysis, we must issue the most conservative barriers.
- // In this case, either a load or store may follow so we issue both barriers.
- GenMemBarrier(kLoadLoad);
- GenMemBarrier(kLoadStore);
- }
-
- return load;
+LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
+ return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
+ size);
}
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
@@ -859,28 +854,20 @@
/* store value at base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
- int scale, OpSize size) {
+ int scale, OpSize size) {
return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size);
}
-LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
- VolatileKind is_volatile) {
- if (UNLIKELY(is_volatile == kVolatile)) {
- // There might have been a store before this volatile one so insert StoreStore barrier.
- GenMemBarrier(kStoreStore);
- }
-
+LIR* X86Mir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement,
+ RegStorage r_src, OpSize size) {
// StoreBaseDisp() will emit correct insn for atomic store on x86
// assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
+ return StoreBaseDisp(r_base, displacement, r_src, size);
+}
- LIR* store = StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- // A load might follow the volatile store so insert a StoreLoad barrier.
- GenMemBarrier(kStoreLoad);
- }
-
- return store;
+LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement,
+ RegStorage r_src, OpSize size) {
+ return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);
}
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,