Use atomic load/store for volatile IGET/IPUT/SGET/SPUT.

Bug: 14112919
Change-Id: I79316f438dd3adea9b2653ffc968af83671ad282
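
Why the data move itself must be atomic: a Java volatile long or double read must never
observe a half-written value. The standalone C++11 sketch below is illustrative only
(not ART code; some 32-bit toolchains may also need -latomic). It contrasts a 64-bit
value read as two independent 32-bit halves, which is what a pair of core-register
moves on 32-bit x86 amounts to, with a single 64-bit atomic access: the split reader
can observe torn values, the single-access reader cannot.

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>

// Writer publishes either all-zeros or all-ones. A reader that reassembles the value
// from two independent 32-bit halves can observe a mix of the two; a reader using one
// 64-bit atomic access cannot.
std::atomic<uint32_t> lo{0};
std::atomic<uint32_t> hi{0};
std::atomic<uint64_t> whole{0};

int main() {
  std::atomic<bool> stop{false};
  std::thread writer([&stop] {
    for (uint64_t i = 0; !stop.load(std::memory_order_relaxed); ++i) {
      uint64_t v = (i & 1) ? ~UINT64_C(0) : UINT64_C(0);
      lo.store(static_cast<uint32_t>(v), std::memory_order_relaxed);
      hi.store(static_cast<uint32_t>(v >> 32), std::memory_order_relaxed);
      whole.store(v, std::memory_order_relaxed);
    }
  });
  long torn_split = 0;
  long torn_single = 0;
  for (int i = 0; i < 20000000; ++i) {
    uint64_t split = (static_cast<uint64_t>(hi.load(std::memory_order_relaxed)) << 32) |
                     lo.load(std::memory_order_relaxed);
    if (split != 0 && split != ~UINT64_C(0)) ++torn_split;    // halves from different writes
    uint64_t single = whole.load(std::memory_order_relaxed);
    if (single != 0 && single != ~UINT64_C(0)) ++torn_single; // never increments
  }
  stop.store(true, std::memory_order_relaxed);
  writer.join();
  printf("torn reads via two 32-bit halves: %ld\n", torn_split);
  printf("torn reads via one 64-bit access: %ld\n", torn_single);
  return 0;
}
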
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index a03e5f2..f7fcf19 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -32,6 +32,8 @@
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(ThreadOffset<4> offset);
+ LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
@@ -40,6 +42,8 @@
RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) OVERRIDE;
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
@@ -84,6 +88,11 @@
int GetInsnSize(LIR* lir);
bool IsUnconditionalBranch(LIR* lir);
+ // Check support for volatile load/store of a given size.
+ bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
+ // Get the register class for load/store of a field.
+ RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
+
// Required for target - Dalvik-level generators.
void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
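
The two hooks declared above are meant to be consulted before any code is emitted. The
self-contained sketch below uses stand-in enums and free functions (they are not ART's
real types, and this is not the actual Mir2Lir field-access generator code) purely to
show the intended order: ask SupportsVolatileLoadStore() whether an atomic access of
this width exists at all, let RegClassForFieldLoadStore() pick the register class, and
only then go through the *Volatile load/store entry points.

#include <cstdio>

// Stand-ins for the real OpSize / RegisterClass enums; illustrative only.
enum class OpSize { k32, k64 };
enum class RegisterClass { kCoreReg, kFPReg };

// Mirrors the x86 answers: every size is supported, and a wide volatile value must
// sit in an XMM register so that the memory access is one 8-byte instruction.
bool SupportsVolatileLoadStore(OpSize /*size*/) { return true; }
RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  return (is_volatile && size == OpSize::k64) ? RegisterClass::kFPReg
                                              : RegisterClass::kCoreReg;
}

int main() {
  const OpSize size = OpSize::k64;
  const bool is_volatile = true;
  if (!SupportsVolatileLoadStore(size)) {
    puts("no atomic path: fall back to a runtime helper");
    return 0;
  }
  RegisterClass rc = RegClassForFieldLoadStore(size, is_volatile);
  puts(rc == RegisterClass::kFPReg
           ? "evaluate the value into an XMM register, then Load/StoreBaseDispVolatile()"
           : "evaluate the value into a core register, then Load/StoreBaseDispVolatile()");
  return 0;
}
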
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 05bef52..0c61439 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -545,6 +545,21 @@
return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}
+bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
+ return true;
+}
+
+RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
+ if (UNLIKELY(is_volatile)) {
+ // On x86, atomic 64-bit load/store requires an fp register.
+ // Smaller aligned load/store is atomic for both core and fp registers.
+ if (size == k64 || size == kDouble) {
+ return kFPReg;
+ }
+ }
+ return RegClassBySize(size);
+}
+
X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Mir2Lir(cu, mir_graph, arena),
base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
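
What requiring an fp register buys on 32-bit x86: an 8-byte value held in an XMM
register moves to and from memory in one instruction, and an aligned 8-byte access is
architecturally atomic, so the value can never be split into two 4-byte halves. A
minimal standalone illustration using SSE2 intrinsics (not ART code, the helper name is
made up; compile with SSE2 enabled):

#include <emmintrin.h>  // SSE2 intrinsics
#include <cstdint>

// Copies a 64-bit value through an XMM register: a single 8-byte load (movq) followed
// by a single 8-byte store (movq). This is the kind of single-instruction move a
// volatile long/double field needs; a core-register copy on 32-bit x86 would have to
// use two 4-byte moves instead.
void Copy64ThroughXmm(const uint64_t* src, uint64_t* dst) {
  __m128i v = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(src));
  _mm_storel_epi64(reinterpret_cast<__m128i*>(dst), v);
}
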
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 8423ec4..5326c2b 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -666,6 +666,13 @@
return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size);
}
+LIR* X86Mir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
+ // LoadBaseDisp() will emit the correct insn for an atomic load on x86,
+ // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
+ return LoadBaseDisp(r_base, displacement, r_dest, size);
+}
+
LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size) {
// TODO: base this on target.
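
Delegating to the plain LoadBaseDisp() is enough because x86 ordering does the rest: an
ordinary load performed as a single instruction is already acquire-ordered under the
x86 memory model, so only the atomicity of the move has to be arranged, via the
register class chosen above. A standalone C++11 analogue (not ART code):

#include <atomic>
#include <cstdint>

std::atomic<int64_t> field{0};

// An acquire load needs no fence on x86; the hardware memory model already rules out
// the reorderings that acquire forbids. The compiler only has to make the 8-byte
// access itself a single atomic operation, the same division of labor as
// LoadBaseDispVolatile(): the data move is the whole job, ordering comes from the
// architecture.
int64_t ReadVolatileField() {
  return field.load(std::memory_order_acquire);
}
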
@@ -755,6 +762,13 @@
return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size);
}
+LIR* X86Mir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement,
+ RegStorage r_src, OpSize size) {
+ // StoreBaseDisp() will emit the correct insn for an atomic store on x86,
+ // assuming r_src is correctly prepared using RegClassForFieldLoadStore().
+ return StoreBaseDisp(r_base, displacement, r_src, size);
+}
+
LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement,
RegStorage r_src, OpSize size) {
// TODO: base this on target.
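
The store side gets its atomic data move the same way, from the plain StoreBaseDisp(),
and x86 likewise gives release ordering for free. What a Java volatile store needs on
top of that is a store-load barrier after the write; that barrier is not emitted here
but by the callers that handle volatile fields. A standalone C++11 analogue (not ART
code):

#include <atomic>
#include <cstdint>

std::atomic<int64_t> field{0};

// A sequentially consistent 64-bit store: the data move must be one atomic 8-byte
// access (the part this patch arranges for volatile IPUT/SPUT), and the compiler then
// emits a store-load barrier (an mfence or a locked instruction) after it, the part
// that stays with the callers rather than with StoreBaseDispVolatile().
void WriteVolatileField(int64_t v) {
  field.store(v, std::memory_order_seq_cst);
}
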