Use atomic load/store for volatile IGET/IPUT/SGET/SPUT.
Bug: 14112919
Change-Id: I79316f438dd3adea9b2653ffc968af83671ad282
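
How the new target hooks are meant to compose (a rough sketch of the calling side, not the actual common-code changes): for a volatile field access, the shared Mir2Lir code asks the target whether an atomic load/store of that size is possible at all (SupportsVolatileLoadStore), picks a register class via RegClassForFieldLoadStore() so the value ends up in a register the target can access atomically, and then uses LoadBaseDispVolatile()/StoreBaseDispVolatile() together with the memory barriers that volatile semantics require. The snippet below is a minimal, self-contained illustration of just the arm64 register-class decision; the enums and the RegClassBySize() stand-in are simplified placeholders, not the real ART types.

    // Simplified stand-ins for the ART types, for illustration only.
    #include <cstdio>

    enum OpSize { k64, kWord, kSingle, kDouble, kUnsignedHalf, kSignedHalf,
                  kUnsignedByte, kSignedByte };
    enum RegisterClass { kCoreReg, kFPReg, kAnyReg };

    // Placeholder for the default size-based choice (RegClassBySize() in ART);
    // here it simply allows any register class.
    static RegisterClass RegClassBySize(OpSize /*size*/) {
      return kAnyReg;
    }

    // Mirrors Arm64Mir2Lir::RegClassForFieldLoadStore() from this change:
    // volatile accesses wider than a byte are forced into core registers,
    // because arm64 fp register load/store is atomic only for single bytes.
    static RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
      if (is_volatile && size != kSignedByte && size != kUnsignedByte) {
        return kCoreReg;
      }
      return RegClassBySize(size);
    }

    int main() {
      // A volatile double goes through a core register so the 64-bit access
      // stays single-copy atomic; a plain double keeps the default choice.
      std::printf("volatile double -> %d (kCoreReg)\n",
                  RegClassForFieldLoadStore(kDouble, /*is_volatile=*/ true));
      std::printf("plain double    -> %d (kAnyReg)\n",
                  RegClassForFieldLoadStore(kDouble, /*is_volatile=*/ false));
      return 0;
    }

Forcing wide volatile values through core registers is what makes the plain LoadBaseDisp()/StoreBaseDisp() paths sufficient here: an aligned 64-bit core-register ldr/str is single-copy atomic on arm64, so no exclusive-access sequence is needed, only the surrounding barriers.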
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 903be10..26084a2 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -32,6 +32,8 @@
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(A64ThreadOffset offset);
+ LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
@@ -40,6 +42,8 @@
RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) OVERRIDE;
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
@@ -86,6 +90,11 @@
int GetInsnSize(LIR* lir);
bool IsUnconditionalBranch(LIR* lir);
+ // Check support for volatile load/store of a given size.
+ bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
+ // Get the register class for load/store of a field.
+ RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
+
// Required for target - Dalvik-level generators.
void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 7e07e15..e4764eb 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -535,6 +535,20 @@
return (lir->opcode == kA64B1t);
}
+bool Arm64Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
+ return true;
+}
+
+RegisterClass Arm64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
+ if (UNLIKELY(is_volatile)) {
+ // On arm64, fp register load/store is atomic only for single bytes.
+ if (size != kSignedByte && size != kUnsignedByte) {
+ return kCoreReg;
+ }
+ }
+ return RegClassBySize(size);
+}
+
Arm64Mir2Lir::Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Mir2Lir(cu, mir_graph, arena) {
// Sanity check - make sure encoding map lines up.
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index e46e201..ae17711 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -930,6 +930,13 @@
return load;
}
+LIR* Arm64Mir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
+ // LoadBaseDisp() will emit correct insn for atomic load on arm64
+ // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
+ return LoadBaseDisp(r_base, displacement, r_dest, size);
+}
+
LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size) {
return LoadBaseDispBody(r_base, displacement, r_dest, size);
@@ -1032,8 +1039,15 @@
return store;
}
+LIR* Arm64Mir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) {
+ // StoreBaseDisp() will emit correct insn for atomic store on arm64
+ // assuming r_src is correctly prepared using RegClassForFieldLoadStore().
+ return StoreBaseDisp(r_base, displacement, r_src, size);
+}
+
LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) {
+ OpSize size) {
return StoreBaseDispBody(r_base, displacement, r_src, size);
}