Merge "Pass the real capacity to CreateRosAlloc."
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index c15594a..30012d0 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -221,18 +221,10 @@
 
   virtual ~CmdlineArgs() {}
 
- protected:
-  virtual ParseStatus ParseCustom(const StringPiece& option, std::string* error_msg) {
-    UNUSED(option);
-    UNUSED(error_msg);
-
-    return kParseUnknownArgument;
-  }
-
-  virtual ParseStatus ParseChecks(std::string* error_msg) {
+  bool ParseCheckBootImage(std::string* error_msg) {
     if (boot_image_location_ == nullptr) {
       *error_msg = "--boot-image must be specified";
-      return kParseError;
+      return false;
     }
 
     DBG_LOG << "boot image location: " << boot_image_location_;
@@ -243,7 +235,7 @@
       size_t file_name_idx = boot_image_location.rfind("/");
      if (file_name_idx == std::string::npos) {  // Prevent an InsertIsaDirectory check failure.
         *error_msg = "Boot image location must have a / in it";
-        return kParseError;
+        return false;
       }
 
       // Don't let image locations with the 'arch' in it through, since it's not a location.
@@ -263,7 +255,7 @@
 
         if (GetInstructionSetFromString(parent_dir_name.c_str()) != kNone) {
           *error_msg = "Do not specify the architecture as part of the boot image location";
-          return kParseError;
+          return false;
         }
       }
 
@@ -272,19 +264,28 @@
       if (!LocationToFilename(boot_image_location, instruction_set_, &file_name)) {
         *error_msg = StringPrintf("No corresponding file for location '%s' exists",
                                   file_name.c_str());
-        return kParseError;
+        return false;
       }
 
       DBG_LOG << "boot_image_filename does exist: " << file_name;
     }
 
-    return kParseOk;
+    return true;
   }
 
- private:
   void PrintUsage() {
     fprintf(stderr, "%s", GetUsage().c_str());
   }
+
+ protected:
+  virtual ParseStatus ParseCustom(const StringPiece& option ATTRIBUTE_UNUSED,
+                                  std::string* error_msg ATTRIBUTE_UNUSED) {
+    return kParseUnknownArgument;
+  }
+
+  virtual ParseStatus ParseChecks(std::string* error_msg ATTRIBUTE_UNUSED) {
+    return kParseOk;
+  }
 };
 
 template <typename Args = CmdlineArgs>
@@ -300,14 +301,21 @@
       return EXIT_FAILURE;
     }
 
-    std::unique_ptr<Runtime> runtime = CreateRuntime(args.get());
-    if (runtime == nullptr) {
-      return EXIT_FAILURE;
-    }
-
     bool needs_runtime = NeedsRuntime();
+    std::unique_ptr<Runtime> runtime;
 
     if (needs_runtime) {
+      std::string error_msg;
+      if (!args_->ParseCheckBootImage(&error_msg)) {
+        fprintf(stderr, "%s\n", error_msg.c_str());
+        args_->PrintUsage();
+        return EXIT_FAILURE;
+      }
+      runtime.reset(CreateRuntime(args.get()));
+      if (runtime == nullptr) {
+        return EXIT_FAILURE;
+      }
       if (!ExecuteWithRuntime(runtime.get())) {
         return EXIT_FAILURE;
       }
@@ -358,11 +366,10 @@
   Args* args_ = nullptr;
 
  private:
-  std::unique_ptr<Runtime> CreateRuntime(CmdlineArgs* args) {
+  Runtime* CreateRuntime(CmdlineArgs* args) {
     CHECK(args != nullptr);
 
-    return std::unique_ptr<Runtime>(StartRuntime(args->boot_image_location_,
-                                                 args->instruction_set_));
+    return StartRuntime(args->boot_image_location_, args->instruction_set_);
   }
 };
 }  // namespace art
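
The cmdline.h change above splits the boot-image check out into the public ParseCheckBootImage() and defers both the check and runtime creation until NeedsRuntime() requires them, so runtime-free tools skip both. Custom parsing now goes through the two virtual hooks, which default to no-ops. A minimal sketch of how a subclass would use them; MyToolArgs and its --dump-raw flag are hypothetical, invented for illustration:

    // Sketch only: MyToolArgs and --dump-raw are hypothetical.
    struct MyToolArgs : public art::CmdlineArgs {
     public:
      bool dump_raw_ = false;

     protected:
      ParseStatus ParseCustom(const art::StringPiece& option,
                              std::string* error_msg ATTRIBUTE_UNUSED) OVERRIDE {
        if (option == "--dump-raw") {
          dump_raw_ = true;
          return kParseOk;
        }
        return kParseUnknownArgument;  // Not ours; let the caller report it.
      }

      ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
        if (!dump_raw_) {
          *error_msg = "--dump-raw must be specified";
          return kParseError;
        }
        return kParseOk;
      }
    };
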
diff --git a/compiler/dex/bb_optimizations.cc b/compiler/dex/bb_optimizations.cc
index 6a610ab..e535813 100644
--- a/compiler/dex/bb_optimizations.cc
+++ b/compiler/dex/bb_optimizations.cc
@@ -51,20 +51,4 @@
   return false;
 }
 
-/*
- * BasicBlock Optimization pass implementation start.
- */
-void BBOptimizations::Start(PassDataHolder* data) const {
-  DCHECK(data != nullptr);
-  CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-  DCHECK(c_unit != nullptr);
-  /*
- * This pass has a different ordering depending on the suppress exception,
-   * so do the pass here for now:
-   *   - Later, the Start should just change the ordering and we can move the extended
-   *     creation into the pass driver's main job with a new iterator
-   */
-  c_unit->mir_graph->BasicBlockOptimization();
-}
-
 }  // namespace art
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 0407e32..b07a415 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -284,7 +284,8 @@
  */
 class BBOptimizations : public PassME {
  public:
-  BBOptimizations() : PassME("BBOptimizations", kNoNodes, "5_post_bbo_cfg") {
+  BBOptimizations()
+      : PassME("BBOptimizations", kNoNodes, kOptimizationBasicBlockChange, "5_post_bbo_cfg") {
   }
 
   bool Gate(const PassDataHolder* data) const {
@@ -294,7 +295,28 @@
     return ((c_unit->disable_opt & (1 << kBBOpt)) == 0);
   }
 
-  void Start(PassDataHolder* data) const;
+  void Start(PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph->BasicBlockOptimizationStart();
+
+    /*
+     * This pass has a different ordering depending on whether exception edges are suppressed,
+     * so do the pass here for now:
+     *   - Later, the Start should just change the ordering and we can move the extended
+     *     creation into the pass driver's main job with a new iterator
+     */
+    c_unit->mir_graph->BasicBlockOptimization();
+  }
+
+  void End(PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph->BasicBlockOptimizationEnd();
+    down_cast<PassMEDataHolder*>(data)->dirty = !c_unit->mir_graph->DfsOrdersUpToDate();
+  }
 };
 
 /**
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 7ff06a0..7edb490 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -555,7 +555,7 @@
  * The current recipe is as follows:
  * -# Use AnyStore ~= (LoadStore | StoreStore) ~= release barrier before volatile store.
  * -# Use AnyAny barrier after volatile store.  (StoreLoad is as expensive.)
- * -# Use LoadAny barrier ~= (LoadLoad | LoadStore) ~= acquire barrierafter each volatile load.
+ * -# Use LoadAny barrier ~= (LoadLoad | LoadStore) ~= acquire barrier after each volatile load.
  * -# Use StoreStore barrier after all stores but before return from any constructor whose
  *    class has final fields.
  * -# Use NTStoreStore to order non-temporal stores with respect to all later
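
The recipe above maps onto C++11 fences roughly as follows; this is an illustrative sketch of the intended ordering for a volatile field published through 'flag', not the code the compiler emits:

    #include <atomic>

    int payload = 0;                 // Ordinary field published via the volatile 'flag'.
    std::atomic<bool> flag{false};   // Stands in for a Java volatile field.

    void VolatileStore() {
      payload = 42;
      std::atomic_thread_fence(std::memory_order_release);  // AnyStore before the volatile store.
      flag.store(true, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // AnyAny after the volatile store.
    }

    int VolatileLoad() {
      bool ready = flag.load(std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);  // LoadAny after the volatile load.
      return ready ? payload : -1;
    }
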
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index 7e3b4d8..18e3469 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -215,7 +215,6 @@
         bb->data_flow_info->live_in_v = live_in_v_;
       }
     }
-    cu_.mir_graph->num_blocks_ = count;
     ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
     cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
     ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 7b53b14..0f0846c 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1151,7 +1151,7 @@
     skip_compilation = true;
     *skip_message = "Huge method: " + std::to_string(GetNumDalvikInsns());
    // If we've got a huge number of basic blocks, don't bother with further analysis.
-    if (static_cast<size_t>(num_blocks_) > (compiler_options.GetHugeMethodThreshold() / 2)) {
+    if (static_cast<size_t>(GetNumBlocks()) > (compiler_options.GetHugeMethodThreshold() / 2)) {
       return true;
     }
   } else if (compiler_options.IsLargeMethod(GetNumDalvikInsns()) &&
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 71ad635..312a6eb 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -91,6 +91,9 @@
       num_reachable_blocks_(0),
       max_num_reachable_blocks_(0),
       dfs_orders_up_to_date_(false),
+      domination_up_to_date_(false),
+      mir_ssa_rep_up_to_date_(false),
+      topological_order_up_to_date_(false),
       dfs_order_(arena->Adapter(kArenaAllocDfsPreOrder)),
       dfs_post_order_(arena->Adapter(kArenaAllocDfsPostOrder)),
       dom_post_order_traversal_(arena->Adapter(kArenaAllocDomPostOrder)),
@@ -105,7 +108,6 @@
       try_block_addr_(NULL),
       entry_block_(NULL),
       exit_block_(NULL),
-      num_blocks_(0),
       current_code_item_(NULL),
       dex_pc_to_block_map_(arena->Adapter()),
       m_units_(arena->Adapter()),
@@ -691,7 +693,7 @@
   if (current_method_ == 0) {
     DCHECK(entry_block_ == NULL);
     DCHECK(exit_block_ == NULL);
-    DCHECK_EQ(num_blocks_, 0U);
+    DCHECK_EQ(GetNumBlocks(), 0U);
     // Use id 0 to represent a null block.
     BasicBlock* null_block = CreateNewBB(kNullBlock);
     DCHECK_EQ(null_block->id, NullBasicBlockId);
@@ -1740,6 +1742,9 @@
 
   // Update the maximum number of reachable blocks.
   max_num_reachable_blocks_ = num_reachable_blocks_;
+
+  // Mark MIR SSA representations as up to date.
+  mir_ssa_rep_up_to_date_ = true;
 }
 
 size_t MIRGraph::GetNumDalvikInsns() const {
@@ -2005,6 +2010,7 @@
   topological_order_loop_head_stack_.clear();
   topological_order_loop_head_stack_.reserve(max_nested_loops);
   max_nested_loops_ = max_nested_loops;
+  topological_order_up_to_date_ = true;
 }
 
 bool BasicBlock::IsExceptionBlock() const {
@@ -2246,12 +2252,6 @@
   }
   predecessors.clear();
 
-  KillUnreachable(mir_graph);
-}
-
-void BasicBlock::KillUnreachable(MIRGraph* mir_graph) {
-  DCHECK(predecessors.empty());  // Unreachable.
-
   // Mark as dead and hidden.
   block_type = kDead;
   hidden = true;
@@ -2270,9 +2270,6 @@
   ChildBlockIterator iter(this, mir_graph);
   for (BasicBlock* succ_bb = iter.Next(); succ_bb != nullptr; succ_bb = iter.Next()) {
     succ_bb->ErasePredecessor(id);
-    if (succ_bb->predecessors.empty()) {
-      succ_bb->KillUnreachable(mir_graph);
-    }
   }
 
   // Remove links to children.
@@ -2393,7 +2390,8 @@
 // Create a new basic block with block_id equal to its index in block_list_,
 // i.e. the number of blocks created so far.
 BasicBlock* MIRGraph::CreateNewBB(BBType block_type) {
-  BasicBlock* res = NewMemBB(block_type, num_blocks_++);
+  BasicBlockId id = static_cast<BasicBlockId>(block_list_.size());
+  BasicBlock* res = NewMemBB(block_type, id);
   block_list_.push_back(res);
   return res;
 }
@@ -2403,10 +2401,6 @@
   driver.Launch();
 }
 
-void MIRGraph::InitializeBasicBlockData() {
-  num_blocks_ = block_list_.size();
-}
-
 int MIR::DecodedInstruction::FlagsOf() const {
   // Calculate new index.
   int idx = static_cast<int>(opcode) - kNumPackedOpcodes;
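
With num_blocks_ removed, CreateNewBB() derives the new id from block_list_.size(), so a block's id is by construction its index in the list and GetNumBlocks() no longer needs a separate counter. A toy sketch of that invariant (MiniGraph is illustrative, not ART code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Block { uint16_t id; };

    struct MiniGraph {
      std::vector<Block*> block_list_;

      Block* CreateNewBB() {
        uint16_t id = static_cast<uint16_t>(block_list_.size());
        block_list_.push_back(new Block{id});  // id == index, always.
        return block_list_.back();
      }

      size_t GetNumBlocks() const { return block_list_.size(); }
    };

    int main() {
      MiniGraph g;
      Block* b0 = g.CreateNewBB();
      Block* b1 = g.CreateNewBB();
      assert(b0->id == 0 && b1->id == 1 && g.GetNumBlocks() == 2u);
      return 0;
    }
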
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 851ca15..af97f51 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -410,18 +410,12 @@
 
   /**
    * @brief Kill the BasicBlock.
-   * @details Unlink predecessors to make this block unreachable, then KillUnreachable().
+   * @details Unlink predecessors and successors, remove all MIRs, set the block type to kDead
+   *          and set hidden to true.
    */
   void Kill(MIRGraph* mir_graph);
 
   /**
-   * @brief Kill the unreachable block and all blocks that become unreachable by killing this one.
-   * @details Set the block type to kDead and set hidden to true, remove all MIRs,
-   *          unlink all successors and recursively kill successors that become unreachable.
-   */
-  void KillUnreachable(MIRGraph* mir_graph);
-
-  /**
    * @brief Is ssa_reg the last SSA definition of that VR in the block?
    */
   bool IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg);
@@ -574,7 +568,7 @@
   }
 
   unsigned int GetNumBlocks() const {
-    return num_blocks_;
+    return block_list_.size();
   }
 
   /**
@@ -704,7 +698,9 @@
 
   void DumpRegLocTable(RegLocation* table, int count);
 
+  void BasicBlockOptimizationStart();
   void BasicBlockOptimization();
+  void BasicBlockOptimizationEnd();
 
   const ArenaVector<BasicBlockId>& GetTopologicalSortOrder() {
     DCHECK(!topological_order_.empty());
@@ -1198,7 +1194,6 @@
   void AllocateSSAUseData(MIR *mir, int num_uses);
   void AllocateSSADefData(MIR *mir, int num_defs);
   void CalculateBasicBlockInformation();
-  void InitializeBasicBlockData();
   void ComputeDFSOrders();
   void ComputeDefBlockMatrix();
   void ComputeDominators();
@@ -1211,6 +1206,18 @@
     return dfs_orders_up_to_date_;
   }
 
+  bool DominationUpToDate() const {
+    return domination_up_to_date_;
+  }
+
+  bool MirSsaRepUpToDate() const {
+    return mir_ssa_rep_up_to_date_;
+  }
+
+  bool TopologicalOrderUpToDate() const {
+    return topological_order_up_to_date_;
+  }
+
   /*
    * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
    * we can verify that all catch entries have native PC entries.
@@ -1321,6 +1328,9 @@
   unsigned int num_reachable_blocks_;
   unsigned int max_num_reachable_blocks_;
   bool dfs_orders_up_to_date_;
+  bool domination_up_to_date_;
+  bool mir_ssa_rep_up_to_date_;
+  bool topological_order_up_to_date_;
   ArenaVector<BasicBlockId> dfs_order_;
   ArenaVector<BasicBlockId> dfs_post_order_;
   ArenaVector<BasicBlockId> dom_post_order_traversal_;
@@ -1379,7 +1389,6 @@
   ArenaBitVector* try_block_addr_;
   BasicBlock* entry_block_;
   BasicBlock* exit_block_;
-  unsigned int num_blocks_;
   const DexFile::CodeItem* current_code_item_;
   ArenaVector<uint16_t> dex_pc_to_block_map_;    // FindBlock lookup cache.
   ArenaVector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
diff --git a/compiler/dex/mir_graph_test.cc b/compiler/dex/mir_graph_test.cc
index a96cd84..8a7e71f 100644
--- a/compiler/dex/mir_graph_test.cc
+++ b/compiler/dex/mir_graph_test.cc
@@ -89,7 +89,6 @@
             cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
       }
     }
-    cu_.mir_graph->num_blocks_ = count;
     ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
     cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
     ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 6e9844c..15b8341 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -485,9 +485,11 @@
             mir->ssa_rep->num_uses = 0;
             BasicBlock* successor_to_unlink = GetBasicBlock(edge_to_kill);
             successor_to_unlink->ErasePredecessor(bb->id);
-            if (successor_to_unlink->predecessors.empty()) {
-              successor_to_unlink->KillUnreachable(this);
-            }
+            // We have changed the graph structure.
+            dfs_orders_up_to_date_ = false;
+            domination_up_to_date_ = false;
+            topological_order_up_to_date_ = false;
+            // Keep the MIR SSA rep; the worst that can happen is a Phi with just one input.
           }
           break;
         case Instruction::CMPL_FLOAT:
@@ -649,36 +651,36 @@
                * Phi node only contains our two cases as input, we will use the result
                * SSA name of the Phi node as our select result and delete the Phi.  If
                * the Phi node has more than two operands, we will arbitrarily use the SSA
-               * name of the "true" path, delete the SSA name of the "false" path from the
+               * name of the "false" path, delete the SSA name of the "true" path from the
                * Phi node (and fix up the incoming arc list).
                */
               if (phi->ssa_rep->num_uses == 2) {
                 mir->ssa_rep->defs[0] = phi->ssa_rep->defs[0];
-                phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+                // Rather than changing the Phi to kMirOpNop, remove it completely.
+                // This avoids leaving other Phis after kMirOpNop (i.e. a non-Phi) insn.
+                tk_tk->RemoveMIR(phi);
+                int dead_false_def = if_false->ssa_rep->defs[0];
+                raw_use_counts_[dead_false_def] = use_counts_[dead_false_def] = 0;
               } else {
-                int dead_def = if_false->ssa_rep->defs[0];
-                int live_def = if_true->ssa_rep->defs[0];
+                int live_def = if_false->ssa_rep->defs[0];
                 mir->ssa_rep->defs[0] = live_def;
-                BasicBlockId* incoming = phi->meta.phi_incoming;
-                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
-                  if (phi->ssa_rep->uses[i] == live_def) {
-                    incoming[i] = bb->id;
-                  }
-                }
-                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
-                  if (phi->ssa_rep->uses[i] == dead_def) {
-                    int last_slot = phi->ssa_rep->num_uses - 1;
-                    phi->ssa_rep->uses[i] = phi->ssa_rep->uses[last_slot];
-                    incoming[i] = incoming[last_slot];
-                  }
-                }
               }
-              phi->ssa_rep->num_uses--;
-              bb->taken = NullBasicBlockId;
-              tk->block_type = kDead;
-              for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
-                tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-              }
+              int dead_true_def = if_true->ssa_rep->defs[0];
+              raw_use_counts_[dead_true_def] = use_counts_[dead_true_def] = 0;
+              // We want to remove ft and tk and link bb directly to ft_ft. First, we need
+              // to update all Phi inputs correctly with UpdatePredecessor(ft->id, bb->id)
+              // since the live_def above comes from ft->first_mir_insn (if_false).
+              DCHECK(if_false == ft->first_mir_insn);
+              ft_ft->UpdatePredecessor(ft->id, bb->id);
+              // Correct the rest of the links between bb, ft and ft_ft.
+              ft->ErasePredecessor(bb->id);
+              ft->fall_through = NullBasicBlockId;
+              bb->fall_through = ft_ft->id;
+              // Now we can kill tk and ft.
+              tk->Kill(this);
+              ft->Kill(this);
+              // NOTE: DFS order, domination info and topological order are still usable
+              // despite the newly dead blocks.
             }
           }
         }
@@ -788,43 +790,9 @@
     MIR* mir = bb->last_mir_insn;
    DCHECK(bb->first_mir_insn != nullptr);
 
-    // Grab the attributes from the paired opcode.
+    // Get the paired insn and check if it can still throw.
     MIR* throw_insn = mir->meta.throw_insn;
-    uint64_t df_attributes = GetDataFlowAttributes(throw_insn);
-
-    // Don't combine if the throw_insn can still throw NPE.
-    if ((df_attributes & DF_HAS_NULL_CHKS) != 0 &&
-        (throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0) {
-      break;
-    }
-    // Now whitelist specific instructions.
-    bool ok = false;
-    if ((df_attributes & DF_IFIELD) != 0) {
-      // Combine only if fast, otherwise weird things can happen.
-      const MirIFieldLoweringInfo& field_info = GetIFieldLoweringInfo(throw_insn);
-      ok = (df_attributes & DF_DA)  ? field_info.FastGet() : field_info.FastPut();
-    } else if ((df_attributes & DF_SFIELD) != 0) {
-      // Combine only if fast, otherwise weird things can happen.
-      const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(throw_insn);
-      bool fast = ((df_attributes & DF_DA)  ? field_info.FastGet() : field_info.FastPut());
-      // Don't combine if the SGET/SPUT can call <clinit>().
-      bool clinit = !field_info.IsClassInitialized() &&
-          (throw_insn->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0;
-      ok = fast && !clinit;
-    } else if ((df_attributes & DF_HAS_RANGE_CHKS) != 0) {
-      // Only AGET/APUT have range checks. We have processed the AGET/APUT null check above.
-      DCHECK_NE(throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK, 0);
-      ok = ((throw_insn->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0);
-    } else if ((throw_insn->dalvikInsn.FlagsOf() & Instruction::kThrow) == 0) {
-      // We can encounter a non-throwing insn here thanks to inlining or other optimizations.
-      ok = true;
-    } else if (throw_insn->dalvikInsn.opcode == Instruction::ARRAY_LENGTH ||
-        throw_insn->dalvikInsn.opcode == Instruction::FILL_ARRAY_DATA ||
-        static_cast<int>(throw_insn->dalvikInsn.opcode) == kMirOpNullCheck) {
-      // No more checks for these (null check was processed above).
-      ok = true;
-    }
-    if (!ok) {
+    if (CanThrow(throw_insn)) {
       break;
     }
 
@@ -863,9 +831,6 @@
           BasicBlock* succ_bb = GetBasicBlock(succ_info->block);
           DCHECK(succ_bb->catch_entry);
           succ_bb->ErasePredecessor(bb->id);
-          if (succ_bb->predecessors.empty()) {
-            succ_bb->KillUnreachable(this);
-          }
         }
       }
     }
@@ -908,8 +873,10 @@
       child->UpdatePredecessor(bb_next->id, bb->id);
     }
 
-    // DFS orders are not up to date anymore.
+    // DFS orders, domination and topological order are not up to date anymore.
     dfs_orders_up_to_date_ = false;
+    domination_up_to_date_ = false;
+    topological_order_up_to_date_ = false;
 
     // Now, loop back and see if we can keep going
   }
@@ -1581,7 +1548,7 @@
   return false;  // Not iterative - return value will be ignored
 }
 
-void MIRGraph::BasicBlockOptimization() {
+void MIRGraph::BasicBlockOptimizationStart() {
   if ((cu_->disable_opt & (1 << kLocalValueNumbering)) == 0) {
     temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
     temp_.gvn.ifield_ids_ =
@@ -1589,7 +1556,9 @@
     temp_.gvn.sfield_ids_ =
         GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), sfield_lowering_infos_);
   }
+}
 
+void MIRGraph::BasicBlockOptimization() {
   if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
     ClearAllVisitedFlags();
     PreOrderDfsIterator iter2(this);
@@ -1606,7 +1575,9 @@
       BasicBlockOpt(bb);
     }
   }
+}
 
+void MIRGraph::BasicBlockOptimizationEnd() {
   // Clean up after LVN.
   temp_.gvn.ifield_ids_ = nullptr;
   temp_.gvn.sfield_ids_ = nullptr;
@@ -1719,32 +1690,37 @@
   const int opt_flags = mir->optimization_flags;
   uint64_t df_attributes = GetDataFlowAttributes(mir);
 
+  // First, check if the insn can still throw NPE.
   if (((df_attributes & DF_HAS_NULL_CHKS) != 0) && ((opt_flags & MIR_IGNORE_NULL_CHECK) == 0)) {
     return true;
   }
+
+  // Now process specific instructions.
   if ((df_attributes & DF_IFIELD) != 0) {
-    // The IGET/IPUT family.
+    // The IGET/IPUT family. We have processed the IGET/IPUT null check above.
+    DCHECK_NE(opt_flags & MIR_IGNORE_NULL_CHECK, 0);
+    // If not fast, weird things can happen and the insn can throw.
     const MirIFieldLoweringInfo& field_info = GetIFieldLoweringInfo(mir);
-    bool fast = (df_attributes & DF_DA) ? field_info.FastGet() : field_info.FastPut();
-    // Already processed null check above.
-    if (fast) {
-      return false;
-    }
-  } else if ((df_attributes & DF_HAS_RANGE_CHKS) != 0) {
-    // The AGET/APUT family.
-    // Already processed null check above.
-    if ((opt_flags & MIR_IGNORE_RANGE_CHECK) != 0) {
-      return false;
-    }
+    bool fast = (df_attributes & DF_DA) != 0 ? field_info.FastGet() : field_info.FastPut();
+    return !fast;
   } else if ((df_attributes & DF_SFIELD) != 0) {
-    // The SGET/SPUT family.
+    // The SGET/SPUT family. Check for potentially throwing class initialization.
+    // Also, if not fast, weird things can happen and the insn can throw.
     const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
-    bool fast = (df_attributes & DF_DA) ? field_info.FastGet() : field_info.FastPut();
+    bool fast = (df_attributes & DF_DA) != 0 ? field_info.FastGet() : field_info.FastPut();
     bool is_class_initialized = field_info.IsClassInitialized() ||
         ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0);
-    if (fast && is_class_initialized) {
-      return false;
-    }
+    return !(fast && is_class_initialized);
+  } else if ((df_attributes & DF_HAS_RANGE_CHKS) != 0) {
+    // Only AGET/APUT have range checks. We have processed the AGET/APUT null check above.
+    DCHECK_NE(opt_flags & MIR_IGNORE_NULL_CHECK, 0);
+    // Non-throwing only if range check has been eliminated.
+    return ((opt_flags & MIR_IGNORE_RANGE_CHECK) == 0);
+  } else if (mir->dalvikInsn.opcode == Instruction::ARRAY_LENGTH ||
+      mir->dalvikInsn.opcode == Instruction::FILL_ARRAY_DATA ||
+      static_cast<int>(mir->dalvikInsn.opcode) == kMirOpNullCheck) {
+    // No more checks for these (null check was processed above).
+    return false;
   }
   return true;
 }
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 6c2e9c0..362c7fd 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -129,7 +129,6 @@
             cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
       }
     }
-    cu_.mir_graph->num_blocks_ = count;
     ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
     cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
     ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
diff --git a/compiler/dex/pass_driver_me_post_opt.cc b/compiler/dex/pass_driver_me_post_opt.cc
index e6238e9..9b56c0d 100644
--- a/compiler/dex/pass_driver_me_post_opt.cc
+++ b/compiler/dex/pass_driver_me_post_opt.cc
@@ -31,20 +31,19 @@
 // The initial list of passes to be used by the PassDriveMEPostOpt.
 template<>
 const Pass* const PassDriver<PassDriverMEPostOpt>::g_passes[] = {
-  GetPassInstance<InitializeData>(),
-  GetPassInstance<ClearPhiInstructions>(),
-  GetPassInstance<DFSOrders>(),
-  GetPassInstance<BuildDomination>(),
-  GetPassInstance<TopologicalSortOrders>(),
-  GetPassInstance<DefBlockMatrix>(),
-  GetPassInstance<CreatePhiNodes>(),
-  GetPassInstance<ClearVisitedFlag>(),
-  GetPassInstance<SSAConversion>(),
-  GetPassInstance<PhiNodeOperands>(),
-  GetPassInstance<ConstantPropagation>(),
-  GetPassInstance<PerformInitRegLocations>(),
-  GetPassInstance<MethodUseCount>(),
-  GetPassInstance<FreeData>(),
+    GetPassInstance<DFSOrders>(),
+    GetPassInstance<BuildDomination>(),
+    GetPassInstance<TopologicalSortOrders>(),
+    GetPassInstance<InitializeSSATransformation>(),
+    GetPassInstance<ClearPhiInstructions>(),
+    GetPassInstance<DefBlockMatrix>(),
+    GetPassInstance<CreatePhiNodes>(),
+    GetPassInstance<SSAConversion>(),
+    GetPassInstance<PhiNodeOperands>(),
+    GetPassInstance<ConstantPropagation>(),
+    GetPassInstance<PerformInitRegLocations>(),
+    GetPassInstance<MethodUseCount>(),
+    GetPassInstance<FinishSSATransformation>(),
 };
 
 // The number of the passes in the initial list of Passes (g_passes).
diff --git a/compiler/dex/post_opt_passes.h b/compiler/dex/post_opt_passes.h
index 7b84ba8..964355b 100644
--- a/compiler/dex/post_opt_passes.h
+++ b/compiler/dex/post_opt_passes.h
@@ -24,13 +24,31 @@
 namespace art {
 
 /**
- * @class InitializeData
+ * @class PassMEMirSsaRep
+ * @brief Convenience class for passes that check MIRGraph::MirSsaRepUpToDate().
+ */
+class PassMEMirSsaRep : public PassME {
+ public:
+  PassMEMirSsaRep(const char* name, DataFlowAnalysisMode type = kAllNodes)
+      : PassME(name, type) {
+  }
+
+  bool Gate(const PassDataHolder* data) const OVERRIDE {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    return !c_unit->mir_graph->MirSsaRepUpToDate();
+  }
+};
+
+/**
+ * @class InitializeSSATransformation
  * @brief There is some data that needs to be initialized before performing
  * the post optimization passes.
  */
-class InitializeData : public PassME {
+class InitializeSSATransformation : public PassMEMirSsaRep {
  public:
-  InitializeData() : PassME("InitializeData", kNoNodes) {
+  InitializeSSATransformation() : PassMEMirSsaRep("InitializeSSATransformation", kNoNodes) {
   }
 
   void Start(PassDataHolder* data) const {
@@ -39,8 +57,8 @@
     DCHECK(data != nullptr);
     CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
     DCHECK(c_unit != nullptr);
-    c_unit->mir_graph.get()->InitializeBasicBlockData();
-    c_unit->mir_graph.get()->SSATransformationStart();
+    c_unit->mir_graph->SSATransformationStart();
+    c_unit->mir_graph->CompilerInitializeSSAConversion();
   }
 };
 
@@ -62,9 +80,9 @@
 * @class ClearPhiInstructions
  * @brief Clear the PHI nodes from the CFG.
  */
-class ClearPhiInstructions : public PassME {
+class ClearPhiInstructions : public PassMEMirSsaRep {
  public:
-  ClearPhiInstructions() : PassME("ClearPhiInstructions") {
+  ClearPhiInstructions() : PassMEMirSsaRep("ClearPhiInstructions") {
   }
 
   bool Worker(PassDataHolder* data) const;
@@ -115,12 +133,18 @@
   BuildDomination() : PassME("BuildDomination", kNoNodes) {
   }
 
+  bool Gate(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    return !c_unit->mir_graph->DominationUpToDate();
+  }
+
   void Start(PassDataHolder* data) const {
     DCHECK(data != nullptr);
     CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
     DCHECK(c_unit != nullptr);
-    c_unit->mir_graph.get()->ComputeDominators();
-    c_unit->mir_graph.get()->CompilerInitializeSSAConversion();
+    c_unit->mir_graph->ComputeDominators();
   }
 
   void End(PassDataHolder* data) const {
@@ -143,6 +167,13 @@
   TopologicalSortOrders() : PassME("TopologicalSortOrders", kNoNodes) {
   }
 
+  bool Gate(const PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    return !c_unit->mir_graph->TopologicalOrderUpToDate();
+  }
+
   void Start(PassDataHolder* data) const {
     DCHECK(data != nullptr);
     CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
@@ -155,9 +186,9 @@
  * @class DefBlockMatrix
  * @brief Calculate the matrix of definition per basic block
  */
-class DefBlockMatrix : public PassME {
+class DefBlockMatrix : public PassMEMirSsaRep {
  public:
-  DefBlockMatrix() : PassME("DefBlockMatrix", kNoNodes) {
+  DefBlockMatrix() : PassMEMirSsaRep("DefBlockMatrix", kNoNodes) {
   }
 
   void Start(PassDataHolder* data) const {
@@ -172,9 +203,9 @@
  * @class CreatePhiNodes
  * @brief Pass to create the phi nodes after SSA calculation
  */
-class CreatePhiNodes : public PassME {
+class CreatePhiNodes : public PassMEMirSsaRep {
  public:
-  CreatePhiNodes() : PassME("CreatePhiNodes", kNoNodes) {
+  CreatePhiNodes() : PassMEMirSsaRep("CreatePhiNodes", kNoNodes) {
   }
 
   void Start(PassDataHolder* data) const {
@@ -186,30 +217,12 @@
 };
 
 /**
- * @class ClearVisitedFlag
- * @brief Pass to clear the visited flag for all basic blocks.
- */
-
-class ClearVisitedFlag : public PassME {
- public:
-  ClearVisitedFlag() : PassME("ClearVisitedFlag", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph.get()->ClearAllVisitedFlags();
-  }
-};
-
-/**
  * @class SSAConversion
  * @brief Pass for SSA conversion of MIRs
  */
-class SSAConversion : public PassME {
+class SSAConversion : public PassMEMirSsaRep {
  public:
-  SSAConversion() : PassME("SSAConversion", kNoNodes) {
+  SSAConversion() : PassMEMirSsaRep("SSAConversion", kNoNodes) {
   }
 
   void Start(PassDataHolder* data) const {
@@ -217,6 +230,7 @@
     CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
     DCHECK(c_unit != nullptr);
     MIRGraph *mir_graph = c_unit->mir_graph.get();
+    mir_graph->ClearAllVisitedFlags();
     mir_graph->DoDFSPreOrderSSARename(mir_graph->GetEntryBlock());
   }
 };
@@ -225,9 +239,9 @@
  * @class PhiNodeOperands
  * @brief Pass to insert the Phi node operands to basic blocks
  */
-class PhiNodeOperands : public PassME {
+class PhiNodeOperands : public PassMEMirSsaRep {
  public:
-  PhiNodeOperands() : PassME("PhiNodeOperands", kPreOrderDFSTraversal) {
+  PhiNodeOperands() : PassMEMirSsaRep("PhiNodeOperands", kPreOrderDFSTraversal) {
   }
 
   bool Worker(PassDataHolder* data) const {
@@ -246,9 +260,9 @@
  * @class InitRegLocations
  * @brief Initialize Register Locations.
  */
-class PerformInitRegLocations : public PassME {
+class PerformInitRegLocations : public PassMEMirSsaRep {
  public:
-  PerformInitRegLocations() : PassME("PerformInitRegLocation", kNoNodes) {
+  PerformInitRegLocations() : PassMEMirSsaRep("PerformInitRegLocation", kNoNodes) {
   }
 
   void Start(PassDataHolder* data) const {
@@ -263,9 +277,9 @@
  * @class ConstantPropagation
  * @brief Perform a constant propagation pass.
  */
-class ConstantPropagation : public PassME {
+class ConstantPropagation : public PassMEMirSsaRep {
  public:
-  ConstantPropagation() : PassME("ConstantPropagation") {
+  ConstantPropagation() : PassMEMirSsaRep("ConstantPropagation") {
   }
 
   bool Worker(PassDataHolder* data) const {
@@ -288,12 +302,12 @@
 };
 
 /**
- * @class FreeData
+ * @class FinishSSATransformation
  * @brief There is some data that needs to be freed after performing the post optimization passes.
  */
-class FreeData : public PassME {
+class FinishSSATransformation : public PassMEMirSsaRep {
  public:
-  FreeData() : PassME("FreeData", kNoNodes) {
+  FinishSSATransformation() : PassMEMirSsaRep("FinishSSATransformation", kNoNodes) {
   }
 
   void End(PassDataHolder* data) const {
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 5f6cdda..230f611 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -2209,18 +2209,36 @@
     // Handle float case.
     // TODO Add support for fast math (not value safe) and do horizontal add in that case.
 
+    int extract_index = mir->dalvikInsn.arg[0];
+
     rl_result = EvalLoc(rl_dest, kFPReg, true);
     NewLIR2(kX86PxorRR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
-    NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), vector_src.GetReg());
 
-    // Since FP must keep order of operation for value safety, we shift to low
-    // 32-bits and add to result.
-    for (int i = 0; i < 3; i++) {
-      NewLIR3(kX86ShufpsRRI, vector_src.GetReg(), vector_src.GetReg(), 0x39);
+    if (LIKELY(extract_index != 0)) {
+      // We know the index of the element we want to extract, and we want to keep the
+      // values in the vector register intact for future use. So we proceed as follows:
+      // 1. Generate a shuffle mask that swaps the zeroth and the required elements;
+      // 2. Shuffle the vector register with this mask;
+      // 3. Extract the zeroth element, where the required value now lies;
+      // 4. Shuffle with the same mask again to restore the original values in the vector register.
+      // The mask is derived from the identity mask 0b11100100 by swapping the 0th and the
+      // extracted element indices.
+      int shuffle[4] = {0b00, 0b01, 0b10, 0b11};
+      shuffle[0] = extract_index;
+      shuffle[extract_index] = 0;
+      int mask = 0;
+      for (int i = 0; i < 4; i++) {
+        mask |= (shuffle[i] << (2 * i));
+      }
+      NewLIR3(kX86ShufpsRRI, vector_src.GetReg(), vector_src.GetReg(), mask);
+      NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), vector_src.GetReg());
+      NewLIR3(kX86ShufpsRRI, vector_src.GetReg(), vector_src.GetReg(), mask);
+    } else {
+      // We need to extract the zeroth element, so no shuffling is required.
       NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), vector_src.GetReg());
     }
 
-    StoreValue(rl_dest, rl_result);
+    StoreFinalValue(rl_dest, rl_result);
   } else if (opsize == kDouble) {
     // TODO Handle double case.
     LOG(FATAL) << "Unsupported add reduce for double.";
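
To make the mask construction above concrete: for extract_index == 2 the loop yields shuffle = {2, 1, 0, 3} and mask == 0xC6 (0b11000110), and since the mask encodes a plain transposition, shuffling twice restores the original lane order. A standalone sketch that models SHUFPS with identical source and destination registers:

    #include <cassert>

    int BuildSwapMask(int extract_index) {
      int shuffle[4] = {0, 1, 2, 3};  // Identity mask 0b11100100.
      shuffle[0] = extract_index;
      shuffle[extract_index] = 0;
      int mask = 0;
      for (int i = 0; i < 4; i++) {
        mask |= shuffle[i] << (2 * i);
      }
      return mask;
    }

    // Models SHUFPS lane selection when src == dst: lane i receives
    // src lane ((mask >> (2 * i)) & 3).
    void Shufps(float v[4], int mask) {
      float tmp[4];
      for (int i = 0; i < 4; i++) {
        tmp[i] = v[(mask >> (2 * i)) & 3];
      }
      for (int i = 0; i < 4; i++) {
        v[i] = tmp[i];
      }
    }

    int main() {
      int mask = BuildSwapMask(2);
      assert(mask == 0xC6);  // Lanes 2, 1, 0, 3.
      float v[4] = {10.f, 11.f, 12.f, 13.f};
      Shufps(v, mask);
      assert(v[0] == 12.f);  // The required element is now in lane 0.
      Shufps(v, mask);       // The swap is its own inverse...
      assert(v[0] == 10.f && v[2] == 12.f);  // ...original order restored.
      return 0;
    }
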
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 7cd431e..3905649 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -103,7 +103,7 @@
 
   num_reachable_blocks_ = dfs_order_.size();
 
-  if (num_reachable_blocks_ != num_blocks_) {
+  if (num_reachable_blocks_ != GetNumBlocks()) {
     // Kill all unreachable blocks.
     AllNodesIterator iter(this);
     for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
@@ -173,9 +173,9 @@
   dom_post_order_traversal_.reserve(num_reachable_blocks_);
 
   ClearAllVisitedFlags();
-  DCHECK(temp_scoped_alloc_.get() != nullptr);
+  ScopedArenaAllocator allocator(&cu_->arena_stack);
   ScopedArenaVector<std::pair<BasicBlock*, ArenaBitVector::IndexIterator>> work_stack(
-      temp_scoped_alloc_->Adapter());
+      allocator.Adapter());
   bb->visited = true;
   work_stack.push_back(std::make_pair(bb, bb->i_dominated->Indexes().begin()));
   while (!work_stack.empty()) {
@@ -402,6 +402,8 @@
   for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
     ComputeDominanceFrontier(bb);
   }
+
+  domination_up_to_date_ = true;
 }
 
 /*
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index f9054e0..dde0dfe 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -670,10 +670,13 @@
     MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
     return false;
   }
+
+#if defined(__aarch64__)
   if (resolved_field->IsVolatile()) {
     MaybeRecordStat(MethodCompilationStat::kNotCompiledVolatile);
     return false;
   }
+#endif
 
   Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
 
@@ -689,12 +692,14 @@
         null_check,
         value,
         field_type,
-        resolved_field->GetOffset()));
+        resolved_field->GetOffset(),
+        resolved_field->IsVolatile()));
   } else {
     current_block_->AddInstruction(new (arena_) HInstanceFieldGet(
         current_block_->GetLastInstruction(),
         field_type,
-        resolved_field->GetOffset()));
+        resolved_field->GetOffset(),
+        resolved_field->IsVolatile()));
 
     UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
   }
@@ -723,10 +728,12 @@
     return false;
   }
 
+#if defined(__aarch64__)
   if (resolved_field->IsVolatile()) {
     MaybeRecordStat(MethodCompilationStat::kNotCompiledVolatile);
     return false;
   }
+#endif
 
   Handle<mirror::Class> referrer_class(hs.NewHandle(compiler_driver_->ResolveCompilingMethodsClass(
       soa, dex_cache, class_loader, outer_compilation_unit_)));
@@ -763,10 +770,12 @@
     HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
     DCHECK_EQ(value->GetType(), field_type);
     current_block_->AddInstruction(
-        new (arena_) HStaticFieldSet(cls, value, field_type, resolved_field->GetOffset()));
+        new (arena_) HStaticFieldSet(cls, value, field_type, resolved_field->GetOffset(),
+            resolved_field->IsVolatile()));
   } else {
     current_block_->AddInstruction(
-        new (arena_) HStaticFieldGet(cls, field_type, resolved_field->GetOffset()));
+        new (arena_) HStaticFieldGet(cls, field_type, resolved_field->GetOffset(),
+            resolved_field->IsVolatile()));
     UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
   }
   return true;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 002d9d4..063dc7c 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2556,68 +2556,170 @@
   LOG(FATAL) << "Unreachable";
 }
 
-void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
+  // TODO (ported from quick): revisit ARM barrier kinds.
+  DmbOptions flavour = DmbOptions::ISH;  // Quiet C++ warnings.
+  switch (kind) {
+    case MemBarrierKind::kAnyStore:
+    case MemBarrierKind::kLoadAny:
+    case MemBarrierKind::kAnyAny: {
+      flavour = DmbOptions::ISH;
+      break;
+    }
+    case MemBarrierKind::kStoreStore: {
+      flavour = DmbOptions::ISHST;
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected memory barrier " << kind;
+  }
+  __ dmb(flavour);
+}
+
+void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
+                                                         uint32_t offset,
+                                                         Register out_lo,
+                                                         Register out_hi) {
+  if (offset != 0) {
+    __ LoadImmediate(out_lo, offset);
+    __ add(addr, addr, ShifterOperand(out_lo));
+  }
+  __ ldrexd(out_lo, out_hi, addr);
+}
+
+void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
+                                                          uint32_t offset,
+                                                          Register value_lo,
+                                                          Register value_hi,
+                                                          Register temp1,
+                                                          Register temp2) {
+  Label fail;
+  if (offset != 0) {
+    __ LoadImmediate(temp1, offset);
+    __ add(addr, addr, ShifterOperand(temp1));
+  }
+  __ Bind(&fail);
+  // We need a load followed by a store. (The address used in a STREX instruction must
+  // be the same as the address in the most recently executed LDREX instruction.)
+  __ ldrexd(temp1, temp2, addr);
+  __ strexd(temp1, value_lo, value_hi, addr);
+  __ cmp(temp1, ShifterOperand(0));
+  __ b(&fail, NE);
+}
+
+void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
+  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
+
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  bool needs_write_barrier =
-      CodeGenerator::StoreNeedsWriteBarrier(instruction->GetFieldType(), instruction->GetValue());
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
+
+  bool is_volatile = field_info.IsVolatile();
+  Primitive::Type field_type = field_info.GetFieldType();
+  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
+
   // Temporary registers for the write barrier.
-  if (needs_write_barrier) {
+  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
+  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
     locations->AddTemp(Location::RequiresRegister());
     locations->AddTemp(Location::RequiresRegister());
+  } else if (is_volatile && is_wide) {
+    // The ARM encoding of ldrexd/strexd imposes some additional constraints:
+    // - the registers need to be consecutive;
+    // - the first register should be even, but not R14.
+    // We don't test for ARM yet, and the assertion makes sure that we revisit this
+    // if we ever enable the ARM encoding.
+    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
+
+    locations->AddTemp(Location::RequiresRegister());
+    locations->AddTemp(Location::RequiresRegister());
+    if (field_type == Primitive::kPrimDouble) {
+      // For doubles we need two more registers to copy the value.
+      locations->AddTemp(Location::RegisterLocation(R2));
+      locations->AddTemp(Location::RegisterLocation(R3));
+    }
   }
 }
 
-void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
+                                                 const FieldInfo& field_info) {
+  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
+
   LocationSummary* locations = instruction->GetLocations();
-  Register obj = locations->InAt(0).AsRegister<Register>();
-  uint32_t offset = instruction->GetFieldOffset().Uint32Value();
-  Primitive::Type field_type = instruction->GetFieldType();
+  Register base = locations->InAt(0).AsRegister<Register>();
+  Location value = locations->InAt(1);
+
+  bool is_volatile = field_info.IsVolatile();
+  Primitive::Type field_type = field_info.GetFieldType();
+  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+
+  if (is_volatile) {
+    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+  }
 
   switch (field_type) {
     case Primitive::kPrimBoolean:
     case Primitive::kPrimByte: {
-      Register value = locations->InAt(1).AsRegister<Register>();
-      __ StoreToOffset(kStoreByte, value, obj, offset);
+      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
       break;
     }
 
     case Primitive::kPrimShort:
     case Primitive::kPrimChar: {
-      Register value = locations->InAt(1).AsRegister<Register>();
-      __ StoreToOffset(kStoreHalfword, value, obj, offset);
+      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
       break;
     }
 
     case Primitive::kPrimInt:
     case Primitive::kPrimNot: {
-      Register value = locations->InAt(1).AsRegister<Register>();
-      __ StoreToOffset(kStoreWord, value, obj, offset);
-      if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue())) {
+      Register value_reg = value.AsRegister<Register>();
+      __ StoreToOffset(kStoreWord, value_reg, base, offset);
+      if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
         Register temp = locations->GetTemp(0).AsRegister<Register>();
         Register card = locations->GetTemp(1).AsRegister<Register>();
-        codegen_->MarkGCCard(temp, card, obj, value);
+        codegen_->MarkGCCard(temp, card, base, value_reg);
       }
       break;
     }
 
     case Primitive::kPrimLong: {
-      Location value = locations->InAt(1);
-      __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
+      if (is_volatile) {
+        // TODO: We could use ldrd and strd, which are atomic with the Large Physical Address
+        // Extension. This info is stored in the compiler driver (HasAtomicLdrdAndStrd) and we
+        // should pass it around to be able to optimize.
+        GenerateWideAtomicStore(base, offset,
+                                value.AsRegisterPairLow<Register>(),
+                                value.AsRegisterPairHigh<Register>(),
+                                locations->GetTemp(0).AsRegister<Register>(),
+                                locations->GetTemp(1).AsRegister<Register>());
+      } else {
+        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
+      }
       break;
     }
 
     case Primitive::kPrimFloat: {
-      SRegister value = locations->InAt(1).AsFpuRegister<SRegister>();
-      __ StoreSToOffset(value, obj, offset);
+      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
       break;
     }
 
     case Primitive::kPrimDouble: {
-      DRegister value = FromLowSToD(locations->InAt(1).AsFpuRegisterPairLow<SRegister>());
-      __ StoreDToOffset(value, obj, offset);
+      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
+      if (is_volatile) {
+        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
+        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();
+
+        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);
+
+        GenerateWideAtomicStore(base, offset,
+                                value_reg_lo,
+                                value_reg_hi,
+                                locations->GetTemp(2).AsRegister<Register>(),
+                                locations->GetTemp(3).AsRegister<Register>());
+      } else {
+        __ StoreDToOffset(value_reg, base, offset);
+      }
       break;
     }
 
@@ -2625,75 +2727,138 @@
       LOG(FATAL) << "Unreachable type " << field_type;
       UNREACHABLE();
   }
+
+  if (is_volatile) {
+    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+  }
 }
 
-void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
+  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+
+  if (field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimDouble)) {
+    // The ARM encoding of ldrexd/strexd imposes some additional constraints:
+    // - the registers need to be consecutive;
+    // - the first register should be even, but not R14.
+    // We don't test for ARM yet, and the assertion makes sure that we revisit this
+    // if we ever enable the ARM encoding.
+    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
+    locations->AddTemp(Location::RequiresRegister());
+    locations->AddTemp(Location::RequiresRegister());
+  }
 }
 
-void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Register obj = locations->InAt(0).AsRegister<Register>();
-  uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
+                                                 const FieldInfo& field_info) {
+  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
 
-  switch (instruction->GetType()) {
+  LocationSummary* locations = instruction->GetLocations();
+  Register base = locations->InAt(0).AsRegister<Register>();
+  Location out = locations->Out();
+  bool is_volatile = field_info.IsVolatile();
+  Primitive::Type field_type = field_info.GetFieldType();
+  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+
+  switch (field_type) {
     case Primitive::kPrimBoolean: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
+      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
       break;
     }
 
     case Primitive::kPrimByte: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
+      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
       break;
     }
 
     case Primitive::kPrimShort: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
+      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
       break;
     }
 
     case Primitive::kPrimChar: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
+      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
       break;
     }
 
     case Primitive::kPrimInt:
     case Primitive::kPrimNot: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ LoadFromOffset(kLoadWord, out, obj, offset);
+      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
       break;
     }
 
     case Primitive::kPrimLong: {
-      // TODO: support volatile.
-      Location out = locations->Out();
-      __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
+      if (is_volatile) {
+        GenerateWideAtomicLoad(base, offset,
+                               out.AsRegisterPairLow<Register>(),
+                               out.AsRegisterPairHigh<Register>());
+      } else {
+        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
+      }
       break;
     }
 
     case Primitive::kPrimFloat: {
-      SRegister out = locations->Out().AsFpuRegister<SRegister>();
-      __ LoadSFromOffset(out, obj, offset);
+      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
       break;
     }
 
     case Primitive::kPrimDouble: {
-      DRegister out = FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>());
-      __ LoadDFromOffset(out, obj, offset);
+      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
+      if (is_volatile) {
+        Register lo = locations->GetTemp(0).AsRegister<Register>();
+        Register hi = locations->GetTemp(1).AsRegister<Register>();
+        GenerateWideAtomicLoad(base, offset, lo, hi);
+        __ vmovdrr(out_reg, lo, hi);
+      } else {
+        __ LoadDFromOffset(out_reg, base, offset);
+      }
       break;
     }
 
     case Primitive::kPrimVoid:
-      LOG(FATAL) << "Unreachable type " << instruction->GetType();
+      LOG(FATAL) << "Unreachable type " << field_type;
       UNREACHABLE();
   }
+
+  if (is_volatile) {
+    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+  }
+}
+
+void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
 }
 
 void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
@@ -3206,146 +3371,6 @@
   __ Bind(slow_path->GetExitLabel());
 }
 
-void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Register cls = locations->InAt(0).AsRegister<Register>();
-  uint32_t offset = instruction->GetFieldOffset().Uint32Value();
-
-  switch (instruction->GetType()) {
-    case Primitive::kPrimBoolean: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ LoadFromOffset(kLoadUnsignedByte, out, cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimByte: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ LoadFromOffset(kLoadSignedByte, out, cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimShort: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ LoadFromOffset(kLoadSignedHalfword, out, cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimChar: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ LoadFromOffset(kLoadUnsignedHalfword, out, cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimInt:
-    case Primitive::kPrimNot: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ LoadFromOffset(kLoadWord, out, cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimLong: {
-      // TODO: support volatile.
-      Location out = locations->Out();
-      __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimFloat: {
-      SRegister out = locations->Out().AsFpuRegister<SRegister>();
-      __ LoadSFromOffset(out, cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimDouble: {
-      DRegister out = FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>());
-      __ LoadDFromOffset(out, cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimVoid:
-      LOG(FATAL) << "Unreachable type " << instruction->GetType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  bool needs_write_barrier =
-      CodeGenerator::StoreNeedsWriteBarrier(instruction->GetFieldType(), instruction->GetValue());
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  // Temporary registers for the write barrier.
-  if (needs_write_barrier) {
-    locations->AddTemp(Location::RequiresRegister());
-    locations->AddTemp(Location::RequiresRegister());
-  }
-}
-
-void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Register cls = locations->InAt(0).AsRegister<Register>();
-  uint32_t offset = instruction->GetFieldOffset().Uint32Value();
-  Primitive::Type field_type = instruction->GetFieldType();
-
-  switch (field_type) {
-    case Primitive::kPrimBoolean:
-    case Primitive::kPrimByte: {
-      Register value = locations->InAt(1).AsRegister<Register>();
-      __ StoreToOffset(kStoreByte, value, cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimShort:
-    case Primitive::kPrimChar: {
-      Register value = locations->InAt(1).AsRegister<Register>();
-      __ StoreToOffset(kStoreHalfword, value, cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimInt:
-    case Primitive::kPrimNot: {
-      Register value = locations->InAt(1).AsRegister<Register>();
-      __ StoreToOffset(kStoreWord, value, cls, offset);
-      if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue())) {
-        Register temp = locations->GetTemp(0).AsRegister<Register>();
-        Register card = locations->GetTemp(1).AsRegister<Register>();
-        codegen_->MarkGCCard(temp, card, cls, value);
-      }
-      break;
-    }
-
-    case Primitive::kPrimLong: {
-      Location value = locations->InAt(1);
-      __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimFloat: {
-      SRegister value = locations->InAt(1).AsFpuRegister<SRegister>();
-      __ StoreSToOffset(value, cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimDouble: {
-      DRegister value = FromLowSToD(locations->InAt(1).AsFpuRegisterPairLow<SRegister>());
-      __ StoreDToOffset(value, cls, offset);
-      break;
-    }
-
-    case Primitive::kPrimVoid:
-      LOG(FATAL) << "Unreachable type " << field_type;
-      UNREACHABLE();
-  }
-}
-
 void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
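
For the wide volatile accesses above, GenerateWideAtomicLoad (declared in the
header diff below) only needs a single ldrexd: on ARMv7 the exclusive load is
itself a single-copy atomic 64-bit read, and a plain get stores nothing back.
A hedged sketch, assuming the parameter names from the header and ART's
ArmAssembler interface; ldrexd takes a bare base register, hence the
AddConstant into IP, and out_lo/out_hi must be an even/odd pair to satisfy
the ARM32 encoding checks further down:

    // Sketch only, not the verbatim implementation.
    void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                             uint32_t offset,
                                                             Register out_lo,
                                                             Register out_hi) {
      __ AddConstant(IP, addr, offset);  // ldrexd has no offset addressing form.
      __ ldrexd(out_lo, out_hi, IP);     // One atomic 64-bit read; no retry loop needed.
    }
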
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 226e635..b86670d 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_H_
 
 #include "code_generator.h"
+#include "dex/compiler_enums.h"
 #include "nodes.h"
 #include "parallel_move_resolver.h"
 #include "utils/arm/assembler_thumb2.h"
@@ -110,6 +111,8 @@
   void HandleInvoke(HInvoke* invoke);
   void HandleBitwiseOperation(HBinaryOperation* operation);
   void HandleShift(HBinaryOperation* operation);
+  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
 
   CodeGeneratorARM* const codegen_;
   InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -138,6 +141,14 @@
   void GenerateClassInitializationCheck(SlowPathCodeARM* slow_path, Register class_reg);
   void HandleBitwiseOperation(HBinaryOperation* operation);
   void HandleShift(HBinaryOperation* operation);
+  void GenerateMemoryBarrier(MemBarrierKind kind);
+  void GenerateWideAtomicStore(Register addr, uint32_t offset,
+                               Register value_lo, Register value_hi,
+                               Register temp1, Register temp2);
+  void GenerateWideAtomicLoad(Register addr, uint32_t offset,
+                              Register out_lo, Register out_hi);
+  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
 
   ArmAssembler* const assembler_;
   CodeGeneratorARM* const codegen_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index e7edd8a..ddb0e82 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2656,90 +2656,28 @@
   LOG(FATAL) << "Unreachable";
 }
 
-void LocationsBuilderX86::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  Primitive::Type field_type = instruction->GetFieldType();
-  bool needs_write_barrier =
-    CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
-
-  bool is_byte_type = (field_type == Primitive::kPrimBoolean)
-      || (field_type == Primitive::kPrimByte);
-  // The register allocator does not support multiple
-  // inputs that die at entry with one in a specific register.
-  if (is_byte_type) {
-    // Ensure the value is in a byte register.
-    locations->SetInAt(1, Location::RegisterLocation(EAX));
-  } else {
-    locations->SetInAt(1, Location::RequiresRegister());
-  }
-  // Temporary registers for the write barrier.
-  if (needs_write_barrier) {
-    locations->AddTemp(Location::RequiresRegister());
-    // Ensure the card is in a byte register.
-    locations->AddTemp(Location::RegisterLocation(ECX));
+void InstructionCodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) {
+  /*
+   * According to the JSR-133 Cookbook, on x86 only StoreLoad/AnyAny barriers need a memory
+   * fence. All other barriers (LoadAny, AnyStore, StoreStore) are no-ops due to the x86 memory
+   * model. For those cases, all we need to ensure is that there is a scheduling barrier in place.
+   */
+  switch (kind) {
+    case MemBarrierKind::kAnyAny: {
+      __ mfence();
+      break;
+    }
+    case MemBarrierKind::kAnyStore:
+    case MemBarrierKind::kLoadAny:
+    case MemBarrierKind::kStoreStore: {
+      // nop
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected memory barrier " << kind;
   }
 }
 
-void InstructionCodeGeneratorX86::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Register obj = locations->InAt(0).AsRegister<Register>();
-  uint32_t offset = instruction->GetFieldOffset().Uint32Value();
-  Primitive::Type field_type = instruction->GetFieldType();
-
-  switch (field_type) {
-    case Primitive::kPrimBoolean:
-    case Primitive::kPrimByte: {
-      ByteRegister value = locations->InAt(1).AsRegister<ByteRegister>();
-      __ movb(Address(obj, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimShort:
-    case Primitive::kPrimChar: {
-      Register value = locations->InAt(1).AsRegister<Register>();
-      __ movw(Address(obj, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimInt:
-    case Primitive::kPrimNot: {
-      Register value = locations->InAt(1).AsRegister<Register>();
-      __ movl(Address(obj, offset), value);
-
-      if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
-        Register temp = locations->GetTemp(0).AsRegister<Register>();
-        Register card = locations->GetTemp(1).AsRegister<Register>();
-        codegen_->MarkGCCard(temp, card, obj, value);
-      }
-      break;
-    }
-
-    case Primitive::kPrimLong: {
-      Location value = locations->InAt(1);
-      __ movl(Address(obj, offset), value.AsRegisterPairLow<Register>());
-      __ movl(Address(obj, kX86WordSize + offset), value.AsRegisterPairHigh<Register>());
-      break;
-    }
-
-    case Primitive::kPrimFloat: {
-      XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
-      __ movss(Address(obj, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimDouble: {
-      XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
-      __ movsd(Address(obj, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimVoid:
-      LOG(FATAL) << "Unreachable type " << field_type;
-      UNREACHABLE();
-  }
-}
 
 void CodeGeneratorX86::MarkGCCard(Register temp, Register card, Register object, Register value) {
   Label is_null;
@@ -2753,73 +2691,233 @@
   __ Bind(&is_null);
 }
 
-void LocationsBuilderX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
+  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+
+  if (field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong)) {
+    // Long values can be loaded atomically into an XMM register using movsd.
+    // So we use an XMM register as a temp to achieve atomicity: first load the value into the
+    // XMM, then copy it into the output register pair 32 bits at a time.
+    locations->AddTemp(Location::RequiresFpuRegister());
+  }
 }
 
-void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Register obj = locations->InAt(0).AsRegister<Register>();
-  uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
+                                                 const FieldInfo& field_info) {
+  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
 
-  switch (instruction->GetType()) {
+  LocationSummary* locations = instruction->GetLocations();
+  Register base = locations->InAt(0).AsRegister<Register>();
+  Location out = locations->Out();
+  bool is_volatile = field_info.IsVolatile();
+  Primitive::Type field_type = field_info.GetFieldType();
+  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+
+  switch (field_type) {
     case Primitive::kPrimBoolean: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ movzxb(out, Address(obj, offset));
+      __ movzxb(out.AsRegister<Register>(), Address(base, offset));
       break;
     }
 
     case Primitive::kPrimByte: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ movsxb(out, Address(obj, offset));
+      __ movsxb(out.AsRegister<Register>(), Address(base, offset));
       break;
     }
 
     case Primitive::kPrimShort: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ movsxw(out, Address(obj, offset));
+      __ movsxw(out.AsRegister<Register>(), Address(base, offset));
       break;
     }
 
     case Primitive::kPrimChar: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ movzxw(out, Address(obj, offset));
+      __ movzxw(out.AsRegister<Register>(), Address(base, offset));
       break;
     }
 
     case Primitive::kPrimInt:
     case Primitive::kPrimNot: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ movl(out, Address(obj, offset));
+      __ movl(out.AsRegister<Register>(), Address(base, offset));
       break;
     }
 
     case Primitive::kPrimLong: {
-      // TODO: support volatile.
-      __ movl(locations->Out().AsRegisterPairLow<Register>(), Address(obj, offset));
-      __ movl(locations->Out().AsRegisterPairHigh<Register>(), Address(obj, kX86WordSize + offset));
+      if (is_volatile) {
+        XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+        __ movsd(temp, Address(base, offset));
+        __ movd(out.AsRegisterPairLow<Register>(), temp);
+        __ psrlq(temp, Immediate(32));
+        __ movd(out.AsRegisterPairHigh<Register>(), temp);
+      } else {
+        __ movl(out.AsRegisterPairLow<Register>(), Address(base, offset));
+        __ movl(out.AsRegisterPairHigh<Register>(), Address(base, kX86WordSize + offset));
+      }
       break;
     }
 
     case Primitive::kPrimFloat: {
-      XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
-      __ movss(out, Address(obj, offset));
+      __ movss(out.AsFpuRegister<XmmRegister>(), Address(base, offset));
       break;
     }
 
     case Primitive::kPrimDouble: {
-      XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
-      __ movsd(out, Address(obj, offset));
+      __ movsd(out.AsFpuRegister<XmmRegister>(), Address(base, offset));
       break;
     }
 
     case Primitive::kPrimVoid:
-      LOG(FATAL) << "Unreachable type " << instruction->GetType();
+      LOG(FATAL) << "Unreachable type " << field_type;
       UNREACHABLE();
   }
+
+  if (is_volatile) {
+    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+  }
+}
+
+void LocationsBuilderX86::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
+  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
+
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  bool is_volatile = field_info.IsVolatile();
+  Primitive::Type field_type = field_info.GetFieldType();
+  bool is_byte_type = (field_type == Primitive::kPrimBoolean)
+    || (field_type == Primitive::kPrimByte);
+
+  // The register allocator does not support multiple
+  // inputs that die at entry with one in a specific register.
+  if (is_byte_type) {
+    // Ensure the value is in a byte register.
+    locations->SetInAt(1, Location::RegisterLocation(EAX));
+  } else {
+    locations->SetInAt(1, Location::RequiresRegister());
+  }
+  // Temporary registers for the write barrier.
+  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
+    locations->AddTemp(Location::RequiresRegister());
+    // Ensure the card is in a byte register.
+    locations->AddTemp(Location::RegisterLocation(ECX));
+  } else if (is_volatile && (field_type == Primitive::kPrimLong)) {
+    // A 64-bit value can be atomically written to an address with movsd and an XMM register.
+    // We need two XMM registers because there's no easier way to (bit) copy a register pair
+    // into a single XMM register (we copy each half of the pair into an XMM and then
+    // interleave them with punpckldq).
+    // NB: We could make the register allocator understand fp_reg <-> core_reg moves, but given
+    // how rarely we need this it isn't worth adding the extra complexity.
+    locations->AddTemp(Location::RequiresFpuRegister());
+    locations->AddTemp(Location::RequiresFpuRegister());
+  }
+}
+
+void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction,
+                                                 const FieldInfo& field_info) {
+  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
+
+  LocationSummary* locations = instruction->GetLocations();
+  Register base = locations->InAt(0).AsRegister<Register>();
+  Location value = locations->InAt(1);
+  bool is_volatile = field_info.IsVolatile();
+  Primitive::Type field_type = field_info.GetFieldType();
+  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+
+  if (is_volatile) {
+    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+  }
+
+  switch (field_type) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte: {
+      __ movb(Address(base, offset), value.AsRegister<ByteRegister>());
+      break;
+    }
+
+    case Primitive::kPrimShort:
+    case Primitive::kPrimChar: {
+      __ movw(Address(base, offset), value.AsRegister<Register>());
+      break;
+    }
+
+    case Primitive::kPrimInt:
+    case Primitive::kPrimNot: {
+      __ movl(Address(base, offset), value.AsRegister<Register>());
+
+      if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
+        Register temp = locations->GetTemp(0).AsRegister<Register>();
+        Register card = locations->GetTemp(1).AsRegister<Register>();
+        codegen_->MarkGCCard(temp, card, base, value.AsRegister<Register>());
+      }
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      if (is_volatile) {
+        XmmRegister temp1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+        XmmRegister temp2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+        __ movd(temp1, value.AsRegisterPairLow<Register>());
+        __ movd(temp2, value.AsRegisterPairHigh<Register>());
+        __ punpckldq(temp1, temp2);
+        __ movsd(Address(base, offset), temp1);
+      } else {
+        __ movl(Address(base, offset), value.AsRegisterPairLow<Register>());
+        __ movl(Address(base, kX86WordSize + offset), value.AsRegisterPairHigh<Register>());
+      }
+      break;
+    }
+
+    case Primitive::kPrimFloat: {
+      __ movss(Address(base, offset), value.AsFpuRegister<XmmRegister>());
+      break;
+    }
+
+    case Primitive::kPrimDouble: {
+      __ movsd(Address(base, offset), value.AsFpuRegister<XmmRegister>());
+      break;
+    }
+
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable type " << field_type;
+      UNREACHABLE();
+  }
+
+  if (is_volatile) {
+    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+  }
+}
+
+void LocationsBuilderX86::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorX86::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void LocationsBuilderX86::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorX86::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void LocationsBuilderX86::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorX86::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void LocationsBuilderX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo());
 }
 
 void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
@@ -3383,159 +3481,6 @@
   // No need for memory fence, thanks to the X86 memory model.
 }
 
-void LocationsBuilderX86::VisitStaticFieldGet(HStaticFieldGet* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void InstructionCodeGeneratorX86::VisitStaticFieldGet(HStaticFieldGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Register cls = locations->InAt(0).AsRegister<Register>();
-  uint32_t offset = instruction->GetFieldOffset().Uint32Value();
-
-  switch (instruction->GetType()) {
-    case Primitive::kPrimBoolean: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ movzxb(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimByte: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ movsxb(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimShort: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ movsxw(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimChar: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ movzxw(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimInt:
-    case Primitive::kPrimNot: {
-      Register out = locations->Out().AsRegister<Register>();
-      __ movl(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimLong: {
-      // TODO: support volatile.
-      __ movl(locations->Out().AsRegisterPairLow<Register>(), Address(cls, offset));
-      __ movl(locations->Out().AsRegisterPairHigh<Register>(), Address(cls, kX86WordSize + offset));
-      break;
-    }
-
-    case Primitive::kPrimFloat: {
-      XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
-      __ movss(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimDouble: {
-      XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
-      __ movsd(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimVoid:
-      LOG(FATAL) << "Unreachable type " << instruction->GetType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderX86::VisitStaticFieldSet(HStaticFieldSet* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  Primitive::Type field_type = instruction->GetFieldType();
-  bool needs_write_barrier =
-      CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
-  bool is_byte_type = (field_type == Primitive::kPrimBoolean)
-      || (field_type == Primitive::kPrimByte);
-  // The register allocator does not support multiple
-  // inputs that die at entry with one in a specific register.
-  if (is_byte_type) {
-    // Ensure the value is in a byte register.
-    locations->SetInAt(1, Location::RegisterLocation(EAX));
-  } else {
-    locations->SetInAt(1, Location::RequiresRegister());
-  }
-  // Temporary registers for the write barrier.
-  if (needs_write_barrier) {
-    locations->AddTemp(Location::RequiresRegister());
-    // Ensure the card is in a byte register.
-    locations->AddTemp(Location::RegisterLocation(ECX));
-  }
-}
-
-void InstructionCodeGeneratorX86::VisitStaticFieldSet(HStaticFieldSet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Register cls = locations->InAt(0).AsRegister<Register>();
-  uint32_t offset = instruction->GetFieldOffset().Uint32Value();
-  Primitive::Type field_type = instruction->GetFieldType();
-
-  switch (field_type) {
-    case Primitive::kPrimBoolean:
-    case Primitive::kPrimByte: {
-      ByteRegister value = locations->InAt(1).AsRegister<ByteRegister>();
-      __ movb(Address(cls, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimShort:
-    case Primitive::kPrimChar: {
-      Register value = locations->InAt(1).AsRegister<Register>();
-      __ movw(Address(cls, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimInt:
-    case Primitive::kPrimNot: {
-      Register value = locations->InAt(1).AsRegister<Register>();
-      __ movl(Address(cls, offset), value);
-
-      if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
-        Register temp = locations->GetTemp(0).AsRegister<Register>();
-        Register card = locations->GetTemp(1).AsRegister<Register>();
-        codegen_->MarkGCCard(temp, card, cls, value);
-      }
-      break;
-    }
-
-    case Primitive::kPrimLong: {
-      Location value = locations->InAt(1);
-      __ movl(Address(cls, offset), value.AsRegisterPairLow<Register>());
-      __ movl(Address(cls, kX86WordSize + offset), value.AsRegisterPairHigh<Register>());
-      break;
-    }
-
-    case Primitive::kPrimFloat: {
-      XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
-      __ movss(Address(cls, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimDouble: {
-      XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
-      __ movsd(Address(cls, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimVoid:
-      LOG(FATAL) << "Unreachable type " << field_type;
-      UNREACHABLE();
-  }
-}
-
 void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
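
The movsd/psrlq load and movd/punpckldq store sequences above can be exercised
in isolation. A standalone sketch using SSE2 intrinsics (host C++, not ART
code; the function names are made up, and the single-access atomicity of movsd
assumes the 8-byte field is naturally aligned, as it is for longs in the
managed heap):

    #include <emmintrin.h>  // SSE2
    #include <stdint.h>

    // Volatile long get: one 64-bit movsd, then split the XMM into halves
    // with movd / psrlq $32 / movd.
    static void AtomicLoad64(const void* addr, uint32_t* lo, uint32_t* hi) {
      __m128i x = _mm_castpd_si128(_mm_load_sd(static_cast<const double*>(addr)));
      *lo = static_cast<uint32_t>(_mm_cvtsi128_si32(x));
      *hi = static_cast<uint32_t>(_mm_cvtsi128_si32(_mm_srli_epi64(x, 32)));
    }

    // Volatile long set: movd each half, interleave with punpckldq, then
    // write all 64 bits at once with movsd.
    static void AtomicStore64(void* addr, uint32_t lo, uint32_t hi) {
      __m128i pair = _mm_unpacklo_epi32(_mm_cvtsi32_si128(static_cast<int32_t>(lo)),
                                        _mm_cvtsi32_si128(static_cast<int32_t>(hi)));
      _mm_store_sd(static_cast<double*>(addr), _mm_castsi128_pd(pair));
    }
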
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index aed06c0..636f884 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_
 
 #include "code_generator.h"
+#include "dex/compiler_enums.h"
 #include "nodes.h"
 #include "parallel_move_resolver.h"
 #include "utils/x86/assembler_x86.h"
@@ -105,6 +106,8 @@
   void HandleBitwiseOperation(HBinaryOperation* instruction);
   void HandleInvoke(HInvoke* invoke);
   void HandleShift(HBinaryOperation* instruction);
+  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
 
   CodeGeneratorX86* const codegen_;
   InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -137,6 +140,9 @@
   void GenerateShlLong(const Location& loc, Register shifter);
   void GenerateShrLong(const Location& loc, Register shifter);
   void GenerateUShrLong(const Location& loc, Register shifter);
+  void GenerateMemoryBarrier(MemBarrierKind kind);
+  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
 
   X86Assembler* const assembler_;
   CodeGeneratorX86* const codegen_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ff7fcdc..1bc3092 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2389,12 +2389,109 @@
   LOG(FATAL) << "Unimplemented";
 }
 
-void LocationsBuilderX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+void InstructionCodeGeneratorX86_64::GenerateMemoryBarrier(MemBarrierKind kind) {
+  /*
+   * According to the JSR-133 Cookbook, on x86 only StoreLoad/AnyAny barriers need a memory
+   * fence. All other barriers (LoadAny, AnyStore, StoreStore) are no-ops due to the x86 memory
+   * model. For those cases, all we need to ensure is that there is a scheduling barrier in place.
+   */
+  switch (kind) {
+    case MemBarrierKind::kAnyAny: {
+      __ mfence();
+      break;
+    }
+    case MemBarrierKind::kAnyStore:
+    case MemBarrierKind::kLoadAny:
+    case MemBarrierKind::kStoreStore: {
+      // nop
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected memory barier " << kind;
+  }
+}
+
+void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) {
+  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  Primitive::Type field_type = instruction->GetFieldType();
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
+                                                    const FieldInfo& field_info) {
+  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+
+  LocationSummary* locations = instruction->GetLocations();
+  CpuRegister base = locations->InAt(0).AsRegister<CpuRegister>();
+  Location out = locations->Out();
+  bool is_volatile = field_info.IsVolatile();
+  Primitive::Type field_type = field_info.GetFieldType();
+  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+
+  switch (field_type) {
+    case Primitive::kPrimBoolean: {
+      __ movzxb(out.AsRegister<CpuRegister>(), Address(base, offset));
+      break;
+    }
+
+    case Primitive::kPrimByte: {
+      __ movsxb(out.AsRegister<CpuRegister>(), Address(base, offset));
+      break;
+    }
+
+    case Primitive::kPrimShort: {
+      __ movsxw(out.AsRegister<CpuRegister>(), Address(base, offset));
+      break;
+    }
+
+    case Primitive::kPrimChar: {
+      __ movzxw(out.AsRegister<CpuRegister>(), Address(base, offset));
+      break;
+    }
+
+    case Primitive::kPrimInt:
+    case Primitive::kPrimNot: {
+      __ movl(out.AsRegister<CpuRegister>(), Address(base, offset));
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      __ movq(out.AsRegister<CpuRegister>(), Address(base, offset));
+      break;
+    }
+
+    case Primitive::kPrimFloat: {
+      __ movss(out.AsFpuRegister<XmmRegister>(), Address(base, offset));
+      break;
+    }
+
+    case Primitive::kPrimDouble: {
+      __ movsd(out.AsFpuRegister<XmmRegister>(), Address(base, offset));
+      break;
+    }
+
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable type " << field_type;
+      UNREACHABLE();
+  }
+
+  if (is_volatile) {
+    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+  }
+}
+
+void LocationsBuilderX86_64::HandleFieldSet(HInstruction* instruction,
+                                            const FieldInfo& field_info) {
+  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
+
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
   bool needs_write_barrier =
-      CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue());
+      CodeGenerator::StoreNeedsWriteBarrier(field_info.GetFieldType(), instruction->InputAt(1));
+
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
   if (needs_write_barrier) {
@@ -2404,54 +2501,57 @@
   }
 }
 
-void InstructionCodeGeneratorX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction,
+                                                    const FieldInfo& field_info) {
+  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
+
   LocationSummary* locations = instruction->GetLocations();
-  CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
-  size_t offset = instruction->GetFieldOffset().SizeValue();
-  Primitive::Type field_type = instruction->GetFieldType();
+  CpuRegister base = locations->InAt(0).AsRegister<CpuRegister>();
+  Location value = locations->InAt(1);
+  bool is_volatile = field_info.IsVolatile();
+  Primitive::Type field_type = field_info.GetFieldType();
+  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+
+  if (is_volatile) {
+    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+  }
 
   switch (field_type) {
     case Primitive::kPrimBoolean:
     case Primitive::kPrimByte: {
-      CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
-      __ movb(Address(obj, offset), value);
+      __ movb(Address(base, offset), value.AsRegister<CpuRegister>());
       break;
     }
 
     case Primitive::kPrimShort:
     case Primitive::kPrimChar: {
-      CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
-      __ movw(Address(obj, offset), value);
+      __ movw(Address(base, offset), value.AsRegister<CpuRegister>());
       break;
     }
 
     case Primitive::kPrimInt:
     case Primitive::kPrimNot: {
-      CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
-      __ movl(Address(obj, offset), value);
-      if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue())) {
+      __ movl(Address(base, offset), value.AsRegister<CpuRegister>());
+      if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
         CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
         CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
-        codegen_->MarkGCCard(temp, card, obj, value);
+        codegen_->MarkGCCard(temp, card, base, value.AsRegister<CpuRegister>());
       }
       break;
     }
 
     case Primitive::kPrimLong: {
-      CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
-      __ movq(Address(obj, offset), value);
+      __ movq(Address(base, offset), value.AsRegister<CpuRegister>());
       break;
     }
 
     case Primitive::kPrimFloat: {
-      XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
-      __ movss(Address(obj, offset), value);
+      __ movss(Address(base, offset), value.AsFpuRegister<XmmRegister>());
       break;
     }
 
     case Primitive::kPrimDouble: {
-      XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
-      __ movsd(Address(obj, offset), value);
+      __ movsd(Address(base, offset), value.AsFpuRegister<XmmRegister>());
       break;
     }
 
@@ -2459,74 +2559,42 @@
       LOG(FATAL) << "Unreachable type " << field_type;
       UNREACHABLE();
   }
+
+  if (is_volatile) {
+    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+  }
+}
+
+void LocationsBuilderX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
 }
 
 void LocationsBuilderX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+  HandleFieldGet(instruction);
 }
 
 void InstructionCodeGeneratorX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
-  size_t offset = instruction->GetFieldOffset().SizeValue();
+  HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
 
-  switch (instruction->GetType()) {
-    case Primitive::kPrimBoolean: {
-      CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-      __ movzxb(out, Address(obj, offset));
-      break;
-    }
+void LocationsBuilderX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+  HandleFieldGet(instruction);
+}
 
-    case Primitive::kPrimByte: {
-      CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-      __ movsxb(out, Address(obj, offset));
-      break;
-    }
+void InstructionCodeGeneratorX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
 
-    case Primitive::kPrimShort: {
-      CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-      __ movsxw(out, Address(obj, offset));
-      break;
-    }
+void LocationsBuilderX86_64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
 
-    case Primitive::kPrimChar: {
-      CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-      __ movzxw(out, Address(obj, offset));
-      break;
-    }
-
-    case Primitive::kPrimInt:
-    case Primitive::kPrimNot: {
-      CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-      __ movl(out, Address(obj, offset));
-      break;
-    }
-
-    case Primitive::kPrimLong: {
-      CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-      __ movq(out, Address(obj, offset));
-      break;
-    }
-
-    case Primitive::kPrimFloat: {
-      XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
-      __ movss(out, Address(obj, offset));
-      break;
-    }
-
-    case Primitive::kPrimDouble: {
-      XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
-      __ movsd(out, Address(obj, offset));
-      break;
-    }
-
-    case Primitive::kPrimVoid:
-      LOG(FATAL) << "Unreachable type " << instruction->GetType();
-      UNREACHABLE();
-  }
+void InstructionCodeGeneratorX86_64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
 }
 
 void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
@@ -3222,146 +3290,6 @@
                                    check->GetLocations()->InAt(0).AsRegister<CpuRegister>());
 }
 
-void LocationsBuilderX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void InstructionCodeGeneratorX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  CpuRegister cls = locations->InAt(0).AsRegister<CpuRegister>();
-  size_t offset = instruction->GetFieldOffset().SizeValue();
-
-  switch (instruction->GetType()) {
-    case Primitive::kPrimBoolean: {
-      CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-      __ movzxb(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimByte: {
-      CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-      __ movsxb(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimShort: {
-      CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-      __ movsxw(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimChar: {
-      CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-      __ movzxw(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimInt:
-    case Primitive::kPrimNot: {
-      CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-      __ movl(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimLong: {
-      CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-      __ movq(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimFloat: {
-      XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
-      __ movss(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimDouble: {
-      XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
-      __ movsd(out, Address(cls, offset));
-      break;
-    }
-
-    case Primitive::kPrimVoid:
-      LOG(FATAL) << "Unreachable type " << instruction->GetType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderX86_64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  Primitive::Type field_type = instruction->GetFieldType();
-  bool needs_write_barrier =
-      CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue());
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  if (needs_write_barrier) {
-    // Temporary registers for the write barrier.
-    locations->AddTemp(Location::RequiresRegister());
-    locations->AddTemp(Location::RequiresRegister());
-  }
-}
-
-void InstructionCodeGeneratorX86_64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  CpuRegister cls = locations->InAt(0).AsRegister<CpuRegister>();
-  size_t offset = instruction->GetFieldOffset().SizeValue();
-  Primitive::Type field_type = instruction->GetFieldType();
-
-  switch (field_type) {
-    case Primitive::kPrimBoolean:
-    case Primitive::kPrimByte: {
-      CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
-      __ movb(Address(cls, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimShort:
-    case Primitive::kPrimChar: {
-      CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
-      __ movw(Address(cls, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimInt:
-    case Primitive::kPrimNot: {
-      CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
-      __ movl(Address(cls, offset), value);
-      if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue())) {
-        CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
-        CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
-        codegen_->MarkGCCard(temp, card, cls, value);
-      }
-      break;
-    }
-
-    case Primitive::kPrimLong: {
-      CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
-      __ movq(Address(cls, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimFloat: {
-      XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
-      __ movss(Address(cls, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimDouble: {
-      XmmRegister value = locations->InAt(1).AsFpuRegister<XmmRegister>();
-      __ movsd(Address(cls, offset), value);
-      break;
-    }
-
-    case Primitive::kPrimVoid:
-      LOG(FATAL) << "Unreachable type " << field_type;
-      UNREACHABLE();
-  }
-}
-
 void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
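
The barrier placement in HandleFieldGet/HandleFieldSet follows the standard
volatile protocol: LoadAny after a volatile load, AnyStore before and AnyAny
after a volatile store, with only AnyAny costing an mfence on x86. A rough
C++11 analogy (not ART code; the mapping is approximate and shown only to
make the ordering concrete):

    #include <atomic>
    #include <cstdint>

    std::atomic<int64_t> field;

    // Get: load, then LoadAny -> acquire semantics. On x86 the barrier is
    // compiler-only, matching the no-op cases in GenerateMemoryBarrier.
    int64_t VolatileGet() { return field.load(std::memory_order_acquire); }

    // Set: AnyStore before (release) plus AnyAny after (the one case that
    // emits mfence) -> a sequentially consistent store.
    void VolatileSet(int64_t v) { field.store(v, std::memory_order_seq_cst); }
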
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 794b81f..0708864 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_
 
 #include "code_generator.h"
+#include "dex/compiler_enums.h"
 #include "nodes.h"
 #include "parallel_move_resolver.h"
 #include "utils/x86_64/assembler_x86_64.h"
@@ -109,6 +110,8 @@
   void HandleInvoke(HInvoke* invoke);
   void HandleBitwiseOperation(HBinaryOperation* operation);
   void HandleShift(HBinaryOperation* operation);
+  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+  void HandleFieldGet(HInstruction* instruction);
 
   CodeGeneratorX86_64* const codegen_;
   InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -138,6 +141,9 @@
   void HandleBitwiseOperation(HBinaryOperation* operation);
   void GenerateDivRemIntegral(HBinaryOperation* instruction);
   void HandleShift(HBinaryOperation* operation);
+  void GenerateMemoryBarrier(MemBarrierKind kind);
+  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
 
   X86_64Assembler* const assembler_;
   CodeGeneratorX86_64* const codegen_;
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 94ff192..48f1ea9 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -40,18 +40,22 @@
   entry->AddSuccessor(block);
 
   block->AddInstruction(
-      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimNot, MemberOffset(42)));
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimNot,
+          MemberOffset(42), false));
   block->AddInstruction(
-      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimNot, MemberOffset(42)));
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimNot,
+          MemberOffset(42), false));
   HInstruction* to_remove = block->GetLastInstruction();
   block->AddInstruction(
-      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimNot, MemberOffset(43)));
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimNot,
+          MemberOffset(43), false));
   HInstruction* different_offset = block->GetLastInstruction();
   // Kill the value.
   block->AddInstruction(new (&allocator) HInstanceFieldSet(
-      parameter, parameter, Primitive::kPrimNot, MemberOffset(42)));
+      parameter, parameter, Primitive::kPrimNot, MemberOffset(42), false));
   block->AddInstruction(
-      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimNot, MemberOffset(42)));
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimNot,
+          MemberOffset(42), false));
   HInstruction* use_after_kill = block->GetLastInstruction();
   block->AddInstruction(new (&allocator) HExit());
 
@@ -82,7 +86,8 @@
   graph->AddBlock(block);
   entry->AddSuccessor(block);
   block->AddInstruction(
-      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean,
+          MemberOffset(42), false));
 
   block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
   HBasicBlock* then = new (&allocator) HBasicBlock(graph);
@@ -98,13 +103,16 @@
   else_->AddSuccessor(join);
 
   then->AddInstruction(
-      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean,
+          MemberOffset(42), false));
   then->AddInstruction(new (&allocator) HGoto());
   else_->AddInstruction(
-      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean,
+          MemberOffset(42), false));
   else_->AddInstruction(new (&allocator) HGoto());
   join->AddInstruction(
-      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean,
+          MemberOffset(42), false));
   join->AddInstruction(new (&allocator) HExit());
 
   graph->TryBuildingSsa();
@@ -132,7 +140,8 @@
   graph->AddBlock(block);
   entry->AddSuccessor(block);
   block->AddInstruction(
-      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean,
+          MemberOffset(42), false));
   block->AddInstruction(new (&allocator) HGoto());
 
   HBasicBlock* loop_header = new (&allocator) HBasicBlock(graph);
@@ -148,22 +157,25 @@
   loop_body->AddSuccessor(loop_header);
 
   loop_header->AddInstruction(
-      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean,
+          MemberOffset(42), false));
   HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction();
   loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
 
   // Kill inside the loop body to prevent field gets inside the loop header
   // and the body to be GVN'ed.
   loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(
-      parameter, parameter, Primitive::kPrimNot, MemberOffset(42)));
+      parameter, parameter, Primitive::kPrimNot, MemberOffset(42), false));
   HInstruction* field_set = loop_body->GetLastInstruction();
   loop_body->AddInstruction(
-      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean,
+          MemberOffset(42), false));
   HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction();
   loop_body->AddInstruction(new (&allocator) HGoto());
 
   exit->AddInstruction(
-      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean,
+          MemberOffset(42), false));
   HInstruction* field_get_in_exit = exit->GetLastInstruction();
   exit->AddInstruction(new (&allocator) HExit());
 
@@ -242,7 +254,7 @@
   {
     // Make one block with a side effect.
     entry->AddInstruction(new (&allocator) HInstanceFieldSet(
-        parameter, parameter, Primitive::kPrimNot, MemberOffset(42)));
+        parameter, parameter, Primitive::kPrimNot, MemberOffset(42), false));
 
     GlobalValueNumberer gvn(&allocator, graph);
     gvn.Run();
@@ -256,7 +268,7 @@
   {
     outer_loop_body->InsertInstructionBefore(
         new (&allocator) HInstanceFieldSet(
-            parameter, parameter, Primitive::kPrimNot, MemberOffset(42)),
+            parameter, parameter, Primitive::kPrimNot, MemberOffset(42), false),
         outer_loop_body->GetLastInstruction());
 
     GlobalValueNumberer gvn(&allocator, graph);
@@ -273,7 +285,7 @@
     outer_loop_body->RemoveInstruction(outer_loop_body->GetFirstInstruction());
     inner_loop_body->InsertInstructionBefore(
         new (&allocator) HInstanceFieldSet(
-            parameter, parameter, Primitive::kPrimNot, MemberOffset(42)),
+            parameter, parameter, Primitive::kPrimNot, MemberOffset(42), false),
         inner_loop_body->GetLastInstruction());
 
     GlobalValueNumberer gvn(&allocator, graph);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index c963b70..0fc1fd8 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2128,39 +2128,45 @@
 
 class FieldInfo : public ValueObject {
  public:
-  FieldInfo(MemberOffset field_offset, Primitive::Type field_type)
-      : field_offset_(field_offset), field_type_(field_type) {}
+  FieldInfo(MemberOffset field_offset, Primitive::Type field_type, bool is_volatile)
+      : field_offset_(field_offset), field_type_(field_type), is_volatile_(is_volatile) {}
 
   MemberOffset GetFieldOffset() const { return field_offset_; }
   Primitive::Type GetFieldType() const { return field_type_; }
+  bool IsVolatile() const { return is_volatile_; }
 
  private:
   const MemberOffset field_offset_;
   const Primitive::Type field_type_;
+  const bool is_volatile_;
 };
 
 class HInstanceFieldGet : public HExpression<1> {
  public:
   HInstanceFieldGet(HInstruction* value,
                     Primitive::Type field_type,
-                    MemberOffset field_offset)
+                    MemberOffset field_offset,
+                    bool is_volatile)
       : HExpression(field_type, SideEffects::DependsOnSomething()),
-        field_info_(field_offset, field_type) {
+        field_info_(field_offset, field_type, is_volatile) {
     SetRawInputAt(0, value);
   }
 
-  virtual bool CanBeMoved() const { return true; }
-  virtual bool InstructionDataEquals(HInstruction* other) const {
-    size_t other_offset = other->AsInstanceFieldGet()->GetFieldOffset().SizeValue();
-    return other_offset == GetFieldOffset().SizeValue();
+  bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
+
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+    HInstanceFieldGet* other_get = other->AsInstanceFieldGet();
+    return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
   }
 
   virtual size_t ComputeHashCode() const {
     return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
   }
 
+  const FieldInfo& GetFieldInfo() const { return field_info_; }
   MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
   Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
+  bool IsVolatile() const { return field_info_.IsVolatile(); }
 
   DECLARE_INSTRUCTION(InstanceFieldGet);
 
@@ -2175,15 +2181,18 @@
   HInstanceFieldSet(HInstruction* object,
                     HInstruction* value,
                     Primitive::Type field_type,
-                    MemberOffset field_offset)
+                    MemberOffset field_offset,
+                    bool is_volatile)
       : HTemplateInstruction(SideEffects::ChangesSomething()),
-        field_info_(field_offset, field_type) {
+        field_info_(field_offset, field_type, is_volatile) {
     SetRawInputAt(0, object);
     SetRawInputAt(1, value);
   }
 
+  const FieldInfo& GetFieldInfo() const { return field_info_; }
   MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
   Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
+  bool IsVolatile() const { return field_info_.IsVolatile(); }
 
   HInstruction* GetValue() const { return InputAt(1); }
 
@@ -2496,24 +2505,28 @@
  public:
   HStaticFieldGet(HInstruction* cls,
                   Primitive::Type field_type,
-                  MemberOffset field_offset)
+                  MemberOffset field_offset,
+                  bool is_volatile)
       : HExpression(field_type, SideEffects::DependsOnSomething()),
-        field_info_(field_offset, field_type) {
+        field_info_(field_offset, field_type, is_volatile) {
     SetRawInputAt(0, cls);
   }
 
-  bool CanBeMoved() const OVERRIDE { return true; }
+  bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
+
   bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
-    size_t other_offset = other->AsStaticFieldGet()->GetFieldOffset().SizeValue();
-    return other_offset == GetFieldOffset().SizeValue();
+    HStaticFieldGet* other_get = other->AsStaticFieldGet();
+    return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
   }
 
   size_t ComputeHashCode() const OVERRIDE {
     return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
   }
 
+  const FieldInfo& GetFieldInfo() const { return field_info_; }
   MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
   Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
+  bool IsVolatile() const { return field_info_.IsVolatile(); }
 
   DECLARE_INSTRUCTION(StaticFieldGet);
 
@@ -2528,15 +2542,18 @@
   HStaticFieldSet(HInstruction* cls,
                   HInstruction* value,
                   Primitive::Type field_type,
-                  MemberOffset field_offset)
+                  MemberOffset field_offset,
+                  bool is_volatile)
       : HTemplateInstruction(SideEffects::ChangesSomething()),
-        field_info_(field_offset, field_type) {
+        field_info_(field_offset, field_type, is_volatile) {
     SetRawInputAt(0, cls);
     SetRawInputAt(1, value);
   }
 
+  const FieldInfo& GetFieldInfo() const { return field_info_; }
   MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
   Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
+  bool IsVolatile() const { return field_info_.IsVolatile(); }
 
   HInstruction* GetValue() const { return InputAt(1); }
 
@@ -2677,7 +2694,7 @@
 
   DECLARE_INSTRUCTION(MonitorOperation);
 
- protected:
+ private:
   const OperationKind kind_;
   const uint32_t dex_pc_;
 
@@ -2685,7 +2702,6 @@
   DISALLOW_COPY_AND_ASSIGN(HMonitorOperation);
 };
 
-
 class MoveOperands : public ArenaObject<kArenaAllocMisc> {
  public:
   MoveOperands(Location source, Location destination, HInstruction* instruction)
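
Call sites now thread the volatility flag through the node constructors, as
the test updates above and below show. A small illustrative fragment, not a
complete program (it assumes an ArenaAllocator* arena and an HInstruction*
object in scope, mirroring the tests; the offset is arbitrary):

    HInstanceFieldGet* get = new (arena) HInstanceFieldGet(
        object, Primitive::kPrimLong, MemberOffset(48), /* is_volatile */ true);
    CHECK(get->IsVolatile());
    CHECK(!get->CanBeMoved());  // Volatile gets are now excluded from GVN and other code motion.
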
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index f677e84..c2ea80e 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -462,7 +462,7 @@
   entry->AddSuccessor(block);
 
   HInstruction* test = new (allocator) HInstanceFieldGet(
-      parameter, Primitive::kPrimBoolean, MemberOffset(22));
+      parameter, Primitive::kPrimBoolean, MemberOffset(22), false);
   block->AddInstruction(test);
   block->AddInstruction(new (allocator) HIf(test));
   HBasicBlock* then = new (allocator) HBasicBlock(graph);
@@ -481,8 +481,10 @@
 
   *phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt);
   join->AddPhi(*phi);
-  *input1 = new (allocator) HInstanceFieldGet(parameter, Primitive::kPrimInt, MemberOffset(42));
-  *input2 = new (allocator) HInstanceFieldGet(parameter, Primitive::kPrimInt, MemberOffset(42));
+  *input1 = new (allocator) HInstanceFieldGet(parameter, Primitive::kPrimInt,
+                                              MemberOffset(42), false);
+  *input2 = new (allocator) HInstanceFieldGet(parameter, Primitive::kPrimInt,
+                                              MemberOffset(42), false);
   then->AddInstruction(*input1);
   else_->AddInstruction(*input2);
   join->AddInstruction(new (allocator) HExit());
@@ -581,7 +583,8 @@
   graph->AddBlock(block);
   entry->AddSuccessor(block);
 
-  *field = new (allocator) HInstanceFieldGet(parameter, Primitive::kPrimInt, MemberOffset(42));
+  *field = new (allocator) HInstanceFieldGet(parameter, Primitive::kPrimInt,
+                                             MemberOffset(42), false);
   block->AddInstruction(*field);
   *ret = new (allocator) HReturn(*field);
   block->AddInstruction(*ret);
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index c86ec4b..87b3813 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -429,6 +429,8 @@
 
   virtual void ldrex(Register rd, Register rn, Condition cond = AL) = 0;
   virtual void strex(Register rd, Register rt, Register rn, Condition cond = AL) = 0;
+  virtual void ldrexd(Register rt, Register rt2, Register rn, Condition cond = AL) = 0;
+  virtual void strexd(Register rd, Register rt, Register rt2, Register rn, Condition cond = AL) = 0;
 
   // Miscellaneous instructions.
   virtual void clrex(Condition cond = AL) = 0;
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index 8f6d45a..8d1fb60 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -778,6 +778,7 @@
   Emit(encoding);
 }
 
+
 void Arm32Assembler::ldrex(Register rt, Register rn, Condition cond) {
   CHECK_NE(rn, kNoRegister);
   CHECK_NE(rt, kNoRegister);
@@ -793,6 +794,25 @@
 }
 
 
+void Arm32Assembler::ldrexd(Register rt, Register rt2, Register rn, Condition cond) {
+  CHECK_NE(rn, kNoRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt2, kNoRegister);
+  CHECK_NE(rt, R14);
+  CHECK_EQ(0u, static_cast<uint32_t>(rt) % 2);
+  CHECK_EQ(static_cast<uint32_t>(rt) + 1, static_cast<uint32_t>(rt2));
+  CHECK_NE(cond, kNoCondition);
+
+  int32_t encoding =
+      (static_cast<uint32_t>(cond) << kConditionShift) |
+      B24 | B23 | B21 | B20 |
+      static_cast<uint32_t>(rn) << 16 |
+      static_cast<uint32_t>(rt) << 12 |
+      B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
+  Emit(encoding);
+}
+
+
 void Arm32Assembler::strex(Register rd,
                            Register rt,
                            Register rn,
@@ -811,6 +831,28 @@
   Emit(encoding);
 }
 
+
+void Arm32Assembler::strexd(Register rd, Register rt, Register rt2, Register rn, Condition cond) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(rn, kNoRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt2, kNoRegister);
+  CHECK_NE(rt, R14);
+  CHECK_NE(rd, rt);
+  CHECK_NE(rd, rt2);
+  CHECK_EQ(0u, static_cast<uint32_t>(rt) % 2);
+  CHECK_EQ(static_cast<uint32_t>(rt) + 1, static_cast<uint32_t>(rt2));
+  CHECK_NE(cond, kNoCondition);
+
+  int32_t encoding =
+      (static_cast<uint32_t>(cond) << kConditionShift) |
+      B24 | B23 | B21 |
+      static_cast<uint32_t>(rn) << 16 |
+      static_cast<uint32_t>(rd) << 12 |
+      B11 | B10 | B9 | B8 | B7 | B4 |
+      static_cast<uint32_t>(rt);
+  Emit(encoding);
+}
+
 
 void Arm32Assembler::clrex(Condition cond) {
   CHECK_EQ(cond, AL);   // This cannot be conditional on ARM.
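
The A1 (ARM-mode) encodings above enforce the architectural pairing rules: ldrexd/strexd require an even-numbered rt with rt2 == rt + 1 and rt != LR, and strexd's status register rd must be distinct from both data registers. A minimal sketch of the intended use, a retry loop that stores the pair r2:r3 to [r0] atomically (register choice, and the usual "__" shorthand for assembler->, are illustrative and not taken from this patch):

  Label retry;
  __ Bind(&retry);
  __ ldrexd(R4, R5, R0);        // claim exclusive access; loaded value unused here
  __ strexd(IP, R2, R3, R0);    // IP <- 0 on success, 1 if exclusivity was lost
  __ cmp(IP, ShifterOperand(0));
  __ b(&retry, NE);             // retry until the 64-bit store lands atomically
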
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 6c8d415..b922d66 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -123,6 +123,8 @@
 
   void ldrex(Register rd, Register rn, Condition cond = AL) OVERRIDE;
   void strex(Register rd, Register rt, Register rn, Condition cond = AL) OVERRIDE;
+  void ldrexd(Register rt, Register rt2, Register rn, Condition cond = AL) OVERRIDE;
+  void strexd(Register rd, Register rt, Register rt2, Register rn, Condition cond = AL) OVERRIDE;
 
   // Miscellaneous instructions.
   void clrex(Condition cond = AL) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
index 951792d..4a0ae0b 100644
--- a/compiler/utils/arm/assembler_arm32_test.cc
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -697,4 +697,28 @@
   DriverStr(expected, "vmrs");
 }
 
+TEST_F(AssemblerArm32Test, ldrexd) {
+  GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R0);
+  GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R1);
+  GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R2);
+
+  const char* expected =
+      "ldrexd r0, r1, [r0]\n"
+      "ldrexd r0, r1, [r1]\n"
+      "ldrexd r0, r1, [r2]\n";
+  DriverStr(expected, "ldrexd");
+}
+
+TEST_F(AssemblerArm32Test, strexd) {
+  GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R0);
+  GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R1);
+  GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R2);
+
+  const char* expected =
+      "strexd r9, r0, r1, [r0]\n"
+      "strexd r9, r0, r1, [r1]\n"
+      "strexd r9, r0, r1, [r2]\n";
+  DriverStr(expected, "strexd");
+}
+
 }  // namespace art
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 3eaae56..3eccd3f 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -1662,9 +1662,6 @@
   CHECK_NE(rn, kNoRegister);
   CHECK_NE(rt, kNoRegister);
   CheckCondition(cond);
-  CHECK_NE(rn, kNoRegister);
-  CHECK_NE(rt, kNoRegister);
-  CheckCondition(cond);
   CHECK_LT(imm, (1u << 10));
 
   int32_t encoding = B31 | B30 | B29 | B27 | B22 | B20 |
@@ -1701,6 +1698,22 @@
 }
 
 
+void Thumb2Assembler::ldrexd(Register rt, Register rt2, Register rn, Condition cond) {
+  CHECK_NE(rn, kNoRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt2, kNoRegister);
+  CHECK_NE(rt, rt2);
+  CheckCondition(cond);
+
+  int32_t encoding = B31 | B30 | B29 | B27 | B23 | B22 | B20 |
+      static_cast<uint32_t>(rn) << 16 |
+      static_cast<uint32_t>(rt) << 12 |
+      static_cast<uint32_t>(rt2) << 8 |
+      B6 | B5 | B4 | B3 | B2 | B1 | B0;
+  Emit32(encoding);
+}
+
+
 void Thumb2Assembler::strex(Register rd,
                             Register rt,
                             Register rn,
@@ -1709,6 +1722,26 @@
 }
 
 
+void Thumb2Assembler::strexd(Register rd, Register rt, Register rt2, Register rn, Condition cond) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(rn, kNoRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt2, kNoRegister);
+  CHECK_NE(rt, rt2);
+  CHECK_NE(rd, rt);
+  CHECK_NE(rd, rt2);
+  CheckCondition(cond);
+
+  int32_t encoding = B31 | B30 | B29 | B27 | B23 | B22 |
+      static_cast<uint32_t>(rn) << 16 |
+      static_cast<uint32_t>(rt) << 12 |
+      static_cast<uint32_t>(rt2) << 8 |
+      B6 | B5 | B4 |
+      static_cast<uint32_t>(rd);
+  Emit32(encoding);
+}
+
+
 void Thumb2Assembler::clrex(Condition cond) {
   CheckCondition(cond);
   int32_t encoding = B31 | B30 | B29 | B27 | B28 | B25 | B24 | B23 |
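
Unlike the A1 encodings, the T1 (Thumb2) encodings above carry rt and rt2 in separate register fields, so no even/consecutive-pair rule applies; only rt != rt2 (plus a distinct rd for strexd) is checked. The (R5, R3) cases in the Thumb2 tests further down exercise exactly this relaxation:

  __ ldrexd(R5, R3, R7);  // legal in Thumb2
  // The same registers would fail Arm32Assembler's CHECKs, which demand
  // rt even and rt2 == rt + 1, e.g. ldrexd(R4, R5, R7).
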
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 48a3a7e..81dd138 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -149,6 +149,8 @@
   void ldrex(Register rd, Register rn, uint16_t imm, Condition cond = AL);
   void strex(Register rd, Register rt, Register rn, uint16_t imm, Condition cond = AL);
 
+  void ldrexd(Register rt, Register rt2, Register rn, Condition cond = AL) OVERRIDE;
+  void strexd(Register rd, Register rt, Register rt2, Register rn, Condition cond = AL) OVERRIDE;
 
   // Miscellaneous instructions.
   void clrex(Condition cond = AL) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index 6ae95a4..425ccd7 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -164,4 +164,32 @@
   DriverStr(expected, "vmrs");
 }
 
+TEST_F(AssemblerThumb2Test, ldrexd) {
+  GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R0);
+  GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R1);
+  GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R2);
+  GetAssembler()->ldrexd(arm::R5, arm::R3, arm::R7);
+
+  const char* expected =
+      "ldrexd r0, r1, [r0]\n"
+      "ldrexd r0, r1, [r1]\n"
+      "ldrexd r0, r1, [r2]\n"
+      "ldrexd r5, r3, [r7]\n";
+  DriverStr(expected, "ldrexd");
+}
+
+TEST_F(AssemblerThumb2Test, strexd) {
+  GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R0);
+  GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R1);
+  GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R2);
+  GetAssembler()->strexd(arm::R9, arm::R5, arm::R3, arm::R7);
+
+  const char* expected =
+      "strexd r9, r0, r1, [r0]\n"
+      "strexd r9, r0, r1, [r1]\n"
+      "strexd r9, r0, r1, [r2]\n"
+      "strexd r9, r5, r3, [r7]\n";
+  DriverStr(expected, "strexd");
+}
+
 }  // namespace art
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index f0353f6..f8c0043 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -443,6 +443,27 @@
 }
 
 
+void X86Assembler::psrlq(XmmRegister reg, const Immediate& shift_count) {
+  DCHECK(shift_count.is_uint8());
+
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x73);
+  EmitXmmRegisterOperand(2, reg);
+  EmitUint8(shift_count.value());
+}
+
+
+void X86Assembler::punpckldq(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x62);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
 void X86Assembler::addsd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0xF2);
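
psrlq and punpckldq are the SSE2 building blocks for treating a 64-bit value as one unit in an XMM register on 32-bit x86, presumably in support of atomic long/double accesses: punpckldq interleaves the low doublewords of two registers (gluing two 32-bit halves into one 64-bit lane), and psrlq shifts the quadword right to expose the high half again. A hedged sketch of the pattern (registers and the field offset of 8 are illustrative):

  __ movd(XMM0, EAX);                // xmm0[31:0] = low half
  __ movd(XMM1, EDX);                // xmm1[31:0] = high half
  __ punpckldq(XMM0, XMM1);          // xmm0[63:0] = EDX:EAX
  __ movsd(Address(ECX, 8), XMM0);   // one 64-bit (atomic) store

  __ movsd(XMM0, Address(ECX, 8));   // one 64-bit (atomic) load
  __ movd(EAX, XMM0);                // low half
  __ psrlq(XMM0, Immediate(32));     // bring the high half down
  __ movd(EDX, XMM0);                // high half
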
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 9fecf1e..6c3d131 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -274,6 +274,9 @@
   void movsd(const Address& dst, XmmRegister src);
   void movsd(XmmRegister dst, XmmRegister src);
 
+  void psrlq(XmmRegister reg, const Immediate& shift_count);
+  void punpckldq(XmmRegister dst, XmmRegister src);
+
   void addsd(XmmRegister dst, XmmRegister src);
   void addsd(XmmRegister dst, const Address& src);
   void subsd(XmmRegister dst, XmmRegister src);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index d901673..fccb510 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -105,6 +105,18 @@
   DriverStr(expected, "movl");
 }
 
+TEST_F(AssemblerX86Test, psrlq) {
+  GetAssembler()->psrlq(x86::XMM0, CreateImmediate(32));
+  const char* expected = "psrlq $0x20, %xmm0\n";
+  DriverStr(expected, "psrlq");
+}
+
+TEST_F(AssemblerX86Test, punpckldq) {
+  GetAssembler()->punpckldq(x86::XMM0, x86::XMM1);
+  const char* expected = "punpckldq %xmm1, %xmm0\n";
+  DriverStr(expected, "punpckldq");
+}
+
 TEST_F(AssemblerX86Test, LoadLongConstant) {
   GetAssembler()->LoadLongConstant(x86::XMM0, 51);
   const char* expected =
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index b989c7f..de4ea36 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -2177,11 +2177,18 @@
 
   virtual bool ExecuteWithoutRuntime() OVERRIDE {
     CHECK(args_ != nullptr);
-    CHECK(args_->symbolize_);
+    CHECK(args_->oat_filename_ != nullptr);
 
     MemMap::Init();
 
-    return SymbolizeOat(args_->oat_filename_, args_->output_name_) == EXIT_SUCCESS;
+    if (args_->symbolize_) {
+      return SymbolizeOat(args_->oat_filename_, args_->output_name_) == EXIT_SUCCESS;
+    } else {
+      return DumpOat(nullptr,
+                     args_->oat_filename_,
+                     oat_dumper_options_.release(),
+                     args_->os_) == EXIT_SUCCESS;
+    }
   }
 
   virtual bool ExecuteWithRuntime(Runtime* runtime) {
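
With this change ExecuteWithoutRuntime covers both runtime-less modes: --symbolize keeps its existing path, and a plain oat-file dump now goes through DumpOat with a null Runtime*. In terms of invocation (flag spellings per oatdump's usage text; the path is illustrative):

  oatdump --symbolize=/data/local/tmp/core.oat   # as before
  oatdump --oat-file=/data/local/tmp/core.oat    # no longer needs a runtime
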
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 968e89d..9707c7b 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -101,9 +101,12 @@
 void ThreadList::DumpForSigQuit(std::ostream& os) {
   {
     ScopedObjectAccess soa(Thread::Current());
-    Histogram<uint64_t>::CumulativeData data;
-    suspend_all_historam_.CreateHistogram(&data);
-    suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data);  // Dump time to suspend.
+    // Only print if we have samples.
+    if (suspend_all_historam_.SampleSize() > 0) {
+      Histogram<uint64_t>::CumulativeData data;
+      suspend_all_historam_.CreateHistogram(&data);
+      suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data);  // Dump time to suspend.
+    }
   }
   Dump(os);
   DumpUnattachedThreads(os);
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 5f86f1e..a55a137 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -9,4 +9,5 @@
 BadCaseInOpRegRegReg
 CmpLong
 FloatIntConstPassing
+b/18718277
 Done!
diff --git a/test/800-smali/smali/b_18718277.smali b/test/800-smali/smali/b_18718277.smali
new file mode 100644
index 0000000..b14ad20
--- /dev/null
+++ b/test/800-smali/smali/b_18718277.smali
@@ -0,0 +1,29 @@
+.class public LB18718277;
+
+.super Ljava/lang/Object;
+
+.method public static helper(I)I
+    .locals 1
+    add-int/lit8 v0, p0, 2
+    neg-int v0, v0
+    return v0
+.end method
+
+.method public static getInt()I
+    .registers 2
+    const/4 v1, 3
+    invoke-static {v1}, LB18718277;->helper(I)I
+    move-result v0
+    :outer_loop
+    if-eqz v1, :exit_outer_loop
+    const/4 v0, 0
+    if-eqz v0, :skip_dead_loop
+    :dead_loop
+    add-int/2addr v0, v0
+    if-gez v0, :dead_loop
+    :skip_dead_loop
+    add-int/lit8 v1, v1, -1
+    goto :outer_loop
+    :exit_outer_loop
+    return v0
+.end method
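
The smali above is a regression test for b/18718277: helper(3) yields -(3 + 2) = -5, but getInt() resets v0 to 0 on every outer iteration and the inner "dead loop" is never entered, so the expected return value registered in Main.java below is 0. A rough Java rendering of the control flow (hand-translated, for illustration only):

  static int getInt() {
    int v1 = 3;
    int v0 = helper(v1);   // -5, but dead: overwritten below
    while (v1 != 0) {
      v0 = 0;
      if (v0 != 0) {       // never true; the loop below is unreachable
        do {
          v0 += v0;
        } while (v0 >= 0);
      }
      v1--;
    }
    return v0;             // always 0
  }
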
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index a2db051..70641b2 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -65,6 +65,7 @@
         testCases.add(new TestCase("BadCaseInOpRegRegReg", "BadCaseInOpRegRegReg", "getInt", null, null, 2));
         testCases.add(new TestCase("CmpLong", "CmpLong", "run", null, null, 0));
         testCases.add(new TestCase("FloatIntConstPassing", "FloatIntConstPassing", "run", null, null, 2));
+        testCases.add(new TestCase("b/18718277", "B18718277", "getInt", null, null, 0));
     }
 
     public void runTests() {