Revert "ART: Better SSA Allocation when recreating SSA"

Temporarily reverting until the memory-footprint cost of adding a vreg-to-SSA entrance map on every applicable MIR node can be assessed.

This reverts commit cb73fb35e5f7c575ed491c0c8e2d2b1a0a22ea2e.

Change-Id: Ia9c03bfc5d365ad8d8b949e870f1e3bcda7f9a54
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index f64f3e0..2c125f6 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -223,8 +223,7 @@
   ArenaBitVector* def_v;
   ArenaBitVector* live_in_v;
   ArenaBitVector* phi_v;
-  int32_t* vreg_to_ssa_map_exit;
-  int32_t* vreg_to_ssa_map_entrance;
+  int32_t* vreg_to_ssa_map;
   ArenaBitVector* ending_check_v;  // For null check and class init check elimination.
 };
 
@@ -237,8 +236,6 @@
  * we may want to revisit in the future.
  */
 struct SSARepresentation {
-  int16_t num_uses_allocated;
-  int16_t num_defs_allocated;
   int16_t num_uses;
   int16_t num_defs;
   int32_t* uses;
@@ -861,10 +858,6 @@
   void CombineBlocks(BasicBlock* bb);
 
   void ClearAllVisitedFlags();
-
-  void AllocateSSAUseData(MIR *mir, int num_uses);
-  void AllocateSSADefData(MIR *mir, int num_defs);
-
   /*
    * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
    * we can verify that all catch entries have native PC entries.
@@ -950,7 +943,6 @@
   GrowableArray<uint32_t> use_counts_;      // Weighted by nesting depth
   GrowableArray<uint32_t> raw_use_counts_;  // Not weighted
   unsigned int num_reachable_blocks_;
-  unsigned int max_num_reachable_blocks_;
   GrowableArray<BasicBlockId>* dfs_order_;
   GrowableArray<BasicBlockId>* dfs_post_order_;
   GrowableArray<BasicBlockId>* dom_post_order_traversal_;