AArch64: fixes in A64 code generation.

- Disabled special method compilation, as it requires the hard-float ABI,
- Disabled suspend checks, as the runtime is not yet ready (e.g. the
  trampolines do not set the suspend register yet),
- Changed the definition of the zero register (it now uses 0x3f as its
  register number),
- Fixed some issues in the assembler's handling of cmp instructions: we
  now use the shifted-register rather than the extended-register variant
  of cmp and cmn,
- Partially fixed the register setup (register sN is now mapped to dN),
- Fixed and completed the implementation of register spills/unspills
  (see the frame-layout sketch after this list),
- Fixed LoadBaseDispBody() and StoreBaseDispBody().
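
For illustration only, a minimal sketch of the frame-layout arithmetic the
new prologue/epilogue code relies on. The constant and field names mirror
those used in call_arm64.cc, but the standalone helpers below are
hypothetical and not part of this change:

    // Hypothetical helpers spelling out the offsets computed in
    // GenEntrySequence()/GenExitSequence(): core callee saves sit at the
    // top of the frame, with FP callee saves stored directly below them.
    constexpr int kArm64PointerSize = 8;  // bytes per saved register slot

    inline int CoreSpillOffset(int frame_size, int num_core_spills) {
      return frame_size - kArm64PointerSize * num_core_spills;
    }

    inline int FpSpillOffset(int frame_size, int num_core_spills,
                             int num_fp_spills) {
      return frame_size - kArm64PointerSize * (num_core_spills + num_fp_spills);
    }

    // Example: frame_size = 64 with 2 core and 2 FP spills gives core saves
    // at [sp, #48] and FP saves at [sp, #32].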

Change-Id: Ia49ba48b6ca0f782380066345b7a198cb6c1dc1d
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 136a04f..f7a0199 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -25,7 +25,10 @@
 
 bool Arm64Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
                                   const InlineMethod& special) {
-  return Mir2Lir::GenSpecialCase(bb, mir, special);
+  // TODO(Arm64): re-enable this, once hard-float ABI is implemented.
+  //   (this currently does not work, as GetArgMappingToPhysicalReg returns InvalidReg()).
+  // return Mir2Lir::GenSpecialCase(bb, mir, special);
+  return false;
 }
 
 /*
@@ -348,18 +351,16 @@
     OpRegImm64(kOpSub, rs_rA64_SP, frame_size_, /*is_wide*/true);
   }
 
-  /* Spill core callee saves */
-  if (core_spill_mask_) {
-    SpillCoreRegs(rs_rA64_SP, frame_size_, core_spill_mask_);
-  }
   /* Need to spill any FP regs? */
-  if (num_fp_spills_) {
-    /*
-     * NOTE: fp spills are a little different from core spills in that
-     * they are pushed as a contiguous block.  When promoting from
-     * the fp set, we must allocate all singles from s16..highest-promoted
-     */
-    // TODO(Arm64): SpillFPRegs(rA64_SP, frame_size_, core_spill_mask_);
+  if (fp_spill_mask_) {
+    int spill_offset = frame_size_ - kArm64PointerSize*(num_fp_spills_ + num_core_spills_);
+    SpillFPRegs(rs_rA64_SP, spill_offset, fp_spill_mask_);
+  }
+
+  /* Spill core callee saves. */
+  if (core_spill_mask_) {
+    int spill_offset = frame_size_ - kArm64PointerSize*num_core_spills_;
+    SpillCoreRegs(rs_rA64_SP, spill_offset, core_spill_mask_);
   }
 
   FlushIns(ArgLocs, rl_method);
@@ -379,12 +380,15 @@
   LockTemp(rs_x1);
 
   NewLIR0(kPseudoMethodExit);
+
   /* Need to restore any FP callee saves? */
-  if (num_fp_spills_) {
-    // TODO(Arm64): UnspillFPRegs(num_fp_spills_);
+  if (fp_spill_mask_) {
+    int spill_offset = frame_size_ - kArm64PointerSize*(num_fp_spills_ + num_core_spills_);
+    UnSpillFPRegs(rs_rA64_SP, spill_offset, fp_spill_mask_);
   }
   if (core_spill_mask_) {
-    UnSpillCoreRegs(rs_rA64_SP, frame_size_, core_spill_mask_);
+    int spill_offset = frame_size_ - kArm64PointerSize*num_core_spills_;
+    UnSpillCoreRegs(rs_rA64_SP, spill_offset, core_spill_mask_);
   }
 
   OpRegImm64(kOpAdd, rs_rA64_SP, frame_size_, /*is_wide*/true);
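
For reference, a minimal sketch (not the actual SpillFPRegs()/UnSpillFPRegs()
implementation in this codebase) of how a spill mask plus a base offset
typically expands into a contiguous block of 8-byte slots, matching the
spill_offset arithmetic above. The function name and the emit callback are
hypothetical:

    #include <cstdint>
    #include <functional>

    // Walk a spill mask and hand each set bit a consecutive 8-byte slot
    // starting at base_offset, e.g. to emit "str dN, [sp, #offset]" on the
    // spill path or the corresponding ldr on the unspill path.
    void ForEachSpillSlot(uint32_t spill_mask, int base_offset,
                          const std::function<void(int reg, int offset)>& emit) {
      int offset = base_offset;
      for (int reg = 0; reg < 32; ++reg) {
        if ((spill_mask & (1u << reg)) != 0) {
          emit(reg, offset);
          offset += 8;  // one 64-bit slot per saved register
        }
      }
    }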