Add per-array-size allocation entrypoints.

- Update architectures that have fast paths for
  array allocation to use the new entrypoints
  (see the sketch after this list).
- Will add more fast paths in follow-up CLs.
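
The asm_support.h hunk below bumps the pointer count in
THREAD_CURRENT_IBASE_OFFSET from (1 + 157) to (1 + 161), i.e. four
additional pointer-sized slots now precede tlsPtr_.mterp_current_ibase,
matching the new entrypoints (presumably one per supported component
size). As a rough illustration only, the following C++ sketch shows the
idea; the names ArrayClass, AllocArrayResolved, AllocArrayResolvedSized,
SelectEntrypoint and kHeaderSize are made up for this sketch and are not
the actual ART runtime API:

    // Sketch only; not ART code. Shows why a per-component-size entrypoint
    // can be faster than a generic one: the size computation becomes a
    // compile-time constant and the component-size load disappears.
    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <iostream>

    namespace sketch {

    // Hypothetical stand-in for an array class; only the component size
    // matters here.
    struct ArrayClass {
      std::size_t component_size;
    };

    // Hypothetical object header size; real layout differs.
    constexpr std::size_t kHeaderSize = 16;

    // Generic entrypoint: must read the component size at run time.
    void* AllocArrayResolved(const ArrayClass* klass, int32_t count) {
      std::size_t bytes =
          kHeaderSize + klass->component_size * static_cast<std::size_t>(count);
      return std::calloc(1, bytes);
    }

    // Size-specialized entrypoint: the element size is a template constant,
    // so no class load is needed to compute the allocation size.
    template <std::size_t kComponentSize>
    void* AllocArrayResolvedSized(int32_t count) {
      std::size_t bytes =
          kHeaderSize + kComponentSize * static_cast<std::size_t>(count);
      return std::calloc(1, bytes);
    }

    // How a compiler back end might pick the specialized entrypoint when
    // the array element type (and thus its size) is statically known.
    using AllocSizedFn = void* (*)(int32_t);
    AllocSizedFn SelectEntrypoint(std::size_t component_size) {
      switch (component_size) {
        case 1: return AllocArrayResolvedSized<1>;
        case 2: return AllocArrayResolvedSized<2>;
        case 4: return AllocArrayResolvedSized<4>;
        case 8: return AllocArrayResolvedSized<8>;
        default: return nullptr;  // Fall back to the generic entrypoint.
      }
    }

    }  // namespace sketch

    int main() {
      sketch::ArrayClass int_array_class{4};  // e.g. int[] has 4-byte elements.
      void* a = sketch::AllocArrayResolved(&int_array_class, 10);
      void* b = sketch::SelectEntrypoint(4)(10);  // specialized fast path
      std::cout << (a != nullptr && b != nullptr) << "\n";
      std::free(a);
      std::free(b);
      return 0;
    }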

Test: test-art-target test-art-host.
Change-Id: I138cccd16464a85de22a8ed31c915f876e78fb04
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index ed83f1c..46f2c08 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -104,7 +104,7 @@
 
 // Offset of field Thread::tlsPtr_.mterp_current_ibase.
 #define THREAD_CURRENT_IBASE_OFFSET \
-    (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 157) * __SIZEOF_POINTER__)
+    (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 161) * __SIZEOF_POINTER__)
 ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
             art::Thread::MterpCurrentIBaseOffset<POINTER_SIZE>().Int32Value())
 // Offset of field Thread::tlsPtr_.mterp_default_ibase.