Clean up the constants.

- Alphabetize the operations.
- Remove the N_GRAM operation.
- Add the TENSOR_INT32 operand type (see the sketch below).

Test: Compiled and ran the tests.
Bug: 63905942
Change-Id: Ie7d9dec671a409256b686ef4665171492d1ac16a
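
Because TENSOR_QUANT8_ASYMM moves from 10 to 11 and the operation values are
renumbered, anything compiled against the old constants must be rebuilt. A
minimal, hypothetical C++ sketch (names and element sizes are assumptions,
not part of this change) of how a consumer might map the updated operand
types to per-element byte sizes:

    #include <cstddef>
    #include <cstdint>

    // Mirrors the tensor portion of the updated OperandType enum.
    enum class OperandType : std::uint32_t {
        // ... scalar types 0-7 unchanged ...
        TENSOR_FLOAT16      = 8,
        TENSOR_FLOAT32      = 9,
        TENSOR_INT32        = 10,  // added by this change
        TENSOR_QUANT8_ASYMM = 11,  // renumbered from 10
    };

    // Per-element byte size a driver might assume for each tensor type.
    inline std::size_t elementSize(OperandType type) {
        switch (type) {
            case OperandType::TENSOR_FLOAT16:      return 2;
            case OperandType::TENSOR_FLOAT32:      return 4;
            case OperandType::TENSOR_INT32:        return 4;
            case OperandType::TENSOR_QUANT8_ASYMM: return 1;
            default:                               return 0;  // scalars handled elsewhere
        }
    }
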
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index 959506a..77f1021 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -32,7 +32,8 @@
     UINT32                    = 7,
     TENSOR_FLOAT16            = 8,
     TENSOR_FLOAT32            = 9,
-    TENSOR_QUANT8_ASYMM       = 10,
+    TENSOR_INT32              = 10,
+    TENSOR_QUANT8_ASYMM       = 11,
 };
 
 // The type of operations.  Unlike the operation types found in
@@ -42,40 +43,39 @@
 // When modifying, be sure to update HAL_NUM_OPERATION_TYPES in HalInterfaces.h.
 enum OperationType : uint32_t {
     OEM_OPERATION                = 0,
-    AVERAGE_POOL                 = 1,
-    CONCATENATION                = 2,
-    CONV                         = 3,
-    DEPTHWISE_CONV               = 4,
-    MAX_POOL                     = 5,
-    L2_POOL                      = 6,
+    ADD                          = 1,
+    AVERAGE_POOL                 = 2,
+    CAST                         = 3,
+    CONCATENATION                = 4,
+    CONV                         = 5,
+    DEPTHWISE_CONV               = 6,
     DEPTH_TO_SPACE               = 7,
-    SPACE_TO_DEPTH               = 8,
-    LOCAL_RESPONSE_NORMALIZATION = 9,
-    SOFTMAX                      = 10,
-    RESHAPE                      = 11,
-    SPLIT                        = 12,
-    FAKE_QUANT                   = 13,
-    ADD                          = 14,
-    FULLY_CONNECTED              = 15,
-    CAST                         = 16,
-    MUL                          = 17,
-    L2_NORMALIZATION             = 18,
-    LOGISTIC                     = 19,
-    RELU                         = 20,
-    RELU6                        = 21,
-    RELU1                        = 22,
-    TANH                         = 23,
-    DEQUANTIZE                   = 24,
-    FLOOR                        = 25,
-    GATHER                       = 26,
+    DEQUANTIZE                   = 8,
+    EMBEDDING_LOOKUP             = 9,
+    FAKE_QUANT                   = 10,
+    FLOOR                        = 11,
+    FULLY_CONNECTED              = 12,
+    GATHER                       = 13,
+    HASHTABLE_LOOKUP             = 14,
+    L2_NORMALIZATION             = 15,
+    L2_POOL                      = 16,
+    LOCAL_RESPONSE_NORMALIZATION = 17,
+    LOGISTIC                     = 18,
+    LSH_PROJECTION               = 19,
+    LSTM                         = 20,
+    MAX_POOL                     = 21,
+    MUL                          = 22,
+    RELU                         = 23,
+    RELU1                        = 24,
+    RELU6                        = 25,
+    RESHAPE                      = 26,
     RESIZE_BILINEAR              = 27,
-    LSH_PROJECTION               = 28,
-    LSTM                         = 29,
-    SVDF                         = 30,
-    RNN                          = 31,
-    N_GRAM                       = 32,
-    EMBEDDING_LOOKUP             = 33,
-    HASHTABLE_LOOKUP             = 34,
+    RNN                          = 28,
+    SOFTMAX                      = 29,
+    SPACE_TO_DEPTH               = 30,
+    SPLIT                        = 31,
+    SVDF                         = 32,
+    TANH                         = 33,
 };
 
 // Two special values that can be used instead of a regular poolIndex.
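
The comment above OperationType says to keep HAL_NUM_OPERATION_TYPES in
HalInterfaces.h in sync; after this change the enum holds 34 values,
OEM_OPERATION (0) through TANH (33). A hypothetical compile-time guard for
that invariant (the constant name comes from the comment; the value and the
assert itself are assumptions, not part of this change):

    #include <cstdint>

    // Assumed framework-side count of operation types, including OEM_OPERATION.
    constexpr std::uint32_t HAL_NUM_OPERATION_TYPES = 34;

    enum class OperationType : std::uint32_t { OEM_OPERATION = 0, /* ... */ TANH = 33 };

    static_assert(static_cast<std::uint32_t>(OperationType::TANH) + 1 ==
                      HAL_NUM_OPERATION_TYPES,
                  "update HAL_NUM_OPERATION_TYPES when adding or removing operations");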