Compressed native PC for stack maps

Compress the native PC based on instruction alignment: since
instructions start at multiples of the ISA's instruction alignment,
the low bits of a native PC offset carry no information and need not
be stored. This reduces the size of stack maps; boot.oat is 0.4%
smaller for arm64.
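
A minimal sketch of the round trip this enables, assuming arm64's
4-byte instruction alignment (the offset value is illustrative):

    CodeOffset off = CodeOffset::FromOffset(0x1040u, kArm64);
    off.CompressedValue();    // 0x410: the value stored in the stack map
    off.Uint32Value(kArm64);  // 0x1040: the decoded native PC offset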

Test: test-art-host, test-art-target, N6P booting

Change-Id: I2b70eecabda88b06fa80a85688fd992070d54278
diff --git a/runtime/arch/code_offset.h b/runtime/arch/code_offset.h
new file mode 100644
index 0000000..ab04b1e
--- /dev/null
+++ b/runtime/arch/code_offset.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_CODE_OFFSET_H_
+#define ART_RUNTIME_ARCH_CODE_OFFSET_H_
+
+#include <iosfwd>
+
+#include "base/bit_utils.h"
+#include "base/logging.h"
+#include "instruction_set.h"
+
+namespace art {
+
+// CodeOffset is a holder for compressed code offsets. On architectures with instruction
+// alignment requirements, an offset can be stored divided by that alignment to shrink stack maps.
+class CodeOffset {
+ public:
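+  // Compresses a native PC offset; |offset| should be a multiple of |isa|'s instruction alignment.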
+  ALWAYS_INLINE static CodeOffset FromOffset(uint32_t offset, InstructionSet isa = kRuntimeISA) {
+    return CodeOffset(offset / GetInstructionSetInstructionAlignment(isa));
+  }
+
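+  // Wraps an already-compressed value, e.g. one read back from a stack map.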
+  ALWAYS_INLINE static CodeOffset FromCompressedOffset(uint32_t offset) {
+    return CodeOffset(offset);
+  }
+
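+  // Decompresses back to the native PC offset by multiplying by |isa|'s instruction alignment.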
+  ALWAYS_INLINE uint32_t Uint32Value(InstructionSet isa = kRuntimeISA) const {
+    uint32_t decoded = value_ * GetInstructionSetInstructionAlignment(isa);
+    DCHECK_GE(decoded, value_) << "Integer overflow";
+    return decoded;
+  }
+
+  // Returns the compressed internal value.
+  ALWAYS_INLINE uint32_t CompressedValue() const {
+    return value_;
+  }
+
+  ALWAYS_INLINE CodeOffset() = default;
+  ALWAYS_INLINE CodeOffset(const CodeOffset&) = default;
+  ALWAYS_INLINE CodeOffset& operator=(const CodeOffset&) = default;
+  ALWAYS_INLINE CodeOffset& operator=(CodeOffset&&) = default;
+
+ private:
+  ALWAYS_INLINE explicit CodeOffset(uint32_t value) : value_(value) {}
+
+  uint32_t value_ = 0u;
+};
+
+inline bool operator==(const CodeOffset& a, const CodeOffset& b) {
+  return a.CompressedValue() == b.CompressedValue();
+}
+
+inline bool operator!=(const CodeOffset& a, const CodeOffset& b) {
+  return !(a == b);
+}
+
+inline bool operator<(const CodeOffset& a, const CodeOffset& b) {
+  return a.CompressedValue() < b.CompressedValue();
+}
+
+inline bool operator<=(const CodeOffset& a, const CodeOffset& b) {
+  return a.CompressedValue() <= b.CompressedValue();
+}
+
+inline bool operator>(const CodeOffset& a, const CodeOffset& b) {
+  return a.CompressedValue() > b.CompressedValue();
+}
+
+inline bool operator>=(const CodeOffset& a, const CodeOffset& b) {
+  return a.CompressedValue() >= b.CompressedValue();
+}
+
+inline std::ostream& operator<<(std::ostream& os, const CodeOffset& offset) {
+  return os << offset.Uint32Value();
+}
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_CODE_OFFSET_H_
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index 4a8bea4..99aea62 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -75,6 +75,14 @@
 // X86 instruction alignment. This is the recommended alignment for maximum performance.
 static constexpr size_t kX86Alignment = 16;
 
+// Different from code alignment, which applies only to the first instruction of a method.
+static constexpr size_t kThumb2InstructionAlignment = 2;
+static constexpr size_t kArm64InstructionAlignment = 4;
+static constexpr size_t kX86InstructionAlignment = 1;
+static constexpr size_t kX86_64InstructionAlignment = 1;
+static constexpr size_t kMipsInstructionAlignment = 2;
+static constexpr size_t kMips64InstructionAlignment = 2;
+
 const char* GetInstructionSetString(InstructionSet isa);
 
 // Note: Returns kNone when the string cannot be parsed to a known value.
@@ -106,6 +114,17 @@
   }
 }
 
+ALWAYS_INLINE static inline constexpr size_t GetInstructionSetInstructionAlignment(
+    InstructionSet isa) {
+  return (isa == kThumb2 || isa == kArm) ? kThumb2InstructionAlignment :
+         (isa == kArm64) ? kArm64InstructionAlignment :
+         (isa == kX86) ? kX86InstructionAlignment :
+         (isa == kX86_64) ? kX86_64InstructionAlignment :
+         (isa == kMips) ? kMipsInstructionAlignment :
+         (isa == kMips64) ? kMips64InstructionAlignment :
+         0;  // Invalid case, but a C++11-style constexpr function cannot contain asserts.
+}
+
 static inline bool IsValidInstructionSet(InstructionSet isa) {
   switch (isa) {
     case kArm:
diff --git a/runtime/arch/instruction_set_test.cc b/runtime/arch/instruction_set_test.cc
index 5aae93a..b251b57 100644
--- a/runtime/arch/instruction_set_test.cc
+++ b/runtime/arch/instruction_set_test.cc
@@ -44,6 +44,15 @@
   EXPECT_STREQ("none", GetInstructionSetString(kNone));
 }
 
+TEST(InstructionSetTest, GetInstructionSetInstructionAlignment) {
+  EXPECT_EQ(GetInstructionSetInstructionAlignment(kThumb2), kThumb2InstructionAlignment);
+  EXPECT_EQ(GetInstructionSetInstructionAlignment(kArm64), kArm64InstructionAlignment);
+  EXPECT_EQ(GetInstructionSetInstructionAlignment(kX86), kX86InstructionAlignment);
+  EXPECT_EQ(GetInstructionSetInstructionAlignment(kX86_64), kX86_64InstructionAlignment);
+  EXPECT_EQ(GetInstructionSetInstructionAlignment(kMips), kMipsInstructionAlignment);
+  EXPECT_EQ(GetInstructionSetInstructionAlignment(kMips64), kMips64InstructionAlignment);
+}
+
 TEST(InstructionSetTest, TestRoundTrip) {
   EXPECT_EQ(kRuntimeISA, GetInstructionSetFromString(GetInstructionSetString(kRuntimeISA)));
 }