ART: Make InstructionSet an enum class and add kLast.

Adding InstructionSet::kLast will make it easier to encode
an InstructionSet in fewer bits using BitField<>. However,
introducing `kLast` into the `art` namespace is not a good
idea, so we change InstructionSet to an enum class.
This also uncovered a case of InstructionSet::kNone being
erroneously used instead of vixl32::Condition::None(), so
it's good to remove `kNone` from the `art` namespace.
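
For illustration only, a minimal sketch of the intended BitField<>
encoding (the MinimumBitsToStore/BitField<> utilities and the
IsaField name are assumptions here, not part of this change):

    enum class InstructionSet {
      kNone,
      kArm,
      kArm64,
      kThumb2,
      kX86,
      kX86_64,
      kMips,
      kMips64,
      kLast = kMips64
    };

    // Sketch: derive the field width from kLast at compile time and
    // pack the ISA into the low bits of a word alongside other flags.
    static constexpr size_t kIsaBits =
        MinimumBitsToStore(static_cast<size_t>(InstructionSet::kLast));
    using IsaField = BitField<InstructionSet, /* position */ 0, kIsaBits>;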

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Change-Id: I6fa6168dfba4ed6da86d021a69c80224f09997a6
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 866a4d5..29ff235 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -68,7 +68,7 @@
                                              : &Thread::DumpThreadOffset<PointerSize::k32>);
     std::unique_ptr<Disassembler> disasm(Disassembler::Create(isa, opts));
     std::stringstream stream;
-    const uint8_t* base = actual_asm.data() + (isa == kThumb2 ? 1 : 0);
+    const uint8_t* base = actual_asm.data() + (isa == InstructionSet::kThumb2 ? 1 : 0);
     disasm->Dump(stream, base, base + actual_asm.size());
     ReformatAsm(&stream, &lines);
     // Print CFI and assembly interleaved.
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 111469f..fc6a717 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -61,14 +61,14 @@
 
 size_t CompiledCode::CodeDelta(InstructionSet instruction_set) {
   switch (instruction_set) {
-    case kArm:
-    case kArm64:
-    case kMips:
-    case kMips64:
-    case kX86:
-    case kX86_64:
+    case InstructionSet::kArm:
+    case InstructionSet::kArm64:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
       return 0;
-    case kThumb2: {
+    case InstructionSet::kThumb2: {
       // +1 to set the low-order bit so a BLX will switch to Thumb mode
       return 1;
     }
@@ -80,14 +80,14 @@
 
 const void* CompiledCode::CodePointer(const void* code_pointer, InstructionSet instruction_set) {
   switch (instruction_set) {
-    case kArm:
-    case kArm64:
-    case kMips:
-    case kMips64:
-    case kX86:
-    case kX86_64:
+    case InstructionSet::kArm:
+    case InstructionSet::kArm64:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
       return code_pointer;
-    case kThumb2: {
+    case InstructionSet::kThumb2: {
       uintptr_t address = reinterpret_cast<uintptr_t>(code_pointer);
       // Set the low-order bit so a BLX will switch to Thumb mode
       address |= 0x1;
diff --git a/compiler/debug/dwarf/dwarf_test.h b/compiler/debug/dwarf/dwarf_test.h
index b30ff14..5405759 100644
--- a/compiler/debug/dwarf/dwarf_test.h
+++ b/compiler/debug/dwarf/dwarf_test.h
@@ -60,7 +60,8 @@
   template<typename ElfTypes>
   std::vector<std::string> Objdump(const char* args) {
     // Write simple elf file with just the DWARF sections.
-    InstructionSet isa = (sizeof(typename ElfTypes::Addr) == 8) ? kX86_64 : kX86;
+    InstructionSet isa =
+        (sizeof(typename ElfTypes::Addr) == 8) ? InstructionSet::kX86_64 : InstructionSet::kX86;
     ScratchFile file;
     linker::FileOutputStream output_stream(file.GetFile());
     linker::ElfBuilder<ElfTypes> builder(isa, nullptr, &output_stream);
diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h
index 6dacdfa..d0c98a7 100644
--- a/compiler/debug/elf_debug_frame_writer.h
+++ b/compiler/debug/elf_debug_frame_writer.h
@@ -37,8 +37,8 @@
   // debugger that its value in the previous frame is not recoverable.
   bool is64bit = Is64BitInstructionSet(isa);
   switch (isa) {
-    case kArm:
-    case kThumb2: {
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2: {
       dwarf::DebugFrameOpCodeWriter<> opcodes;
       opcodes.DefCFA(Reg::ArmCore(13), 0);  // R13(SP).
       // core registers.
@@ -61,7 +61,7 @@
       WriteCIE(is64bit, return_reg, opcodes, format, buffer);
       return;
     }
-    case kArm64: {
+    case InstructionSet::kArm64: {
       dwarf::DebugFrameOpCodeWriter<> opcodes;
       opcodes.DefCFA(Reg::Arm64Core(31), 0);  // R31(SP).
       // core registers.
@@ -84,8 +84,8 @@
       WriteCIE(is64bit, return_reg, opcodes, format, buffer);
       return;
     }
-    case kMips:
-    case kMips64: {
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64: {
       dwarf::DebugFrameOpCodeWriter<> opcodes;
       opcodes.DefCFA(Reg::MipsCore(29), 0);  // R29(SP).
       // core registers.
@@ -108,7 +108,7 @@
       WriteCIE(is64bit, return_reg, opcodes, format, buffer);
       return;
     }
-    case kX86: {
+    case InstructionSet::kX86: {
       // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296
       constexpr bool generate_opcodes_for_x86_fp = false;
       dwarf::DebugFrameOpCodeWriter<> opcodes;
@@ -134,7 +134,7 @@
       WriteCIE(is64bit, return_reg, opcodes, format, buffer);
       return;
     }
-    case kX86_64: {
+    case InstructionSet::kX86_64: {
       dwarf::DebugFrameOpCodeWriter<> opcodes;
       opcodes.DefCFA(Reg::X86_64Core(4), 8);  // R4(RSP).
       opcodes.Offset(Reg::X86_64Core(16), -8);  // R16(RIP).
@@ -160,7 +160,7 @@
       WriteCIE(is64bit, return_reg, opcodes, format, buffer);
       return;
     }
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   LOG(FATAL) << "Cannot write CIE frame for ISA " << isa;
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 49d52c4..6e72b46 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -68,19 +68,19 @@
     int code_factor_bits_ = 0;
     int dwarf_isa = -1;
     switch (isa) {
-      case kArm:  // arm actually means thumb2.
-      case kThumb2:
+      case InstructionSet::kArm:  // arm actually means thumb2.
+      case InstructionSet::kThumb2:
        code_factor_bits_ = 1;  // 16-bit instructions
         dwarf_isa = 1;  // DW_ISA_ARM_thumb.
         break;
-      case kArm64:
-      case kMips:
-      case kMips64:
+      case InstructionSet::kArm64:
+      case InstructionSet::kMips:
+      case InstructionSet::kMips64:
         code_factor_bits_ = 2;  // 32-bit instructions
         break;
-      case kNone:
-      case kX86:
-      case kX86_64:
+      case InstructionSet::kNone:
+      case InstructionSet::kX86:
+      case InstructionSet::kX86_64:
         break;
     }
     std::unordered_set<uint64_t> seen_addresses(compilation_unit.methods.size());
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index bf47e8f..bb856b2 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -33,20 +33,20 @@
 
 static Reg GetDwarfCoreReg(InstructionSet isa, int machine_reg) {
   switch (isa) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return Reg::ArmCore(machine_reg);
-    case kArm64:
+    case InstructionSet::kArm64:
       return Reg::Arm64Core(machine_reg);
-    case kX86:
+    case InstructionSet::kX86:
       return Reg::X86Core(machine_reg);
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return Reg::X86_64Core(machine_reg);
-    case kMips:
+    case InstructionSet::kMips:
       return Reg::MipsCore(machine_reg);
-    case kMips64:
+    case InstructionSet::kMips64:
       return Reg::Mips64Core(machine_reg);
-    case kNone:
+    case InstructionSet::kNone:
       LOG(FATAL) << "No instruction set";
   }
   UNREACHABLE();
@@ -54,20 +54,20 @@
 
 static Reg GetDwarfFpReg(InstructionSet isa, int machine_reg) {
   switch (isa) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return Reg::ArmFp(machine_reg);
-    case kArm64:
+    case InstructionSet::kArm64:
       return Reg::Arm64Fp(machine_reg);
-    case kX86:
+    case InstructionSet::kX86:
       return Reg::X86Fp(machine_reg);
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return Reg::X86_64Fp(machine_reg);
-    case kMips:
+    case InstructionSet::kMips:
       return Reg::MipsFp(machine_reg);
-    case kMips64:
+    case InstructionSet::kMips64:
       return Reg::Mips64Fp(machine_reg);
-    case kNone:
+    case InstructionSet::kNone:
       LOG(FATAL) << "No instruction set";
   }
   UNREACHABLE();
@@ -230,7 +230,7 @@
           break;  // the high word is correctly implied by the low word.
         }
       } else if (kind == Kind::kInFpuRegister) {
-        if ((isa == kArm || isa == kThumb2) &&
+        if ((isa == InstructionSet::kArm || isa == InstructionSet::kThumb2) &&
             piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegister &&
             reg_hi.GetValue() == value + 1 && value % 2 == 0) {
           // Translate S register pair to D register (e.g. S4+S5 to D2).
diff --git a/compiler/debug/elf_symtab_writer.h b/compiler/debug/elf_symtab_writer.h
index b37f984..0907e10 100644
--- a/compiler/debug/elf_symtab_writer.h
+++ b/compiler/debug/elf_symtab_writer.h
@@ -89,7 +89,7 @@
     // instructions, so that disassembler tools can correctly disassemble.
     // Note that even if we generate just a single mapping symbol, ARM's Streamline
    // requires it to match the function symbol; just using address 0 does not work.
-    if (info.isa == kThumb2) {
+    if (info.isa == InstructionSet::kThumb2) {
       if (address < mapping_symbol_address || !kGenerateSingleArmMappingSymbol) {
         symtab->Add(strtab->Write("$t"), text, address & ~1, 0, STB_LOCAL, STT_NOTYPE);
         mapping_symbol_address = address;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 7581962..a94dbe9 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -381,9 +381,9 @@
       quicken_data.push_back(static_cast<uint8_t>(info.dex_member_index >> 8));
     }
     InstructionSet instruction_set = driver->GetInstructionSet();
-    if (instruction_set == kThumb2) {
+    if (instruction_set == InstructionSet::kThumb2) {
      // Don't use the thumb2 instruction set to avoid the one-off code delta.
-      instruction_set = kArm;
+      instruction_set = InstructionSet::kArm;
     }
     return CompiledMethod::SwapAllocCompiledMethod(
         driver,
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index e1ea630..de481ca 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -31,7 +31,7 @@
   CompilerDriver driver(&compiler_options,
                         &verification_results,
                         Compiler::kOptimizing,
-                        /* instruction_set_ */ kNone,
+                        /* instruction_set_ */ InstructionSet::kNone,
                         /* instruction_set_features */ nullptr,
                         /* image_classes */ nullptr,
                         /* compiled_classes */ nullptr,
@@ -91,7 +91,7 @@
         for (auto&& f : cfi_info) {
           for (auto&& p : patches) {
             compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod(
-                &driver, kNone, c, 0u, 0u, 0u, s, v, f, p));
+                &driver, InstructionSet::kNone, c, 0u, 0u, 0u, s, v, f, p));
           }
         }
       }
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 135f9c7..3d4da5e 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -290,7 +290,8 @@
       verification_results_(verification_results),
       compiler_(Compiler::Create(this, compiler_kind)),
       compiler_kind_(compiler_kind),
-      instruction_set_(instruction_set == kArm ? kThumb2 : instruction_set),
+      instruction_set_(
+          instruction_set == InstructionSet::kArm ? InstructionSet::kThumb2 : instruction_set),
       instruction_set_features_(instruction_set_features),
       requires_constructor_barrier_lock_("constructor barrier lock"),
       non_relative_linker_patch_count_(0u),
@@ -451,13 +452,13 @@
 // GetQuickGenericJniStub allowing down calls that aren't compiled using a JNI compiler?
 static bool InstructionSetHasGenericJniStub(InstructionSet isa) {
   switch (isa) {
-    case kArm:
-    case kArm64:
-    case kThumb2:
-    case kMips:
-    case kMips64:
-    case kX86:
-    case kX86_64: return true;
+    case InstructionSet::kArm:
+    case InstructionSet::kArm64:
+    case InstructionSet::kThumb2:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64: return true;
     default: return false;
   }
 }
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index b434e90..897b50b 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -98,7 +98,7 @@
              static_cast<const void*>(fake_header_code_and_maps_.data() +
                                           (fake_header_code_and_maps_.size() - code_size)));
 
-    if (kRuntimeISA == kArm) {
+    if (kRuntimeISA == InstructionSet::kArm) {
       // Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer().
       CHECK_ALIGNED(stack_maps_offset, 2);
     }
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 5b57718..236b5c0 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -102,13 +102,13 @@
   }
 };
 
-#define TEST_ISA(isa) \
-  TEST_F(JNICFITest, isa) { \
-    std::vector<uint8_t> expected_asm(expected_asm_##isa, \
-        expected_asm_##isa + arraysize(expected_asm_##isa)); \
-    std::vector<uint8_t> expected_cfi(expected_cfi_##isa, \
-        expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
-    TestImpl(isa, #isa, expected_asm, expected_cfi); \
+#define TEST_ISA(isa)                                                 \
+  TEST_F(JNICFITest, isa) {                                           \
+    std::vector<uint8_t> expected_asm(expected_asm_##isa,             \
+        expected_asm_##isa + arraysize(expected_asm_##isa));          \
+    std::vector<uint8_t> expected_cfi(expected_cfi_##isa,             \
+        expected_cfi_##isa + arraysize(expected_cfi_##isa));          \
+    TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi);  \
   }
 
 #ifdef ART_ENABLE_CODEGEN_arm
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 42a5f86..55c27d1 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -54,38 +54,38 @@
     InstructionSet instruction_set) {
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
           new (allocator) arm::ArmManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64:
+    case InstructionSet::kArm64:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
           new (allocator) arm64::Arm64ManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips:
+    case InstructionSet::kMips:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
           new (allocator) mips::MipsManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64:
+    case InstructionSet::kMips64:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
           new (allocator) mips64::Mips64ManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86:
+    case InstructionSet::kX86:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
           new (allocator) x86::X86ManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
           new (allocator) x86_64::X86_64ManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
@@ -156,38 +156,38 @@
                                                                    InstructionSet instruction_set) {
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return std::unique_ptr<JniCallingConvention>(
           new (allocator) arm::ArmJniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64:
+    case InstructionSet::kArm64:
       return std::unique_ptr<JniCallingConvention>(
           new (allocator) arm64::Arm64JniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips:
+    case InstructionSet::kMips:
       return std::unique_ptr<JniCallingConvention>(
           new (allocator) mips::MipsJniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64:
+    case InstructionSet::kMips64:
       return std::unique_ptr<JniCallingConvention>(
           new (allocator) mips64::Mips64JniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86:
+    case InstructionSet::kX86:
       return std::unique_ptr<JniCallingConvention>(
           new (allocator) x86::X86JniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return std::unique_ptr<JniCallingConvention>(
           new (allocator) x86_64::X86_64JniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index e32b681..b3177aa 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -323,7 +323,7 @@
   // Note that we always have outgoing param space available for at least two params.
   if (kUseReadBarrier && is_static && !is_critical_native) {
     const bool kReadBarrierFastPath =
-        (instruction_set != kMips) && (instruction_set != kMips64);
+        (instruction_set != InstructionSet::kMips) && (instruction_set != InstructionSet::kMips64);
     std::unique_ptr<JNIMacroLabel> skip_cold_path_label;
     if (kReadBarrierFastPath) {
       skip_cold_path_label = __ CreateLabel();
@@ -531,7 +531,8 @@
     if (LIKELY(!is_critical_native)) {
       // For normal JNI, store the return value on the stack because the call to
       // JniMethodEnd will clobber the return value. It will be restored in (13).
-      if ((instruction_set == kMips || instruction_set == kMips64) &&
+      if ((instruction_set == InstructionSet::kMips ||
+           instruction_set == InstructionSet::kMips64) &&
           main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
           return_save_location.Uint32Value() % 8 != 0) {
         // Ensure doubles are 8-byte aligned for MIPS
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
index 3d56833..48747fc 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -47,7 +47,7 @@
 constexpr uint32_t kMaxBcondNegativeDisplacement = (1u << 20) - kPcDisplacement;
 
 Thumb2RelativePatcher::Thumb2RelativePatcher(RelativePatcherTargetProvider* provider)
-    : ArmBaseRelativePatcher(provider, kThumb2) {
+    : ArmBaseRelativePatcher(provider, InstructionSet::kThumb2) {
 }
 
 void Thumb2RelativePatcher::PatchCall(std::vector<uint8_t>* code,
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index fe76dfe..2c22a35 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -28,7 +28,7 @@
 
 class Thumb2RelativePatcherTest : public RelativePatcherTest {
  public:
-  Thumb2RelativePatcherTest() : RelativePatcherTest(kThumb2, "default") { }
+  Thumb2RelativePatcherTest() : RelativePatcherTest(InstructionSet::kThumb2, "default") { }
 
  protected:
   static const uint8_t kCallRawCode[];
@@ -173,7 +173,8 @@
       return false;  // No thunk.
     } else {
       uint32_t thunk_end =
-          CompiledCode::AlignCode(method3_offset - sizeof(OatQuickMethodHeader), kThumb2) +
+          CompiledCode::AlignCode(method3_offset - sizeof(OatQuickMethodHeader),
+                                  InstructionSet::kThumb2) +
           MethodCallThunkSize();
       uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
       CHECK_EQ(result3.second, header_offset + sizeof(OatQuickMethodHeader) + 1 /* thumb mode */);
@@ -420,7 +421,8 @@
 
   // Check linked code.
   uint32_t method3_offset = GetMethodOffset(3u);
-  uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2);
+  uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(),
+                                                  InstructionSet::kThumb2);
   uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */);
   ASSERT_EQ(diff & 1u, 0u);
   ASSERT_LT(diff >> 1, 1u << 8);  // Simple encoding, (diff >> 1) fits into 8 bits.
@@ -495,8 +497,7 @@
   ASSERT_TRUE(IsAligned<kArmAlignment>(method3_offset));
   uint32_t method3_header_offset = method3_offset - sizeof(OatQuickMethodHeader);
   uint32_t thunk_size = MethodCallThunkSize();
-  uint32_t thunk_offset =
-      RoundDown(method3_header_offset - thunk_size, GetInstructionSetAlignment(kThumb2));
+  uint32_t thunk_offset = RoundDown(method3_header_offset - thunk_size, kArmAlignment);
   DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size),
             method3_header_offset);
   ASSERT_TRUE(IsAligned<kArmAlignment>(thunk_offset));
@@ -527,7 +528,8 @@
 
   // Check linked code.
   uint32_t method3_offset = GetMethodOffset(3u);
-  uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2);
+  uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(),
+                                                  InstructionSet::kThumb2);
   uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */);
   ASSERT_EQ(diff & 1u, 0u);
   ASSERT_LT(diff >> 1, 1u << 8);  // Simple encoding, (diff >> 1) fits into 8 bits.
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 663e43b..52a0796 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -76,7 +76,8 @@
   if (num_adrp == 0u) {
     return 0u;
   }
-  uint32_t alignment_bytes = CompiledMethod::AlignCode(code_size, kArm64) - code_size;
+  uint32_t alignment_bytes =
+      CompiledMethod::AlignCode(code_size, InstructionSet::kArm64) - code_size;
   return kAdrpThunkSize * num_adrp + alignment_bytes;
 }
 
@@ -84,7 +85,7 @@
 
 Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherTargetProvider* provider,
                                            const Arm64InstructionSetFeatures* features)
-    : ArmBaseRelativePatcher(provider, kArm64),
+    : ArmBaseRelativePatcher(provider, InstructionSet::kArm64),
       fix_cortex_a53_843419_(features->NeedFixCortexA53_843419()),
       reserved_adrp_thunks_(0u),
       processed_adrp_thunks_(0u) {
@@ -105,7 +106,8 @@
   // Add thunks for previous method if any.
   if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
     size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
-    offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
+    offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64) +
+             kAdrpThunkSize * num_adrp_thunks;
     reserved_adrp_thunks_ = adrp_thunk_locations_.size();
   }
 
@@ -149,7 +151,8 @@
     // Add thunks for the last method if any.
     if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
       size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
-      offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
+      offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64) +
+               kAdrpThunkSize * num_adrp_thunks;
       reserved_adrp_thunks_ = adrp_thunk_locations_.size();
     }
   }
@@ -159,7 +162,7 @@
 uint32_t Arm64RelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
   if (fix_cortex_a53_843419_) {
     if (!current_method_thunks_.empty()) {
-      uint32_t aligned_offset = CompiledMethod::AlignCode(offset, kArm64);
+      uint32_t aligned_offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64);
       if (kIsDebugBuild) {
         CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
         size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 8a5b4cc..05459a2 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -29,7 +29,7 @@
 class Arm64RelativePatcherTest : public RelativePatcherTest {
  public:
   explicit Arm64RelativePatcherTest(const std::string& variant)
-      : RelativePatcherTest(kArm64, variant) { }
+      : RelativePatcherTest(InstructionSet::kArm64, variant) { }
 
  protected:
   static const uint8_t kCallRawCode[];
@@ -153,7 +153,8 @@
     // There may be a thunk before method2.
     if (last_result.second != last_method_offset) {
       // Thunk present. Check that there's only one.
-      uint32_t thunk_end = CompiledCode::AlignCode(gap_end, kArm64) + MethodCallThunkSize();
+      uint32_t thunk_end =
+          CompiledCode::AlignCode(gap_end, InstructionSet::kArm64) + MethodCallThunkSize();
       uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
       CHECK_EQ(last_result.second, header_offset + sizeof(OatQuickMethodHeader));
     }
@@ -347,7 +348,8 @@
     CHECK_EQ(compiled_method_refs_[0].index, 1u);
     CHECK_EQ(compiled_method_refs_.size(), compiled_methods_.size());
     uint32_t method1_size = compiled_methods_[0]->GetQuickCode().size();
-    uint32_t thunk_offset = CompiledCode::AlignCode(method1_offset + method1_size, kArm64);
+    uint32_t thunk_offset =
+        CompiledCode::AlignCode(method1_offset + method1_size, InstructionSet::kArm64);
     uint32_t b_diff = thunk_offset - (method1_offset + num_nops * 4u);
     CHECK_ALIGNED(b_diff, 4u);
     ASSERT_LT(b_diff, 128 * MB);
@@ -602,7 +604,7 @@
 
   // Check linked code.
   uint32_t thunk_offset =
-      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
+      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), InstructionSet::kArm64);
   uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
   CHECK_ALIGNED(diff, 4u);
   ASSERT_LT(diff, 128 * MB);
@@ -688,8 +690,7 @@
   ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_offset));
   uint32_t last_method_header_offset = last_method_offset - sizeof(OatQuickMethodHeader);
   uint32_t thunk_size = MethodCallThunkSize();
-  uint32_t thunk_offset =
-      RoundDown(last_method_header_offset - thunk_size, GetInstructionSetAlignment(kArm64));
+  uint32_t thunk_offset = RoundDown(last_method_header_offset - thunk_size, kArm64Alignment);
   DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size),
             last_method_header_offset);
   uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1);
@@ -721,7 +722,7 @@
 
   // Check linked code.
   uint32_t thunk_offset =
-      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
+      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), InstructionSet::kArm64);
   uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
   CHECK_ALIGNED(diff, 4u);
   ASSERT_LT(diff, 128 * MB);
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 7941237..b30b55e 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -417,10 +417,10 @@
                     InstructionSet isa,
                     const InstructionSetFeatures* features)
         : Section(owner, name, type, flags, link, info, align, entsize) {
-      if (isa == kMips || isa == kMips64) {
+      if (isa == InstructionSet::kMips || isa == InstructionSet::kMips64) {
         bool fpu32 = false;    // assume mips64 values
         uint8_t isa_rev = 6;   // assume mips64 values
-        if (isa == kMips) {
+        if (isa == InstructionSet::kMips) {
           // adjust for mips32 values
           fpu32 = features->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint();
           isa_rev = features->AsMipsInstructionSetFeatures()->IsR6()
@@ -430,14 +430,15 @@
                   : 1;
         }
         abiflags_.version = 0;  // version of flags structure
-        abiflags_.isa_level = (isa == kMips) ? 32 : 64;
+        abiflags_.isa_level = (isa == InstructionSet::kMips) ? 32 : 64;
         abiflags_.isa_rev = isa_rev;
-        abiflags_.gpr_size = (isa == kMips) ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
+        abiflags_.gpr_size = (isa == InstructionSet::kMips) ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
         abiflags_.cpr1_size = fpu32 ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
         abiflags_.cpr2_size = MIPS_AFL_REG_NONE;
        // Set the fp_abi to MIPS_ABI_FP_64A for mips32 with 64-bit FPUs (i.e. mips32 R5 and R6).
         // Otherwise set to MIPS_ABI_FP_DOUBLE.
-        abiflags_.fp_abi = (isa == kMips && !fpu32) ? MIPS_ABI_FP_64A : MIPS_ABI_FP_DOUBLE;
+        abiflags_.fp_abi =
+            (isa == InstructionSet::kMips && !fpu32) ? MIPS_ABI_FP_64A : MIPS_ABI_FP_DOUBLE;
         abiflags_.isa_ext = 0;
         abiflags_.ases = 0;
         // To keep the code simple, we are not using odd FP reg for single floats for both
@@ -689,7 +690,7 @@
     Elf_Word bss_address = RoundUp(text_address + text_size, kPageSize);
     Elf_Word abiflags_address = RoundUp(bss_address + bss_size, kPageSize);
     Elf_Word abiflags_size = 0;
-    if (isa_ == kMips || isa_ == kMips64) {
+    if (isa_ == InstructionSet::kMips || isa_ == InstructionSet::kMips64) {
       abiflags_size = abiflags_.GetSize();
     }
     Elf_Word dynstr_address = RoundUp(abiflags_address + abiflags_size, kPageSize);
@@ -835,29 +836,29 @@
   static Elf_Ehdr MakeElfHeader(InstructionSet isa, const InstructionSetFeatures* features) {
     Elf_Ehdr elf_header = Elf_Ehdr();
     switch (isa) {
-      case kArm:
+      case InstructionSet::kArm:
         // Fall through.
-      case kThumb2: {
+      case InstructionSet::kThumb2: {
         elf_header.e_machine = EM_ARM;
         elf_header.e_flags = EF_ARM_EABI_VER5;
         break;
       }
-      case kArm64: {
+      case InstructionSet::kArm64: {
         elf_header.e_machine = EM_AARCH64;
         elf_header.e_flags = 0;
         break;
       }
-      case kX86: {
+      case InstructionSet::kX86: {
         elf_header.e_machine = EM_386;
         elf_header.e_flags = 0;
         break;
       }
-      case kX86_64: {
+      case InstructionSet::kX86_64: {
         elf_header.e_machine = EM_X86_64;
         elf_header.e_flags = 0;
         break;
       }
-      case kMips: {
+      case InstructionSet::kMips: {
         elf_header.e_machine = EM_MIPS;
         elf_header.e_flags = (EF_MIPS_NOREORDER |
                               EF_MIPS_PIC       |
@@ -868,7 +869,7 @@
                                    : EF_MIPS_ARCH_32R2));
         break;
       }
-      case kMips64: {
+      case InstructionSet::kMips64: {
         elf_header.e_machine = EM_MIPS;
         elf_header.e_flags = (EF_MIPS_NOREORDER |
                               EF_MIPS_PIC       |
@@ -876,7 +877,7 @@
                               EF_MIPS_ARCH_64R6);
         break;
       }
-      case kNone: {
+      case InstructionSet::kNone: {
         LOG(FATAL) << "No instruction set";
         break;
       }
diff --git a/compiler/linker/mips/relative_patcher_mips32r6_test.cc b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
index 586e2aa..629fdd5 100644
--- a/compiler/linker/mips/relative_patcher_mips32r6_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
@@ -22,7 +22,7 @@
 
 class Mips32r6RelativePatcherTest : public RelativePatcherTest {
  public:
-  Mips32r6RelativePatcherTest() : RelativePatcherTest(kMips, "mips32r6") {}
+  Mips32r6RelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips, "mips32r6") {}
 
  protected:
   static const uint8_t kUnpatchedPcRelativeRawCode[];
diff --git a/compiler/linker/mips/relative_patcher_mips_test.cc b/compiler/linker/mips/relative_patcher_mips_test.cc
index ebe5406..d876c76 100644
--- a/compiler/linker/mips/relative_patcher_mips_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips_test.cc
@@ -23,7 +23,7 @@
 
 class MipsRelativePatcherTest : public RelativePatcherTest {
  public:
-  MipsRelativePatcherTest() : RelativePatcherTest(kMips, "mips32r2") {}
+  MipsRelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips, "mips32r2") {}
 
  protected:
   static const uint8_t kUnpatchedPcRelativeRawCode[];
diff --git a/compiler/linker/mips64/relative_patcher_mips64_test.cc b/compiler/linker/mips64/relative_patcher_mips64_test.cc
index 4edcae7..a02f500 100644
--- a/compiler/linker/mips64/relative_patcher_mips64_test.cc
+++ b/compiler/linker/mips64/relative_patcher_mips64_test.cc
@@ -23,7 +23,7 @@
 
 class Mips64RelativePatcherTest : public RelativePatcherTest {
  public:
-  Mips64RelativePatcherTest() : RelativePatcherTest(kMips64, "default") {}
+  Mips64RelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips64, "default") {}
 
  protected:
   static const uint8_t kUnpatchedPcRelativeRawCode[];
diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc
index dc15bb0..13877f8 100644
--- a/compiler/linker/relative_patcher.cc
+++ b/compiler/linker/relative_patcher.cc
@@ -95,31 +95,31 @@
   UNUSED(provider);
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86:
+    case InstructionSet::kX86:
       return std::unique_ptr<RelativePatcher>(new X86RelativePatcher());
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return std::unique_ptr<RelativePatcher>(new X86_64RelativePatcher());
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm
-    case kArm:
+    case InstructionSet::kArm:
       // Fall through: we generate Thumb2 code for "arm".
-    case kThumb2:
+    case InstructionSet::kThumb2:
       return std::unique_ptr<RelativePatcher>(new Thumb2RelativePatcher(provider));
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64:
+    case InstructionSet::kArm64:
       return std::unique_ptr<RelativePatcher>(
           new Arm64RelativePatcher(provider, features->AsArm64InstructionSetFeatures()));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips:
+    case InstructionSet::kMips:
       return std::unique_ptr<RelativePatcher>(
           new MipsRelativePatcher(features->AsMipsInstructionSetFeatures()));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64:
+    case InstructionSet::kMips64:
       return std::unique_ptr<RelativePatcher>(new Mips64RelativePatcher());
 #endif
     default:
diff --git a/compiler/linker/x86/relative_patcher_x86_test.cc b/compiler/linker/x86/relative_patcher_x86_test.cc
index 4f74cee..b855dec 100644
--- a/compiler/linker/x86/relative_patcher_x86_test.cc
+++ b/compiler/linker/x86/relative_patcher_x86_test.cc
@@ -23,7 +23,7 @@
 
 class X86RelativePatcherTest : public RelativePatcherTest {
  public:
-  X86RelativePatcherTest() : RelativePatcherTest(kX86, "default") { }
+  X86RelativePatcherTest() : RelativePatcherTest(InstructionSet::kX86, "default") { }
 
  protected:
   static const uint8_t kCallRawCode[];
diff --git a/compiler/linker/x86_64/relative_patcher_x86_64_test.cc b/compiler/linker/x86_64/relative_patcher_x86_64_test.cc
index ae17aa7..6baa92d 100644
--- a/compiler/linker/x86_64/relative_patcher_x86_64_test.cc
+++ b/compiler/linker/x86_64/relative_patcher_x86_64_test.cc
@@ -23,7 +23,7 @@
 
 class X86_64RelativePatcherTest : public RelativePatcherTest {
  public:
-  X86_64RelativePatcherTest() : RelativePatcherTest(kX86_64, "default") { }
+  X86_64RelativePatcherTest() : RelativePatcherTest(InstructionSet::kX86_64, "default") { }
 
  protected:
   static const uint8_t kCallRawCode[];
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index b8d1f52..5625f04 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -786,43 +786,43 @@
   ArenaAllocator* allocator = graph->GetAllocator();
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm
-    case kArm:
-    case kThumb2: {
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2: {
       return std::unique_ptr<CodeGenerator>(
           new (allocator) arm::CodeGeneratorARMVIXL(
               graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64: {
+    case InstructionSet::kArm64: {
       return std::unique_ptr<CodeGenerator>(
           new (allocator) arm64::CodeGeneratorARM64(
               graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips: {
+    case InstructionSet::kMips: {
       return std::unique_ptr<CodeGenerator>(
           new (allocator) mips::CodeGeneratorMIPS(
               graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64: {
+    case InstructionSet::kMips64: {
       return std::unique_ptr<CodeGenerator>(
           new (allocator) mips64::CodeGeneratorMIPS64(
               graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86: {
+    case InstructionSet::kX86: {
       return std::unique_ptr<CodeGenerator>(
           new (allocator) x86::CodeGeneratorX86(
               graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64: {
+    case InstructionSet::kX86_64: {
       return std::unique_ptr<CodeGenerator>(
           new (allocator) x86_64::CodeGeneratorX86_64(
               graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats));
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 64c88eb..18ad60d 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -626,7 +626,7 @@
 
   bool CallPushesPC() const {
     InstructionSet instruction_set = GetInstructionSet();
-    return instruction_set == kX86 || instruction_set == kX86_64;
+    return instruction_set == InstructionSet::kX86 || instruction_set == InstructionSet::kX86_64;
   }
 
   // Arm64 has its own type for a label, so we need to templatize these methods
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c7811ab..e01b7b7 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1557,12 +1557,13 @@
   MacroAssembler* masm = GetVIXLAssembler();
   __ Bind(&frame_entry_label_);
 
-  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
+  bool do_overflow_check =
+      FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm64) || !IsLeafMethod();
   if (do_overflow_check) {
     UseScratchRegisterScope temps(masm);
     Register temp = temps.AcquireX();
     DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
-    __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
+    __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kArm64)));
     {
       // Ensure that between load and RecordPcInfo there are no pools emitted.
       ExactAssemblyScope eas(GetVIXLAssembler(),
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 90f3ae8..edd3072 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2568,7 +2568,7 @@
   if (!skip_overflow_check) {
     UseScratchRegisterScope temps(GetVIXLAssembler());
     vixl32::Register temp = temps.Acquire();
-    __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(kArm)));
+    __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(InstructionSet::kArm)));
     // The load must immediately precede RecordPcInfo.
     ExactAssemblyScope aas(GetVIXLAssembler(),
                            vixl32::kMaxInstructionSizeInBytes,
@@ -5303,7 +5303,7 @@
   vixl32::Label less, greater, done;
   vixl32::Label* final_label = codegen_->GetFinalLabel(compare, &done);
   DataType::Type type = compare->InputAt(0)->GetType();
-  vixl32::Condition less_cond = vixl32::Condition(kNone);
+  vixl32::Condition less_cond = vixl32::Condition::None();
   switch (type) {
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 2f65e8c..b3fed07 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1132,7 +1132,7 @@
   StackMapStream* stack_map_stream = GetStackMapStream();
   for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
     uint32_t old_position =
-        stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
+        stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips);
     uint32_t new_position = __ GetAdjustedPosition(old_position);
     DCHECK_GE(new_position, old_position);
     stack_map_stream->SetStackMapNativePcOffset(i, new_position);
@@ -1347,13 +1347,14 @@
 void CodeGeneratorMIPS::GenerateFrameEntry() {
   __ Bind(&frame_entry_label_);
 
-  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips) || !IsLeafMethod();
+  bool do_overflow_check =
+      FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips) || !IsLeafMethod();
 
   if (do_overflow_check) {
     __ LoadFromOffset(kLoadWord,
                       ZERO,
                       SP,
-                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips)));
+                      -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips)));
     RecordPcInfo(nullptr, 0);
   }
 
@@ -1365,8 +1366,9 @@
   }
 
   // Make sure the frame size isn't unreasonably large.
-  if (GetFrameSize() > GetStackOverflowReservedBytes(kMips)) {
-    LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips) << " bytes";
+  if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips)) {
+    LOG(FATAL) << "Stack frame larger than "
+        << GetStackOverflowReservedBytes(InstructionSet::kMips) << " bytes";
   }
 
   // Spill callee-saved registers.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 6cbfa14..53a7f26 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1076,7 +1076,7 @@
   StackMapStream* stack_map_stream = GetStackMapStream();
   for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
     uint32_t old_position =
-        stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
+        stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips64);
     uint32_t new_position = __ GetAdjustedPosition(old_position);
     DCHECK_GE(new_position, old_position);
     stack_map_stream->SetStackMapNativePcOffset(i, new_position);
@@ -1161,13 +1161,15 @@
 void CodeGeneratorMIPS64::GenerateFrameEntry() {
   __ Bind(&frame_entry_label_);
 
-  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();
+  bool do_overflow_check =
+      FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips64) || !IsLeafMethod();
 
   if (do_overflow_check) {
-    __ LoadFromOffset(kLoadWord,
-                      ZERO,
-                      SP,
-                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
+    __ LoadFromOffset(
+        kLoadWord,
+        ZERO,
+        SP,
+        -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips64)));
     RecordPcInfo(nullptr, 0);
   }
 
@@ -1176,8 +1178,9 @@
   }
 
   // Make sure the frame size isn't unreasonably large.
-  if (GetFrameSize() > GetStackOverflowReservedBytes(kMips64)) {
-    LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips64) << " bytes";
+  if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips64)) {
+    LOG(FATAL) << "Stack frame larger than "
+        << GetStackOverflowReservedBytes(InstructionSet::kMips64) << " bytes";
   }
 
   // Spill callee-saved registers.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 44614e1..f84dd00 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1072,7 +1072,8 @@
   DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
 
   if (!skip_overflow_check) {
-    __ testl(EAX, Address(ESP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86))));
+    size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86);
+    __ testl(EAX, Address(ESP, -static_cast<int32_t>(reserved_bytes)));
     RecordPcInfo(nullptr, 0);
   }
 
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 259bb4a..16d1f18 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1277,8 +1277,8 @@
   DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
 
   if (!skip_overflow_check) {
-    __ testq(CpuRegister(RAX), Address(
-        CpuRegister(RSP), -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86_64))));
+    size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86_64);
+    __ testq(CpuRegister(RAX), Address(CpuRegister(RSP), -static_cast<int32_t>(reserved_bytes)));
     RecordPcInfo(nullptr, 0);
   }
 
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index e35c7c7..ba431a5 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -44,22 +44,22 @@
   ::std::vector<CodegenTargetConfig> test_config_candidates = {
 #ifdef ART_ENABLE_CODEGEN_arm
    // TODO: Shouldn't this be `kThumb2` instead of `kArm` here?
-    CodegenTargetConfig(kArm, create_codegen_arm_vixl32),
+    CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32),
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    CodegenTargetConfig(kArm64, create_codegen_arm64),
+    CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64),
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    CodegenTargetConfig(kX86, create_codegen_x86),
+    CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86),
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    CodegenTargetConfig(kX86_64, create_codegen_x86_64),
+    CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    CodegenTargetConfig(kMips, create_codegen_mips),
+    CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    CodegenTargetConfig(kMips64, create_codegen_mips64)
+    CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
 #endif
   };
 
@@ -825,7 +825,7 @@
 TEST_F(CodegenTest, MipsClobberRA) {
   std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
       MipsInstructionSetFeatures::FromCppDefines());
-  if (!CanExecute(kMips) || features_mips->IsR6()) {
+  if (!CanExecute(InstructionSet::kMips) || features_mips->IsR6()) {
     // HMipsComputeBaseMethodAddress and the NAL instruction behind it
     // should only be generated on non-R6.
     return;
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index bcbcc12..c41c290 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -207,7 +207,7 @@
 static bool CanExecuteOnHardware(InstructionSet target_isa) {
   return (target_isa == kRuntimeISA)
      // Handle the special case of ARM, with two instruction sets (ARM32 and Thumb-2).
-      || (kRuntimeISA == kArm && target_isa == kThumb2);
+      || (kRuntimeISA == InstructionSet::kArm && target_isa == InstructionSet::kThumb2);
 }
 
 static bool CanExecute(InstructionSet target_isa) {
@@ -271,7 +271,7 @@
   typedef Expected (*fptr)();
   CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
   fptr f = reinterpret_cast<fptr>(allocator.GetMemory());
-  if (target_isa == kThumb2) {
+  if (target_isa == InstructionSet::kThumb2) {
     // For thumb we need the bottom bit set.
     f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1);
   }
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 102acb3..ed2f8e9 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -342,7 +342,7 @@
 }
 
 inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
-  DCHECK(HasShifterOperand(instruction, kArm64));
+  DCHECK(HasShifterOperand(instruction, InstructionSet::kArm64));
   // Although the `neg` instruction is an alias of the `sub` instruction, `HNeg`
   // does *not* support extension. This is because the `extended register` form
   // of the `sub` instruction interprets the left register with code 31 as the
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index f7fd910..12c6988 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -153,7 +153,7 @@
     }
 
     const uint8_t* base = disassembler_->GetDisassemblerOptions()->base_address_;
-    if (instruction_set_ == kThumb2) {
+    if (instruction_set_ == InstructionSet::kThumb2) {
       // ARM and Thumb-2 use the same disassembler. The bottom bit of the
       // address is used to distinguish between the two.
       base += 1;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 189d5ae..2bd2d5f 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -250,7 +250,7 @@
   DataType::Type type = mul->GetPackedType();
   InstructionSet isa = codegen_->GetInstructionSet();
   switch (isa) {
-    case kArm64:
+    case InstructionSet::kArm64:
       if (!(type == DataType::Type::kUint8 ||
             type == DataType::Type::kInt8 ||
             type == DataType::Type::kUint16 ||
@@ -259,8 +259,8 @@
         return false;
       }
       break;
-    case kMips:
-    case kMips64:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
       if (!(type == DataType::Type::kUint8 ||
             type == DataType::Type::kInt8 ||
             type == DataType::Type::kUint16 ||
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 9422f9f..d41e49a 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -84,7 +84,7 @@
 bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* use,
                                                                  HInstruction* bitfield_op,
                                                                  bool do_merge) {
-  DCHECK(HasShifterOperand(use, kArm));
+  DCHECK(HasShifterOperand(use, InstructionSet::kArm));
   DCHECK(use->IsBinaryOperation());
   DCHECK(CanFitInShifterOperand(bitfield_op));
   DCHECK(!bitfield_op->HasEnvironmentUses());
@@ -166,7 +166,7 @@
  // Check whether we can merge the instruction in all its users' shifter operands.
   for (const HUseListNode<HInstruction*>& use : uses) {
     HInstruction* user = use.GetUser();
-    if (!HasShifterOperand(user, kArm)) {
+    if (!HasShifterOperand(user, InstructionSet::kArm)) {
       return false;
     }
     if (!CanMergeIntoShifterOperand(user, bitfield_op)) {
@@ -242,7 +242,7 @@
 }
 
 void InstructionSimplifierArmVisitor::VisitMul(HMul* instruction) {
-  if (TryCombineMultiplyAccumulate(instruction, kArm)) {
+  if (TryCombineMultiplyAccumulate(instruction, InstructionSet::kArm)) {
     RecordSimplification();
   }
 }
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index c0ab68f..69e1463 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -90,7 +90,7 @@
 bool InstructionSimplifierArm64Visitor::TryMergeIntoShifterOperand(HInstruction* use,
                                                                    HInstruction* bitfield_op,
                                                                    bool do_merge) {
-  DCHECK(HasShifterOperand(use, kArm64));
+  DCHECK(HasShifterOperand(use, InstructionSet::kArm64));
   DCHECK(use->IsBinaryOperation() || use->IsNeg());
   DCHECK(CanFitInShifterOperand(bitfield_op));
   DCHECK(!bitfield_op->HasEnvironmentUses());
@@ -170,7 +170,7 @@
  // Check whether we can merge the instruction in all its users' shifter operands.
   for (const HUseListNode<HInstruction*>& use : uses) {
     HInstruction* user = use.GetUser();
-    if (!HasShifterOperand(user, kArm64)) {
+    if (!HasShifterOperand(user, InstructionSet::kArm64)) {
       return false;
     }
     if (!CanMergeIntoShifterOperand(user, bitfield_op)) {
@@ -218,7 +218,7 @@
 }
 
 void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) {
-  if (TryCombineMultiplyAccumulate(instruction, kArm64)) {
+  if (TryCombineMultiplyAccumulate(instruction, InstructionSet::kArm64)) {
     RecordSimplification();
   }
 }
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 1c13084..ccdcb35 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -90,13 +90,13 @@
 bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) {
   DataType::Type type = mul->GetType();
   switch (isa) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       if (type != DataType::Type::kInt32) {
         return false;
       }
       break;
-    case kArm64:
+    case InstructionSet::kArm64:
       if (!DataType::IsIntOrLongType(type)) {
         return false;
       }
@@ -148,7 +148,7 @@
         mul->GetBlock()->RemoveInstruction(mul);
         return true;
       }
-    } else if (use->IsNeg() && isa != kArm) {
+    } else if (use->IsNeg() && isa != InstructionSet::kArm) {
       HMultiplyAccumulate* mulacc =
           new (allocator) HMultiplyAccumulate(type,
                                               HInstruction::kSub,
diff --git a/compiler/optimizing/instruction_simplifier_shared.h b/compiler/optimizing/instruction_simplifier_shared.h
index b016a87..758fc76 100644
--- a/compiler/optimizing/instruction_simplifier_shared.h
+++ b/compiler/optimizing/instruction_simplifier_shared.h
@@ -41,7 +41,8 @@
 inline bool HasShifterOperand(HInstruction* instr, InstructionSet isa) {
   // On ARM64, a `neg` instruction is an alias of `sub` that uses the zero
   // register as the first register input.
-  bool res = instr->IsAdd() || instr->IsAnd() || (isa == kArm64 && instr->IsNeg()) ||
+  bool res = instr->IsAdd() || instr->IsAnd() ||
+      (isa == InstructionSet::kArm64 && instr->IsNeg()) ||
       instr->IsOr() || instr->IsSub() || instr->IsXor();
   return res;
 }
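
A minimal standalone sketch (toy `Isa` enum, not the real art::InstructionSet) of why every use site in this change now spells out the qualifier: scoped enumerators are not injected into the enclosing namespace, so the bare names no longer resolve.

  #include <cassert>

  enum class Isa { kArm, kArm64 };  // scoped: enumerators do not leak out

  bool IsArm64(Isa isa) {
    // return isa == kArm64;    // would not compile: kArm64 is not in scope
    return isa == Isa::kArm64;  // the qualified name is required
  }

  int main() {
    assert(IsArm64(Isa::kArm64));
    assert(!IsArm64(Isa::kArm));
    return 0;
  }
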
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 74de077..c672dae 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -1414,8 +1414,8 @@
 
 uint32_t HLoopOptimization::GetVectorSizeInBytes() {
   switch (compiler_driver_->GetInstructionSet()) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return 8;  // 64-bit SIMD
     default:
       return 16;  // 128-bit SIMD
@@ -1425,8 +1425,8 @@
 bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) {
   const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures();
   switch (compiler_driver_->GetInstructionSet()) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       // Allow vectorization for all ARM devices, because Android assumes that
       // ARM 32-bit always supports advanced SIMD (64-bit SIMD).
       switch (type) {
@@ -1446,7 +1446,7 @@
           break;
       }
       return false;
-    case kArm64:
+    case InstructionSet::kArm64:
       // Allow vectorization for all ARM64 devices, because Android assumes that
       // ARMv8 AArch64 always supports advanced SIMD (128-bit SIMD).
       switch (type) {
@@ -1474,8 +1474,8 @@
         default:
           return false;
       }
-    case kX86:
-    case kX86_64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
       // Allow vectorization for SSE4.1-enabled X86 devices only (128-bit SIMD).
       if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) {
         switch (type) {
@@ -1506,7 +1506,7 @@
         }  // switch type
       }
       return false;
-    case kMips:
+    case InstructionSet::kMips:
       if (features->AsMipsInstructionSetFeatures()->HasMsa()) {
         switch (type) {
           case DataType::Type::kBool:
@@ -1535,7 +1535,7 @@
         }  // switch type
       }
       return false;
-    case kMips64:
+    case InstructionSet::kMips64:
       if (features->AsMips64InstructionSetFeatures()->HasMsa()) {
         switch (type) {
           case DataType::Type::kBool:
@@ -2170,7 +2170,7 @@
 uint32_t HLoopOptimization::GetUnrollingFactor(HBasicBlock* block, int64_t trip_count) {
   uint32_t max_peel = MaxNumberPeeled();
   switch (compiler_driver_->GetInstructionSet()) {
-    case kArm64: {
+    case InstructionSet::kArm64: {
       // Don't unroll with insufficient iterations.
       // TODO: Unroll loops with unknown trip count.
       DCHECK_NE(vector_length_, 0u);
@@ -2192,8 +2192,8 @@
       DCHECK_GE(unroll_factor, 1u);
       return unroll_factor;
     }
-    case kX86:
-    case kX86_64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
     default:
       return kNoUnrollingFactor;
   }
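
The width logic in GetVectorSizeInBytes above is small enough to restate as a self-contained sketch (toy enum and function names, not the real ART types): 32-bit ARM advanced SIMD operates on 64-bit vectors, while the other ISAs handled here vectorize at 128 bits.

  #include <cassert>
  #include <cstdint>

  enum class Isa { kArm, kThumb2, kArm64, kX86, kX86_64, kMips, kMips64 };

  uint32_t VectorSizeInBytes(Isa isa) {
    switch (isa) {
      case Isa::kArm:
      case Isa::kThumb2:
        return 8;   // 64-bit SIMD on 32-bit ARM
      default:
        return 16;  // 128-bit SIMD elsewhere
    }
  }

  int main() {
    assert(VectorSizeInBytes(Isa::kThumb2) == 8u);
    assert(VectorSizeInBytes(Isa::kArm64) == 16u);
    return 0;
  }
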
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index b7380b0..4ad2996 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -153,15 +153,15 @@
   InternalCodeAllocator code_allocator_;
 };
 
-#define TEST_ISA(isa)                                         \
-  TEST_F(OptimizingCFITest, isa) {                            \
-    std::vector<uint8_t> expected_asm(                        \
-        expected_asm_##isa,                                   \
-        expected_asm_##isa + arraysize(expected_asm_##isa));  \
-    std::vector<uint8_t> expected_cfi(                        \
-        expected_cfi_##isa,                                   \
-        expected_cfi_##isa + arraysize(expected_cfi_##isa));  \
-    TestImpl(isa, #isa, expected_asm, expected_cfi);          \
+#define TEST_ISA(isa)                                                 \
+  TEST_F(OptimizingCFITest, isa) {                                    \
+    std::vector<uint8_t> expected_asm(                                \
+        expected_asm_##isa,                                           \
+        expected_asm_##isa + arraysize(expected_asm_##isa));          \
+    std::vector<uint8_t> expected_cfi(                                \
+        expected_cfi_##isa,                                           \
+        expected_cfi_##isa + arraysize(expected_cfi_##isa));          \
+    TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi);  \
   }
 
 #ifdef ART_ENABLE_CODEGEN_arm
@@ -204,7 +204,7 @@
   std::vector<uint8_t> expected_cfi(
       expected_cfi_kThumb2_adjust,
       expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust));
-  SetUpFrame(kThumb2);
+  SetUpFrame(InstructionSet::kThumb2);
 #define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \
     ->GetAssembler())->GetVIXLAssembler()->
   vixl32::Label target;
@@ -216,7 +216,7 @@
   __ Bind(&target);
 #undef __
   Finish();
-  Check(kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
+  Check(InstructionSet::kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
 }
 #endif
 
@@ -235,7 +235,7 @@
   std::vector<uint8_t> expected_cfi(
       expected_cfi_kMips_adjust,
       expected_cfi_kMips_adjust + arraysize(expected_cfi_kMips_adjust));
-  SetUpFrame(kMips);
+  SetUpFrame(InstructionSet::kMips);
 #define __ down_cast<mips::MipsAssembler*>(GetCodeGenerator()->GetAssembler())->
   mips::MipsLabel target;
   __ Beqz(mips::A0, &target);
@@ -246,7 +246,7 @@
   __ Bind(&target);
 #undef __
   Finish();
-  Check(kMips, "kMips_adjust", expected_asm, expected_cfi);
+  Check(InstructionSet::kMips, "kMips_adjust", expected_asm, expected_cfi);
 }
 #endif
 
@@ -265,7 +265,7 @@
   std::vector<uint8_t> expected_cfi(
       expected_cfi_kMips64_adjust,
       expected_cfi_kMips64_adjust + arraysize(expected_cfi_kMips64_adjust));
-  SetUpFrame(kMips64);
+  SetUpFrame(InstructionSet::kMips64);
 #define __ down_cast<mips64::Mips64Assembler*>(GetCodeGenerator()->GetAssembler())->
   mips64::Mips64Label target;
   __ Beqc(mips64::A1, mips64::A2, &target);
@@ -276,7 +276,7 @@
   __ Bind(&target);
 #undef __
   Finish();
-  Check(kMips64, "kMips64_adjust", expected_asm, expected_cfi);
+  Check(InstructionSet::kMips64, "kMips64_adjust", expected_asm, expected_cfi);
 }
 #endif
 
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 29319f8..9233eb5 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -437,13 +437,13 @@
 }
 
 static bool IsInstructionSetSupported(InstructionSet instruction_set) {
-  return instruction_set == kArm
-      || instruction_set == kArm64
-      || instruction_set == kThumb2
-      || instruction_set == kMips
-      || instruction_set == kMips64
-      || instruction_set == kX86
-      || instruction_set == kX86_64;
+  return instruction_set == InstructionSet::kArm
+      || instruction_set == InstructionSet::kArm64
+      || instruction_set == InstructionSet::kThumb2
+      || instruction_set == InstructionSet::kMips
+      || instruction_set == InstructionSet::kMips64
+      || instruction_set == InstructionSet::kX86
+      || instruction_set == InstructionSet::kX86_64;
 }
 
 // Strip pass name suffix to get optimization name.
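
IsInstructionSetSupported above keeps the ==-chain form; a switch over the enum is a possible alternative worth noting (hypothetical sketch with a toy enum, not part of this change), since the compiler's -Wswitch warning can then flag a newly added enumerator the predicate forgot to handle.

  enum class Isa { kArm, kArm64, kThumb2, kX86 };

  bool IsSupported(Isa isa) {
    switch (isa) {        // -Wswitch warns if an enumerator is unhandled
      case Isa::kArm:
      case Isa::kArm64:
      case Isa::kThumb2:
        return true;
      case Isa::kX86:     // toy value only; the real predicate supports x86
        return false;
    }
    return false;         // unreachable for valid enumerator values
  }

  int main() { return IsSupported(Isa::kArm) ? 0 : 1; }
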
@@ -637,8 +637,8 @@
   ArenaAllocator* allocator = graph->GetAllocator();
   switch (instruction_set) {
 #if defined(ART_ENABLE_CODEGEN_arm)
-    case kThumb2:
-    case kArm: {
+    case InstructionSet::kThumb2:
+    case InstructionSet::kArm: {
       arm::InstructionSimplifierArm* simplifier =
           new (allocator) arm::InstructionSimplifierArm(graph, stats);
       SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -657,7 +657,7 @@
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64: {
+    case InstructionSet::kArm64: {
       arm64::InstructionSimplifierArm64* simplifier =
           new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
       SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -676,7 +676,7 @@
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips: {
+    case InstructionSet::kMips: {
       mips::InstructionSimplifierMips* simplifier =
           new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
       SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -695,7 +695,7 @@
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64: {
+    case InstructionSet::kMips64: {
       SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
       GVNOptimization* gvn =
           new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -708,7 +708,7 @@
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86: {
+    case InstructionSet::kX86: {
       SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
       GVNOptimization* gvn =
           new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -727,7 +727,7 @@
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64: {
+    case InstructionSet::kX86_64: {
       SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
       GVNOptimization* gvn =
           new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -949,7 +949,7 @@
 
   // Always use the Thumb-2 assembler: some runtime functionality
   // (like implicit stack overflow checks) assumes Thumb-2.
-  DCHECK_NE(instruction_set, kArm);
+  DCHECK_NE(instruction_set, InstructionSet::kArm);
 
   // Do not attempt to compile on architectures we do not support.
   if (!IsInstructionSetSupported(instruction_set)) {
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 86e9713..bad73e1 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -70,13 +70,13 @@
 
 bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
                                                 InstructionSet instruction_set) {
-  return instruction_set == kArm
-      || instruction_set == kArm64
-      || instruction_set == kMips
-      || instruction_set == kMips64
-      || instruction_set == kThumb2
-      || instruction_set == kX86
-      || instruction_set == kX86_64;
+  return instruction_set == InstructionSet::kArm
+      || instruction_set == InstructionSet::kArm64
+      || instruction_set == InstructionSet::kMips
+      || instruction_set == InstructionSet::kMips64
+      || instruction_set == InstructionSet::kThumb2
+      || instruction_set == InstructionSet::kX86
+      || instruction_set == InstructionSet::kX86_64;
 }
 
 class AllRangesIterator : public ValueObject {
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 57eb762..8cc376c 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -796,7 +796,7 @@
 
   switch (instruction_set_) {
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64: {
+    case InstructionSet::kArm64: {
       arm64::HSchedulerARM64 scheduler(&allocator, selector);
       scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
       scheduler.Schedule(graph_);
@@ -804,8 +804,8 @@
     }
 #endif
 #if defined(ART_ENABLE_CODEGEN_arm)
-    case kThumb2:
-    case kArm: {
+    case InstructionSet::kThumb2:
+    case InstructionSet::kArm: {
       arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
       arm::HSchedulerARM scheduler(&allocator, selector, &arm_latency_visitor);
       scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index dfc1633..75dce81 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -43,22 +43,22 @@
   ::std::vector<CodegenTargetConfig> test_config_candidates = {
 #ifdef ART_ENABLE_CODEGEN_arm
     // TODO: Shouldn't this be `kThumb2` instead of `kArm` here?
-    CodegenTargetConfig(kArm, create_codegen_arm_vixl32),
+    CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32),
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    CodegenTargetConfig(kArm64, create_codegen_arm64),
+    CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64),
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    CodegenTargetConfig(kX86, create_codegen_x86),
+    CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86),
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    CodegenTargetConfig(kX86_64, create_codegen_x86_64),
+    CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    CodegenTargetConfig(kMips, create_codegen_mips),
+    CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    CodegenTargetConfig(kMips64, create_codegen_mips64)
+    CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
 #endif
   };
 
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 91f86d5..7e517f3 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -928,18 +928,24 @@
 
 TEST(StackMapTest, CodeOffsetTest) {
   // Test minimum alignments, encoding, and decoding.
-  CodeOffset offset_thumb2 = CodeOffset::FromOffset(kThumb2InstructionAlignment, kThumb2);
-  CodeOffset offset_arm64 = CodeOffset::FromOffset(kArm64InstructionAlignment, kArm64);
-  CodeOffset offset_x86 = CodeOffset::FromOffset(kX86InstructionAlignment, kX86);
-  CodeOffset offset_x86_64 = CodeOffset::FromOffset(kX86_64InstructionAlignment, kX86_64);
-  CodeOffset offset_mips = CodeOffset::FromOffset(kMipsInstructionAlignment, kMips);
-  CodeOffset offset_mips64 = CodeOffset::FromOffset(kMips64InstructionAlignment, kMips64);
-  EXPECT_EQ(offset_thumb2.Uint32Value(kThumb2), kThumb2InstructionAlignment);
-  EXPECT_EQ(offset_arm64.Uint32Value(kArm64), kArm64InstructionAlignment);
-  EXPECT_EQ(offset_x86.Uint32Value(kX86), kX86InstructionAlignment);
-  EXPECT_EQ(offset_x86_64.Uint32Value(kX86_64), kX86_64InstructionAlignment);
-  EXPECT_EQ(offset_mips.Uint32Value(kMips), kMipsInstructionAlignment);
-  EXPECT_EQ(offset_mips64.Uint32Value(kMips64), kMips64InstructionAlignment);
+  CodeOffset offset_thumb2 =
+      CodeOffset::FromOffset(kThumb2InstructionAlignment, InstructionSet::kThumb2);
+  CodeOffset offset_arm64 =
+      CodeOffset::FromOffset(kArm64InstructionAlignment, InstructionSet::kArm64);
+  CodeOffset offset_x86 =
+      CodeOffset::FromOffset(kX86InstructionAlignment, InstructionSet::kX86);
+  CodeOffset offset_x86_64 =
+      CodeOffset::FromOffset(kX86_64InstructionAlignment, InstructionSet::kX86_64);
+  CodeOffset offset_mips =
+      CodeOffset::FromOffset(kMipsInstructionAlignment, InstructionSet::kMips);
+  CodeOffset offset_mips64 =
+      CodeOffset::FromOffset(kMips64InstructionAlignment, InstructionSet::kMips64);
+  EXPECT_EQ(offset_thumb2.Uint32Value(InstructionSet::kThumb2), kThumb2InstructionAlignment);
+  EXPECT_EQ(offset_arm64.Uint32Value(InstructionSet::kArm64), kArm64InstructionAlignment);
+  EXPECT_EQ(offset_x86.Uint32Value(InstructionSet::kX86), kX86InstructionAlignment);
+  EXPECT_EQ(offset_x86_64.Uint32Value(InstructionSet::kX86_64), kX86_64InstructionAlignment);
+  EXPECT_EQ(offset_mips.Uint32Value(InstructionSet::kMips), kMipsInstructionAlignment);
+  EXPECT_EQ(offset_mips64.Uint32Value(InstructionSet::kMips64), kMips64InstructionAlignment);
 }
 
 TEST(StackMapTest, TestDeduplicateStackMask) {
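
CodeOffsetTest above round-trips one alignment unit per ISA. A hypothetical stand-in (toy struct; the scaling behavior is assumed from what the test checks, it is not the real art::CodeOffset):

  #include <cassert>
  #include <cstdint>

  // Toy stand-in: store the byte offset divided by the instruction
  // alignment, and multiply it back on read, so fewer bits are needed.
  struct ToyCodeOffset {
    static ToyCodeOffset FromOffset(uint32_t offset, uint32_t alignment) {
      assert(offset % alignment == 0u);
      return ToyCodeOffset{offset / alignment};
    }
    uint32_t Uint32Value(uint32_t alignment) const { return value_ * alignment; }
    uint32_t value_;
  };

  int main() {
    constexpr uint32_t kThumb2Alignment = 2u;  // Thumb-2 uses 2-byte alignment
    ToyCodeOffset off = ToyCodeOffset::FromOffset(kThumb2Alignment, kThumb2Alignment);
    assert(off.Uint32Value(kThumb2Alignment) == kThumb2Alignment);
    return 0;
  }
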
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 9527a60..921d401 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -247,15 +247,15 @@
   ArenaAllocator allocator(&pool);
   switch (isa) {
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64:
+    case InstructionSet::kArm64:
       return arm64::CreateTrampoline(&allocator, abi, offset);
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64:
+    case InstructionSet::kMips64:
       return mips64::CreateTrampoline(&allocator, abi, offset);
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return x86_64::CreateTrampoline(&allocator, offset);
 #endif
     default:
@@ -273,16 +273,16 @@
   ArenaAllocator allocator(&pool);
   switch (isa) {
 #ifdef ART_ENABLE_CODEGEN_arm
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return arm::CreateTrampoline(&allocator, abi, offset);
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips:
+    case InstructionSet::kMips:
       return mips::CreateTrampoline(&allocator, abi, offset);
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86:
+    case InstructionSet::kX86:
       UNUSED(abi);
       return x86::CreateTrampoline(&allocator, offset);
 #endif
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index e239004..c13c9af 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -232,7 +232,7 @@
 class ArmVIXLJNIMacroLabel FINAL
     : public JNIMacroLabelCommon<ArmVIXLJNIMacroLabel,
                                  vixl32::Label,
-                                 kArm> {
+                                 InstructionSet::kArm> {
  public:
   vixl32::Label* AsArm() {
     return AsPlatformLabel();
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index fda87aa..ce39a13 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -235,7 +235,7 @@
 class Arm64JNIMacroLabel FINAL
     : public JNIMacroLabelCommon<Arm64JNIMacroLabel,
                                  vixl::aarch64::Label,
-                                 kArm64> {
+                                 InstructionSet::kArm64> {
  public:
   vixl::aarch64::Label* AsArm64() {
     return AsPlatformLabel();
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 5307d17..655d17d 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -81,7 +81,7 @@
 
   if (toolsdir.empty()) {
     setup_results();
-    toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(kThumb2);
+    toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(InstructionSet::kThumb2);
     SetAndroidData();
   }
 
@@ -215,10 +215,10 @@
                                    is_synchronized,
                                    is_critical_native,
                                    shorty,
-                                   kThumb2));
+                                   InstructionSet::kThumb2));
   std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
       ManagedRuntimeCallingConvention::Create(
-          &allocator, is_static, is_synchronized, shorty, kThumb2));
+          &allocator, is_static, is_synchronized, shorty, InstructionSet::kThumb2));
   const int frame_size(jni_conv->FrameSize());
   ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
 
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 0616b35..3f7691b 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -56,12 +56,12 @@
 
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return MacroAsm32UniquePtr(new (allocator) arm::ArmVIXLJNIMacroAssembler(allocator));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips:
+    case InstructionSet::kMips:
       return MacroAsm32UniquePtr(new (allocator) mips::MipsAssembler(
           allocator,
           instruction_set_features != nullptr
@@ -69,7 +69,7 @@
               : nullptr));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86:
+    case InstructionSet::kX86:
       return MacroAsm32UniquePtr(new (allocator) x86::X86JNIMacroAssembler(allocator));
 #endif
     default:
@@ -91,11 +91,11 @@
 
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64:
+    case InstructionSet::kArm64:
       return MacroAsm64UniquePtr(new (allocator) arm64::Arm64JNIMacroAssembler(allocator));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64:
+    case InstructionSet::kMips64:
       return MacroAsm64UniquePtr(new (allocator) mips64::Mips64Assembler(
           allocator,
           instruction_set_features != nullptr
@@ -103,7 +103,7 @@
               : nullptr));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return MacroAsm64UniquePtr(new (allocator) x86_64::X86_64JNIMacroAssembler(allocator));
 #endif
     default:
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 56eaf19..99219d8 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -171,7 +171,7 @@
 class X86JNIMacroLabel FINAL
     : public JNIMacroLabelCommon<X86JNIMacroLabel,
                                  art::Label,
-                                 kX86> {
+                                 InstructionSet::kX86> {
  public:
   art::Label* AsX86() {
     return AsPlatformLabel();
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index d1a3032..d766ad4 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -197,7 +197,7 @@
 class X86_64JNIMacroLabel FINAL
     : public JNIMacroLabelCommon<X86_64JNIMacroLabel,
                                  art::Label,
-                                 kX86_64> {
+                                 InstructionSet::kX86_64> {
  public:
   art::Label* AsX86_64() {
     return AsPlatformLabel();
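
The JNIMacroLabelCommon bases in this change also show a scoped enumerator used as a non-type template parameter; it binds exactly as before once qualified. A standalone sketch (toy names, not the real label classes):

  enum class Isa { kArm, kX86_64 };

  template <typename PlatformLabel, Isa kIsa>
  class LabelCommon {
   public:
    static constexpr Isa GetIsa() { return kIsa; }
    PlatformLabel* AsPlatformLabel() { return &label_; }
   private:
    PlatformLabel label_;
  };

  struct ToyLabel {};

  class ToyX86_64Label : public LabelCommon<ToyLabel, Isa::kX86_64> {};

  static_assert(ToyX86_64Label::GetIsa() == Isa::kX86_64,
                "the qualified enumerator binds the template parameter");

  int main() { return 0; }
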