ART: Make InstructionSet an enum class and add kLast.

Adding InstructionSet::kLast will make it easier to encode
the InstructionSet in fewer bits using BitField<>. However,
introducing `kLast` into the `art` namespace is not a good
idea, so we change InstructionSet to an enum class.
This also uncovered a case of the unscoped `kNone` being
erroneously used instead of vixl32::Condition::None(), so
it's good to remove `kNone` from the `art` namespace.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Change-Id: I6fa6168dfba4ed6da86d021a69c80224f09997a6
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index 2929f36..60dfdce 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -148,7 +148,7 @@
       } else if (option.starts_with("--instruction-set=")) {
         StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data();
         instruction_set_ = GetInstructionSetFromString(instruction_set_str.data());
-        if (instruction_set_ == kNone) {
+        if (instruction_set_ == InstructionSet::kNone) {
           fprintf(stderr, "Unsupported instruction set %s\n", instruction_set_str.data());
           PrintUsage();
           return false;
@@ -263,7 +263,7 @@
 
         DBG_LOG << "boot_image_location parent_dir_name was " << parent_dir_name;
 
-        if (GetInstructionSetFromString(parent_dir_name.c_str()) != kNone) {
+        if (GetInstructionSetFromString(parent_dir_name.c_str()) != InstructionSet::kNone) {
           *error_msg = "Do not specify the architecture as part of the boot image location";
           return false;
         }
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 866a4d5..29ff235 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -68,7 +68,7 @@
                                              : &Thread::DumpThreadOffset<PointerSize::k32>);
     std::unique_ptr<Disassembler> disasm(Disassembler::Create(isa, opts));
     std::stringstream stream;
-    const uint8_t* base = actual_asm.data() + (isa == kThumb2 ? 1 : 0);
+    const uint8_t* base = actual_asm.data() + (isa == InstructionSet::kThumb2 ? 1 : 0);
     disasm->Dump(stream, base, base + actual_asm.size());
     ReformatAsm(&stream, &lines);
     // Print CFI and assembly interleaved.
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 111469f..fc6a717 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -61,14 +61,14 @@
 
 size_t CompiledCode::CodeDelta(InstructionSet instruction_set) {
   switch (instruction_set) {
-    case kArm:
-    case kArm64:
-    case kMips:
-    case kMips64:
-    case kX86:
-    case kX86_64:
+    case InstructionSet::kArm:
+    case InstructionSet::kArm64:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
       return 0;
-    case kThumb2: {
+    case InstructionSet::kThumb2: {
       // +1 to set the low-order bit so a BLX will switch to Thumb mode
       return 1;
     }
@@ -80,14 +80,14 @@
 
 const void* CompiledCode::CodePointer(const void* code_pointer, InstructionSet instruction_set) {
   switch (instruction_set) {
-    case kArm:
-    case kArm64:
-    case kMips:
-    case kMips64:
-    case kX86:
-    case kX86_64:
+    case InstructionSet::kArm:
+    case InstructionSet::kArm64:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
       return code_pointer;
-    case kThumb2: {
+    case InstructionSet::kThumb2: {
       uintptr_t address = reinterpret_cast<uintptr_t>(code_pointer);
       // Set the low-order bit so a BLX will switch to Thumb mode
       address |= 0x1;
diff --git a/compiler/debug/dwarf/dwarf_test.h b/compiler/debug/dwarf/dwarf_test.h
index b30ff14..5405759 100644
--- a/compiler/debug/dwarf/dwarf_test.h
+++ b/compiler/debug/dwarf/dwarf_test.h
@@ -60,7 +60,8 @@
   template<typename ElfTypes>
   std::vector<std::string> Objdump(const char* args) {
     // Write simple elf file with just the DWARF sections.
-    InstructionSet isa = (sizeof(typename ElfTypes::Addr) == 8) ? kX86_64 : kX86;
+    InstructionSet isa =
+        (sizeof(typename ElfTypes::Addr) == 8) ? InstructionSet::kX86_64 : InstructionSet::kX86;
     ScratchFile file;
     linker::FileOutputStream output_stream(file.GetFile());
     linker::ElfBuilder<ElfTypes> builder(isa, nullptr, &output_stream);
diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h
index 6dacdfa..d0c98a7 100644
--- a/compiler/debug/elf_debug_frame_writer.h
+++ b/compiler/debug/elf_debug_frame_writer.h
@@ -37,8 +37,8 @@
   // debugger that its value in the previous frame is not recoverable.
   bool is64bit = Is64BitInstructionSet(isa);
   switch (isa) {
-    case kArm:
-    case kThumb2: {
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2: {
       dwarf::DebugFrameOpCodeWriter<> opcodes;
       opcodes.DefCFA(Reg::ArmCore(13), 0);  // R13(SP).
       // core registers.
@@ -61,7 +61,7 @@
       WriteCIE(is64bit, return_reg, opcodes, format, buffer);
       return;
     }
-    case kArm64: {
+    case InstructionSet::kArm64: {
       dwarf::DebugFrameOpCodeWriter<> opcodes;
       opcodes.DefCFA(Reg::Arm64Core(31), 0);  // R31(SP).
       // core registers.
@@ -84,8 +84,8 @@
       WriteCIE(is64bit, return_reg, opcodes, format, buffer);
       return;
     }
-    case kMips:
-    case kMips64: {
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64: {
       dwarf::DebugFrameOpCodeWriter<> opcodes;
       opcodes.DefCFA(Reg::MipsCore(29), 0);  // R29(SP).
       // core registers.
@@ -108,7 +108,7 @@
       WriteCIE(is64bit, return_reg, opcodes, format, buffer);
       return;
     }
-    case kX86: {
+    case InstructionSet::kX86: {
       // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296
       constexpr bool generate_opcodes_for_x86_fp = false;
       dwarf::DebugFrameOpCodeWriter<> opcodes;
@@ -134,7 +134,7 @@
       WriteCIE(is64bit, return_reg, opcodes, format, buffer);
       return;
     }
-    case kX86_64: {
+    case InstructionSet::kX86_64: {
       dwarf::DebugFrameOpCodeWriter<> opcodes;
       opcodes.DefCFA(Reg::X86_64Core(4), 8);  // R4(RSP).
       opcodes.Offset(Reg::X86_64Core(16), -8);  // R16(RIP).
@@ -160,7 +160,7 @@
       WriteCIE(is64bit, return_reg, opcodes, format, buffer);
       return;
     }
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   LOG(FATAL) << "Cannot write CIE frame for ISA " << isa;
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 49d52c4..6e72b46 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -68,19 +68,19 @@
     int code_factor_bits_ = 0;
     int dwarf_isa = -1;
     switch (isa) {
-      case kArm:  // arm actually means thumb2.
-      case kThumb2:
+      case InstructionSet::kArm:  // arm actually means thumb2.
+      case InstructionSet::kThumb2:
         code_factor_bits_ = 1;  // 16-bit instuctions
         dwarf_isa = 1;  // DW_ISA_ARM_thumb.
         break;
-      case kArm64:
-      case kMips:
-      case kMips64:
+      case InstructionSet::kArm64:
+      case InstructionSet::kMips:
+      case InstructionSet::kMips64:
         code_factor_bits_ = 2;  // 32-bit instructions
         break;
-      case kNone:
-      case kX86:
-      case kX86_64:
+      case InstructionSet::kNone:
+      case InstructionSet::kX86:
+      case InstructionSet::kX86_64:
         break;
     }
     std::unordered_set<uint64_t> seen_addresses(compilation_unit.methods.size());
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index bf47e8f..bb856b2 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -33,20 +33,20 @@
 
 static Reg GetDwarfCoreReg(InstructionSet isa, int machine_reg) {
   switch (isa) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return Reg::ArmCore(machine_reg);
-    case kArm64:
+    case InstructionSet::kArm64:
       return Reg::Arm64Core(machine_reg);
-    case kX86:
+    case InstructionSet::kX86:
       return Reg::X86Core(machine_reg);
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return Reg::X86_64Core(machine_reg);
-    case kMips:
+    case InstructionSet::kMips:
       return Reg::MipsCore(machine_reg);
-    case kMips64:
+    case InstructionSet::kMips64:
       return Reg::Mips64Core(machine_reg);
-    case kNone:
+    case InstructionSet::kNone:
       LOG(FATAL) << "No instruction set";
   }
   UNREACHABLE();
@@ -54,20 +54,20 @@
 
 static Reg GetDwarfFpReg(InstructionSet isa, int machine_reg) {
   switch (isa) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return Reg::ArmFp(machine_reg);
-    case kArm64:
+    case InstructionSet::kArm64:
       return Reg::Arm64Fp(machine_reg);
-    case kX86:
+    case InstructionSet::kX86:
       return Reg::X86Fp(machine_reg);
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return Reg::X86_64Fp(machine_reg);
-    case kMips:
+    case InstructionSet::kMips:
       return Reg::MipsFp(machine_reg);
-    case kMips64:
+    case InstructionSet::kMips64:
       return Reg::Mips64Fp(machine_reg);
-    case kNone:
+    case InstructionSet::kNone:
       LOG(FATAL) << "No instruction set";
   }
   UNREACHABLE();
@@ -230,7 +230,7 @@
           break;  // the high word is correctly implied by the low word.
         }
       } else if (kind == Kind::kInFpuRegister) {
-        if ((isa == kArm || isa == kThumb2) &&
+        if ((isa == InstructionSet::kArm || isa == InstructionSet::kThumb2) &&
             piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegister &&
             reg_hi.GetValue() == value + 1 && value % 2 == 0) {
           // Translate S register pair to D register (e.g. S4+S5 to D2).
diff --git a/compiler/debug/elf_symtab_writer.h b/compiler/debug/elf_symtab_writer.h
index b37f984..0907e10 100644
--- a/compiler/debug/elf_symtab_writer.h
+++ b/compiler/debug/elf_symtab_writer.h
@@ -89,7 +89,7 @@
     // instructions, so that disassembler tools can correctly disassemble.
     // Note that even if we generate just a single mapping symbol, ARM's Streamline
     // requires it to match function symbol.  Just address 0 does not work.
-    if (info.isa == kThumb2) {
+    if (info.isa == InstructionSet::kThumb2) {
       if (address < mapping_symbol_address || !kGenerateSingleArmMappingSymbol) {
         symtab->Add(strtab->Write("$t"), text, address & ~1, 0, STB_LOCAL, STT_NOTYPE);
         mapping_symbol_address = address;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 7581962..a94dbe9 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -381,9 +381,9 @@
       quicken_data.push_back(static_cast<uint8_t>(info.dex_member_index >> 8));
     }
     InstructionSet instruction_set = driver->GetInstructionSet();
-    if (instruction_set == kThumb2) {
+    if (instruction_set == InstructionSet::kThumb2) {
       // Don't use the thumb2 instruction set to avoid the one off code delta.
-      instruction_set = kArm;
+      instruction_set = InstructionSet::kArm;
     }
     return CompiledMethod::SwapAllocCompiledMethod(
         driver,
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index e1ea630..de481ca 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -31,7 +31,7 @@
   CompilerDriver driver(&compiler_options,
                         &verification_results,
                         Compiler::kOptimizing,
-                        /* instruction_set_ */ kNone,
+                        /* instruction_set_ */ InstructionSet::kNone,
                         /* instruction_set_features */ nullptr,
                         /* image_classes */ nullptr,
                         /* compiled_classes */ nullptr,
@@ -91,7 +91,7 @@
         for (auto&& f : cfi_info) {
           for (auto&& p : patches) {
             compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod(
-                &driver, kNone, c, 0u, 0u, 0u, s, v, f, p));
+                &driver, InstructionSet::kNone, c, 0u, 0u, 0u, s, v, f, p));
           }
         }
       }
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 135f9c7..3d4da5e 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -290,7 +290,8 @@
       verification_results_(verification_results),
       compiler_(Compiler::Create(this, compiler_kind)),
       compiler_kind_(compiler_kind),
-      instruction_set_(instruction_set == kArm ? kThumb2 : instruction_set),
+      instruction_set_(
+          instruction_set == InstructionSet::kArm ? InstructionSet::kThumb2 : instruction_set),
       instruction_set_features_(instruction_set_features),
       requires_constructor_barrier_lock_("constructor barrier lock"),
       non_relative_linker_patch_count_(0u),
@@ -451,13 +452,13 @@
 // GetQuickGenericJniStub allowing down calls that aren't compiled using a JNI compiler?
 static bool InstructionSetHasGenericJniStub(InstructionSet isa) {
   switch (isa) {
-    case kArm:
-    case kArm64:
-    case kThumb2:
-    case kMips:
-    case kMips64:
-    case kX86:
-    case kX86_64: return true;
+    case InstructionSet::kArm:
+    case InstructionSet::kArm64:
+    case InstructionSet::kThumb2:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64: return true;
     default: return false;
   }
 }
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index b434e90..897b50b 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -98,7 +98,7 @@
              static_cast<const void*>(fake_header_code_and_maps_.data() +
                                           (fake_header_code_and_maps_.size() - code_size)));
 
-    if (kRuntimeISA == kArm) {
+    if (kRuntimeISA == InstructionSet::kArm) {
       // Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer().
       CHECK_ALIGNED(stack_maps_offset, 2);
     }
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 5b57718..236b5c0 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -102,13 +102,13 @@
   }
 };
 
-#define TEST_ISA(isa) \
-  TEST_F(JNICFITest, isa) { \
-    std::vector<uint8_t> expected_asm(expected_asm_##isa, \
-        expected_asm_##isa + arraysize(expected_asm_##isa)); \
-    std::vector<uint8_t> expected_cfi(expected_cfi_##isa, \
-        expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
-    TestImpl(isa, #isa, expected_asm, expected_cfi); \
+#define TEST_ISA(isa)                                                 \
+  TEST_F(JNICFITest, isa) {                                           \
+    std::vector<uint8_t> expected_asm(expected_asm_##isa,             \
+        expected_asm_##isa + arraysize(expected_asm_##isa));          \
+    std::vector<uint8_t> expected_cfi(expected_cfi_##isa,             \
+        expected_cfi_##isa + arraysize(expected_cfi_##isa));          \
+    TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi);  \
   }
 
 #ifdef ART_ENABLE_CODEGEN_arm
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 42a5f86..55c27d1 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -54,38 +54,38 @@
     InstructionSet instruction_set) {
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
           new (allocator) arm::ArmManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64:
+    case InstructionSet::kArm64:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
           new (allocator) arm64::Arm64ManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips:
+    case InstructionSet::kMips:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
           new (allocator) mips::MipsManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64:
+    case InstructionSet::kMips64:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
           new (allocator) mips64::Mips64ManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86:
+    case InstructionSet::kX86:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
           new (allocator) x86::X86ManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
           new (allocator) x86_64::X86_64ManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
@@ -156,38 +156,38 @@
                                                                    InstructionSet instruction_set) {
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return std::unique_ptr<JniCallingConvention>(
           new (allocator) arm::ArmJniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64:
+    case InstructionSet::kArm64:
       return std::unique_ptr<JniCallingConvention>(
           new (allocator) arm64::Arm64JniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips:
+    case InstructionSet::kMips:
       return std::unique_ptr<JniCallingConvention>(
           new (allocator) mips::MipsJniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64:
+    case InstructionSet::kMips64:
       return std::unique_ptr<JniCallingConvention>(
           new (allocator) mips64::Mips64JniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86:
+    case InstructionSet::kX86:
       return std::unique_ptr<JniCallingConvention>(
           new (allocator) x86::X86JniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return std::unique_ptr<JniCallingConvention>(
           new (allocator) x86_64::X86_64JniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index e32b681..b3177aa 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -323,7 +323,7 @@
   // Note that we always have outgoing param space available for at least two params.
   if (kUseReadBarrier && is_static && !is_critical_native) {
     const bool kReadBarrierFastPath =
-        (instruction_set != kMips) && (instruction_set != kMips64);
+        (instruction_set != InstructionSet::kMips) && (instruction_set != InstructionSet::kMips64);
     std::unique_ptr<JNIMacroLabel> skip_cold_path_label;
     if (kReadBarrierFastPath) {
       skip_cold_path_label = __ CreateLabel();
@@ -531,7 +531,8 @@
     if (LIKELY(!is_critical_native)) {
       // For normal JNI, store the return value on the stack because the call to
       // JniMethodEnd will clobber the return value. It will be restored in (13).
-      if ((instruction_set == kMips || instruction_set == kMips64) &&
+      if ((instruction_set == InstructionSet::kMips ||
+           instruction_set == InstructionSet::kMips64) &&
           main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
           return_save_location.Uint32Value() % 8 != 0) {
         // Ensure doubles are 8-byte aligned for MIPS
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
index 3d56833..48747fc 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -47,7 +47,7 @@
 constexpr uint32_t kMaxBcondNegativeDisplacement = (1u << 20) - kPcDisplacement;
 
 Thumb2RelativePatcher::Thumb2RelativePatcher(RelativePatcherTargetProvider* provider)
-    : ArmBaseRelativePatcher(provider, kThumb2) {
+    : ArmBaseRelativePatcher(provider, InstructionSet::kThumb2) {
 }
 
 void Thumb2RelativePatcher::PatchCall(std::vector<uint8_t>* code,
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index fe76dfe..2c22a35 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -28,7 +28,7 @@
 
 class Thumb2RelativePatcherTest : public RelativePatcherTest {
  public:
-  Thumb2RelativePatcherTest() : RelativePatcherTest(kThumb2, "default") { }
+  Thumb2RelativePatcherTest() : RelativePatcherTest(InstructionSet::kThumb2, "default") { }
 
  protected:
   static const uint8_t kCallRawCode[];
@@ -173,7 +173,8 @@
       return false;  // No thunk.
     } else {
       uint32_t thunk_end =
-          CompiledCode::AlignCode(method3_offset - sizeof(OatQuickMethodHeader), kThumb2) +
+          CompiledCode::AlignCode(method3_offset - sizeof(OatQuickMethodHeader),
+                                  InstructionSet::kThumb2) +
           MethodCallThunkSize();
       uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
       CHECK_EQ(result3.second, header_offset + sizeof(OatQuickMethodHeader) + 1 /* thumb mode */);
@@ -420,7 +421,8 @@
 
   // Check linked code.
   uint32_t method3_offset = GetMethodOffset(3u);
-  uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2);
+  uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(),
+                                                  InstructionSet::kThumb2);
   uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */);
   ASSERT_EQ(diff & 1u, 0u);
   ASSERT_LT(diff >> 1, 1u << 8);  // Simple encoding, (diff >> 1) fits into 8 bits.
@@ -495,8 +497,7 @@
   ASSERT_TRUE(IsAligned<kArmAlignment>(method3_offset));
   uint32_t method3_header_offset = method3_offset - sizeof(OatQuickMethodHeader);
   uint32_t thunk_size = MethodCallThunkSize();
-  uint32_t thunk_offset =
-      RoundDown(method3_header_offset - thunk_size, GetInstructionSetAlignment(kThumb2));
+  uint32_t thunk_offset = RoundDown(method3_header_offset - thunk_size, kArmAlignment);
   DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size),
             method3_header_offset);
   ASSERT_TRUE(IsAligned<kArmAlignment>(thunk_offset));
@@ -527,7 +528,8 @@
 
   // Check linked code.
   uint32_t method3_offset = GetMethodOffset(3u);
-  uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2);
+  uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(),
+                                                  InstructionSet::kThumb2);
   uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */);
   ASSERT_EQ(diff & 1u, 0u);
   ASSERT_LT(diff >> 1, 1u << 8);  // Simple encoding, (diff >> 1) fits into 8 bits.
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 663e43b..52a0796 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -76,7 +76,8 @@
   if (num_adrp == 0u) {
     return 0u;
   }
-  uint32_t alignment_bytes = CompiledMethod::AlignCode(code_size, kArm64) - code_size;
+  uint32_t alignment_bytes =
+      CompiledMethod::AlignCode(code_size, InstructionSet::kArm64) - code_size;
   return kAdrpThunkSize * num_adrp + alignment_bytes;
 }
 
@@ -84,7 +85,7 @@
 
 Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherTargetProvider* provider,
                                            const Arm64InstructionSetFeatures* features)
-    : ArmBaseRelativePatcher(provider, kArm64),
+    : ArmBaseRelativePatcher(provider, InstructionSet::kArm64),
       fix_cortex_a53_843419_(features->NeedFixCortexA53_843419()),
       reserved_adrp_thunks_(0u),
       processed_adrp_thunks_(0u) {
@@ -105,7 +106,8 @@
   // Add thunks for previous method if any.
   if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
     size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
-    offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
+    offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64) +
+             kAdrpThunkSize * num_adrp_thunks;
     reserved_adrp_thunks_ = adrp_thunk_locations_.size();
   }
 
@@ -149,7 +151,8 @@
     // Add thunks for the last method if any.
     if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
       size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
-      offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
+      offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64) +
+               kAdrpThunkSize * num_adrp_thunks;
       reserved_adrp_thunks_ = adrp_thunk_locations_.size();
     }
   }
@@ -159,7 +162,7 @@
 uint32_t Arm64RelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
   if (fix_cortex_a53_843419_) {
     if (!current_method_thunks_.empty()) {
-      uint32_t aligned_offset = CompiledMethod::AlignCode(offset, kArm64);
+      uint32_t aligned_offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64);
       if (kIsDebugBuild) {
         CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
         size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 8a5b4cc..05459a2 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -29,7 +29,7 @@
 class Arm64RelativePatcherTest : public RelativePatcherTest {
  public:
   explicit Arm64RelativePatcherTest(const std::string& variant)
-      : RelativePatcherTest(kArm64, variant) { }
+      : RelativePatcherTest(InstructionSet::kArm64, variant) { }
 
  protected:
   static const uint8_t kCallRawCode[];
@@ -153,7 +153,8 @@
     // There may be a thunk before method2.
     if (last_result.second != last_method_offset) {
       // Thunk present. Check that there's only one.
-      uint32_t thunk_end = CompiledCode::AlignCode(gap_end, kArm64) + MethodCallThunkSize();
+      uint32_t thunk_end =
+          CompiledCode::AlignCode(gap_end, InstructionSet::kArm64) + MethodCallThunkSize();
       uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
       CHECK_EQ(last_result.second, header_offset + sizeof(OatQuickMethodHeader));
     }
@@ -347,7 +348,8 @@
     CHECK_EQ(compiled_method_refs_[0].index, 1u);
     CHECK_EQ(compiled_method_refs_.size(), compiled_methods_.size());
     uint32_t method1_size = compiled_methods_[0]->GetQuickCode().size();
-    uint32_t thunk_offset = CompiledCode::AlignCode(method1_offset + method1_size, kArm64);
+    uint32_t thunk_offset =
+        CompiledCode::AlignCode(method1_offset + method1_size, InstructionSet::kArm64);
     uint32_t b_diff = thunk_offset - (method1_offset + num_nops * 4u);
     CHECK_ALIGNED(b_diff, 4u);
     ASSERT_LT(b_diff, 128 * MB);
@@ -602,7 +604,7 @@
 
   // Check linked code.
   uint32_t thunk_offset =
-      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
+      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), InstructionSet::kArm64);
   uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
   CHECK_ALIGNED(diff, 4u);
   ASSERT_LT(diff, 128 * MB);
@@ -688,8 +690,7 @@
   ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_offset));
   uint32_t last_method_header_offset = last_method_offset - sizeof(OatQuickMethodHeader);
   uint32_t thunk_size = MethodCallThunkSize();
-  uint32_t thunk_offset =
-      RoundDown(last_method_header_offset - thunk_size, GetInstructionSetAlignment(kArm64));
+  uint32_t thunk_offset = RoundDown(last_method_header_offset - thunk_size, kArm64Alignment);
   DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size),
             last_method_header_offset);
   uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1);
@@ -721,7 +722,7 @@
 
   // Check linked code.
   uint32_t thunk_offset =
-      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
+      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), InstructionSet::kArm64);
   uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
   CHECK_ALIGNED(diff, 4u);
   ASSERT_LT(diff, 128 * MB);
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 7941237..b30b55e 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -417,10 +417,10 @@
                     InstructionSet isa,
                     const InstructionSetFeatures* features)
         : Section(owner, name, type, flags, link, info, align, entsize) {
-      if (isa == kMips || isa == kMips64) {
+      if (isa == InstructionSet::kMips || isa == InstructionSet::kMips64) {
         bool fpu32 = false;    // assume mips64 values
         uint8_t isa_rev = 6;   // assume mips64 values
-        if (isa == kMips) {
+        if (isa == InstructionSet::kMips) {
           // adjust for mips32 values
           fpu32 = features->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint();
           isa_rev = features->AsMipsInstructionSetFeatures()->IsR6()
@@ -430,14 +430,15 @@
                   : 1;
         }
         abiflags_.version = 0;  // version of flags structure
-        abiflags_.isa_level = (isa == kMips) ? 32 : 64;
+        abiflags_.isa_level = (isa == InstructionSet::kMips) ? 32 : 64;
         abiflags_.isa_rev = isa_rev;
-        abiflags_.gpr_size = (isa == kMips) ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
+        abiflags_.gpr_size = (isa == InstructionSet::kMips) ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
         abiflags_.cpr1_size = fpu32 ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
         abiflags_.cpr2_size = MIPS_AFL_REG_NONE;
         // Set the fp_abi to MIPS_ABI_FP_64A for mips32 with 64-bit FPUs (ie: mips32 R5 and R6).
         // Otherwise set to MIPS_ABI_FP_DOUBLE.
-        abiflags_.fp_abi = (isa == kMips && !fpu32) ? MIPS_ABI_FP_64A : MIPS_ABI_FP_DOUBLE;
+        abiflags_.fp_abi =
+            (isa == InstructionSet::kMips && !fpu32) ? MIPS_ABI_FP_64A : MIPS_ABI_FP_DOUBLE;
         abiflags_.isa_ext = 0;
         abiflags_.ases = 0;
         // To keep the code simple, we are not using odd FP reg for single floats for both
@@ -689,7 +690,7 @@
     Elf_Word bss_address = RoundUp(text_address + text_size, kPageSize);
     Elf_Word abiflags_address = RoundUp(bss_address + bss_size, kPageSize);
     Elf_Word abiflags_size = 0;
-    if (isa_ == kMips || isa_ == kMips64) {
+    if (isa_ == InstructionSet::kMips || isa_ == InstructionSet::kMips64) {
       abiflags_size = abiflags_.GetSize();
     }
     Elf_Word dynstr_address = RoundUp(abiflags_address + abiflags_size, kPageSize);
@@ -835,29 +836,29 @@
   static Elf_Ehdr MakeElfHeader(InstructionSet isa, const InstructionSetFeatures* features) {
     Elf_Ehdr elf_header = Elf_Ehdr();
     switch (isa) {
-      case kArm:
+      case InstructionSet::kArm:
         // Fall through.
-      case kThumb2: {
+      case InstructionSet::kThumb2: {
         elf_header.e_machine = EM_ARM;
         elf_header.e_flags = EF_ARM_EABI_VER5;
         break;
       }
-      case kArm64: {
+      case InstructionSet::kArm64: {
         elf_header.e_machine = EM_AARCH64;
         elf_header.e_flags = 0;
         break;
       }
-      case kX86: {
+      case InstructionSet::kX86: {
         elf_header.e_machine = EM_386;
         elf_header.e_flags = 0;
         break;
       }
-      case kX86_64: {
+      case InstructionSet::kX86_64: {
         elf_header.e_machine = EM_X86_64;
         elf_header.e_flags = 0;
         break;
       }
-      case kMips: {
+      case InstructionSet::kMips: {
         elf_header.e_machine = EM_MIPS;
         elf_header.e_flags = (EF_MIPS_NOREORDER |
                               EF_MIPS_PIC       |
@@ -868,7 +869,7 @@
                                    : EF_MIPS_ARCH_32R2));
         break;
       }
-      case kMips64: {
+      case InstructionSet::kMips64: {
         elf_header.e_machine = EM_MIPS;
         elf_header.e_flags = (EF_MIPS_NOREORDER |
                               EF_MIPS_PIC       |
@@ -876,7 +877,7 @@
                               EF_MIPS_ARCH_64R6);
         break;
       }
-      case kNone: {
+      case InstructionSet::kNone: {
         LOG(FATAL) << "No instruction set";
         break;
       }
diff --git a/compiler/linker/mips/relative_patcher_mips32r6_test.cc b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
index 586e2aa..629fdd5 100644
--- a/compiler/linker/mips/relative_patcher_mips32r6_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
@@ -22,7 +22,7 @@
 
 class Mips32r6RelativePatcherTest : public RelativePatcherTest {
  public:
-  Mips32r6RelativePatcherTest() : RelativePatcherTest(kMips, "mips32r6") {}
+  Mips32r6RelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips, "mips32r6") {}
 
  protected:
   static const uint8_t kUnpatchedPcRelativeRawCode[];
diff --git a/compiler/linker/mips/relative_patcher_mips_test.cc b/compiler/linker/mips/relative_patcher_mips_test.cc
index ebe5406..d876c76 100644
--- a/compiler/linker/mips/relative_patcher_mips_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips_test.cc
@@ -23,7 +23,7 @@
 
 class MipsRelativePatcherTest : public RelativePatcherTest {
  public:
-  MipsRelativePatcherTest() : RelativePatcherTest(kMips, "mips32r2") {}
+  MipsRelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips, "mips32r2") {}
 
  protected:
   static const uint8_t kUnpatchedPcRelativeRawCode[];
diff --git a/compiler/linker/mips64/relative_patcher_mips64_test.cc b/compiler/linker/mips64/relative_patcher_mips64_test.cc
index 4edcae7..a02f500 100644
--- a/compiler/linker/mips64/relative_patcher_mips64_test.cc
+++ b/compiler/linker/mips64/relative_patcher_mips64_test.cc
@@ -23,7 +23,7 @@
 
 class Mips64RelativePatcherTest : public RelativePatcherTest {
  public:
-  Mips64RelativePatcherTest() : RelativePatcherTest(kMips64, "default") {}
+  Mips64RelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips64, "default") {}
 
  protected:
   static const uint8_t kUnpatchedPcRelativeRawCode[];
diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc
index dc15bb0..13877f8 100644
--- a/compiler/linker/relative_patcher.cc
+++ b/compiler/linker/relative_patcher.cc
@@ -95,31 +95,31 @@
   UNUSED(provider);
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86:
+    case InstructionSet::kX86:
       return std::unique_ptr<RelativePatcher>(new X86RelativePatcher());
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return std::unique_ptr<RelativePatcher>(new X86_64RelativePatcher());
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm
-    case kArm:
+    case InstructionSet::kArm:
       // Fall through: we generate Thumb2 code for "arm".
-    case kThumb2:
+    case InstructionSet::kThumb2:
       return std::unique_ptr<RelativePatcher>(new Thumb2RelativePatcher(provider));
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64:
+    case InstructionSet::kArm64:
       return std::unique_ptr<RelativePatcher>(
           new Arm64RelativePatcher(provider, features->AsArm64InstructionSetFeatures()));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips:
+    case InstructionSet::kMips:
       return std::unique_ptr<RelativePatcher>(
           new MipsRelativePatcher(features->AsMipsInstructionSetFeatures()));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64:
+    case InstructionSet::kMips64:
       return std::unique_ptr<RelativePatcher>(new Mips64RelativePatcher());
 #endif
     default:
diff --git a/compiler/linker/x86/relative_patcher_x86_test.cc b/compiler/linker/x86/relative_patcher_x86_test.cc
index 4f74cee..b855dec 100644
--- a/compiler/linker/x86/relative_patcher_x86_test.cc
+++ b/compiler/linker/x86/relative_patcher_x86_test.cc
@@ -23,7 +23,7 @@
 
 class X86RelativePatcherTest : public RelativePatcherTest {
  public:
-  X86RelativePatcherTest() : RelativePatcherTest(kX86, "default") { }
+  X86RelativePatcherTest() : RelativePatcherTest(InstructionSet::kX86, "default") { }
 
  protected:
   static const uint8_t kCallRawCode[];
diff --git a/compiler/linker/x86_64/relative_patcher_x86_64_test.cc b/compiler/linker/x86_64/relative_patcher_x86_64_test.cc
index ae17aa7..6baa92d 100644
--- a/compiler/linker/x86_64/relative_patcher_x86_64_test.cc
+++ b/compiler/linker/x86_64/relative_patcher_x86_64_test.cc
@@ -23,7 +23,7 @@
 
 class X86_64RelativePatcherTest : public RelativePatcherTest {
  public:
-  X86_64RelativePatcherTest() : RelativePatcherTest(kX86_64, "default") { }
+  X86_64RelativePatcherTest() : RelativePatcherTest(InstructionSet::kX86_64, "default") { }
 
  protected:
   static const uint8_t kCallRawCode[];
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index b8d1f52..5625f04 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -786,43 +786,43 @@
   ArenaAllocator* allocator = graph->GetAllocator();
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm
-    case kArm:
-    case kThumb2: {
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2: {
       return std::unique_ptr<CodeGenerator>(
           new (allocator) arm::CodeGeneratorARMVIXL(
               graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64: {
+    case InstructionSet::kArm64: {
       return std::unique_ptr<CodeGenerator>(
           new (allocator) arm64::CodeGeneratorARM64(
               graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips: {
+    case InstructionSet::kMips: {
       return std::unique_ptr<CodeGenerator>(
           new (allocator) mips::CodeGeneratorMIPS(
               graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64: {
+    case InstructionSet::kMips64: {
       return std::unique_ptr<CodeGenerator>(
           new (allocator) mips64::CodeGeneratorMIPS64(
               graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86: {
+    case InstructionSet::kX86: {
       return std::unique_ptr<CodeGenerator>(
           new (allocator) x86::CodeGeneratorX86(
               graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats));
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64: {
+    case InstructionSet::kX86_64: {
       return std::unique_ptr<CodeGenerator>(
           new (allocator) x86_64::CodeGeneratorX86_64(
               graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats));
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 64c88eb..18ad60d 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -626,7 +626,7 @@
 
   bool CallPushesPC() const {
     InstructionSet instruction_set = GetInstructionSet();
-    return instruction_set == kX86 || instruction_set == kX86_64;
+    return instruction_set == InstructionSet::kX86 || instruction_set == InstructionSet::kX86_64;
   }
 
   // Arm64 has its own type for a label, so we need to templatize these methods
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c7811ab..e01b7b7 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1557,12 +1557,13 @@
   MacroAssembler* masm = GetVIXLAssembler();
   __ Bind(&frame_entry_label_);
 
-  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
+  bool do_overflow_check =
+      FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm64) || !IsLeafMethod();
   if (do_overflow_check) {
     UseScratchRegisterScope temps(masm);
     Register temp = temps.AcquireX();
     DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
-    __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
+    __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kArm64)));
     {
       // Ensure that between load and RecordPcInfo there are no pools emitted.
       ExactAssemblyScope eas(GetVIXLAssembler(),
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 90f3ae8..edd3072 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2568,7 +2568,7 @@
   if (!skip_overflow_check) {
     UseScratchRegisterScope temps(GetVIXLAssembler());
     vixl32::Register temp = temps.Acquire();
-    __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(kArm)));
+    __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(InstructionSet::kArm)));
     // The load must immediately precede RecordPcInfo.
     ExactAssemblyScope aas(GetVIXLAssembler(),
                            vixl32::kMaxInstructionSizeInBytes,
@@ -5303,7 +5303,7 @@
   vixl32::Label less, greater, done;
   vixl32::Label* final_label = codegen_->GetFinalLabel(compare, &done);
   DataType::Type type = compare->InputAt(0)->GetType();
-  vixl32::Condition less_cond = vixl32::Condition(kNone);
+  vixl32::Condition less_cond = vixl32::Condition::None();
   switch (type) {
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 2f65e8c..b3fed07 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1132,7 +1132,7 @@
   StackMapStream* stack_map_stream = GetStackMapStream();
   for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
     uint32_t old_position =
-        stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
+        stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips);
     uint32_t new_position = __ GetAdjustedPosition(old_position);
     DCHECK_GE(new_position, old_position);
     stack_map_stream->SetStackMapNativePcOffset(i, new_position);
@@ -1347,13 +1347,14 @@
 void CodeGeneratorMIPS::GenerateFrameEntry() {
   __ Bind(&frame_entry_label_);
 
-  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips) || !IsLeafMethod();
+  bool do_overflow_check =
+      FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips) || !IsLeafMethod();
 
   if (do_overflow_check) {
     __ LoadFromOffset(kLoadWord,
                       ZERO,
                       SP,
-                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips)));
+                      -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips)));
     RecordPcInfo(nullptr, 0);
   }
 
@@ -1365,8 +1366,9 @@
   }
 
   // Make sure the frame size isn't unreasonably large.
-  if (GetFrameSize() > GetStackOverflowReservedBytes(kMips)) {
-    LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips) << " bytes";
+  if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips)) {
+    LOG(FATAL) << "Stack frame larger than "
+        << GetStackOverflowReservedBytes(InstructionSet::kMips) << " bytes";
   }
 
   // Spill callee-saved registers.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 6cbfa14..53a7f26 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1076,7 +1076,7 @@
   StackMapStream* stack_map_stream = GetStackMapStream();
   for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
     uint32_t old_position =
-        stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
+        stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips64);
     uint32_t new_position = __ GetAdjustedPosition(old_position);
     DCHECK_GE(new_position, old_position);
     stack_map_stream->SetStackMapNativePcOffset(i, new_position);
@@ -1161,13 +1161,15 @@
 void CodeGeneratorMIPS64::GenerateFrameEntry() {
   __ Bind(&frame_entry_label_);
 
-  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();
+  bool do_overflow_check =
+      FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips64) || !IsLeafMethod();
 
   if (do_overflow_check) {
-    __ LoadFromOffset(kLoadWord,
-                      ZERO,
-                      SP,
-                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
+    __ LoadFromOffset(
+        kLoadWord,
+        ZERO,
+        SP,
+        -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips64)));
     RecordPcInfo(nullptr, 0);
   }
 
@@ -1176,8 +1178,9 @@
   }
 
   // Make sure the frame size isn't unreasonably large.
-  if (GetFrameSize() > GetStackOverflowReservedBytes(kMips64)) {
-    LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips64) << " bytes";
+  if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips64)) {
+    LOG(FATAL) << "Stack frame larger than "
+        << GetStackOverflowReservedBytes(InstructionSet::kMips64) << " bytes";
   }
 
   // Spill callee-saved registers.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 44614e1..f84dd00 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1072,7 +1072,8 @@
   DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
 
   if (!skip_overflow_check) {
-    __ testl(EAX, Address(ESP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86))));
+    size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86);
+    __ testl(EAX, Address(ESP, -static_cast<int32_t>(reserved_bytes)));
     RecordPcInfo(nullptr, 0);
   }
 
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 259bb4a..16d1f18 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1277,8 +1277,8 @@
   DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
 
   if (!skip_overflow_check) {
-    __ testq(CpuRegister(RAX), Address(
-        CpuRegister(RSP), -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86_64))));
+    size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86_64);
+    __ testq(CpuRegister(RAX), Address(CpuRegister(RSP), -static_cast<int32_t>(reserved_bytes)));
     RecordPcInfo(nullptr, 0);
   }
 
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index e35c7c7..ba431a5 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -44,22 +44,22 @@
   ::std::vector<CodegenTargetConfig> test_config_candidates = {
 #ifdef ART_ENABLE_CODEGEN_arm
     // TODO: Should't this be `kThumb2` instead of `kArm` here?
-    CodegenTargetConfig(kArm, create_codegen_arm_vixl32),
+    CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32),
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    CodegenTargetConfig(kArm64, create_codegen_arm64),
+    CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64),
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    CodegenTargetConfig(kX86, create_codegen_x86),
+    CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86),
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    CodegenTargetConfig(kX86_64, create_codegen_x86_64),
+    CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    CodegenTargetConfig(kMips, create_codegen_mips),
+    CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    CodegenTargetConfig(kMips64, create_codegen_mips64)
+    CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
 #endif
   };
 
@@ -825,7 +825,7 @@
 TEST_F(CodegenTest, MipsClobberRA) {
   std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
       MipsInstructionSetFeatures::FromCppDefines());
-  if (!CanExecute(kMips) || features_mips->IsR6()) {
+  if (!CanExecute(InstructionSet::kMips) || features_mips->IsR6()) {
     // HMipsComputeBaseMethodAddress and the NAL instruction behind it
     // should only be generated on non-R6.
     return;
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index bcbcc12..c41c290 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -207,7 +207,7 @@
 static bool CanExecuteOnHardware(InstructionSet target_isa) {
   return (target_isa == kRuntimeISA)
       // Handle the special case of ARM, with two instructions sets (ARM32 and Thumb-2).
-      || (kRuntimeISA == kArm && target_isa == kThumb2);
+      || (kRuntimeISA == InstructionSet::kArm && target_isa == InstructionSet::kThumb2);
 }
 
 static bool CanExecute(InstructionSet target_isa) {
@@ -271,7 +271,7 @@
   typedef Expected (*fptr)();
   CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
   fptr f = reinterpret_cast<fptr>(allocator.GetMemory());
-  if (target_isa == kThumb2) {
+  if (target_isa == InstructionSet::kThumb2) {
     // For thumb we need the bottom bit set.
     f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1);
   }
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 102acb3..ed2f8e9 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -342,7 +342,7 @@
 }
 
 inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
-  DCHECK(HasShifterOperand(instruction, kArm64));
+  DCHECK(HasShifterOperand(instruction, InstructionSet::kArm64));
   // Although the `neg` instruction is an alias of the `sub` instruction, `HNeg`
   // does *not* support extension. This is because the `extended register` form
   // of the `sub` instruction interprets the left register with code 31 as the
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index f7fd910..12c6988 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -153,7 +153,7 @@
     }
 
     const uint8_t* base = disassembler_->GetDisassemblerOptions()->base_address_;
-    if (instruction_set_ == kThumb2) {
+    if (instruction_set_ == InstructionSet::kThumb2) {
       // ARM and Thumb-2 use the same disassembler. The bottom bit of the
       // address is used to distinguish between the two.
       base += 1;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 189d5ae..2bd2d5f 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -250,7 +250,7 @@
   DataType::Type type = mul->GetPackedType();
   InstructionSet isa = codegen_->GetInstructionSet();
   switch (isa) {
-    case kArm64:
+    case InstructionSet::kArm64:
       if (!(type == DataType::Type::kUint8 ||
             type == DataType::Type::kInt8 ||
             type == DataType::Type::kUint16 ||
@@ -259,8 +259,8 @@
         return false;
       }
       break;
-    case kMips:
-    case kMips64:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
       if (!(type == DataType::Type::kUint8 ||
             type == DataType::Type::kInt8 ||
             type == DataType::Type::kUint16 ||
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 9422f9f..d41e49a 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -84,7 +84,7 @@
 bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* use,
                                                                  HInstruction* bitfield_op,
                                                                  bool do_merge) {
-  DCHECK(HasShifterOperand(use, kArm));
+  DCHECK(HasShifterOperand(use, InstructionSet::kArm));
   DCHECK(use->IsBinaryOperation());
   DCHECK(CanFitInShifterOperand(bitfield_op));
   DCHECK(!bitfield_op->HasEnvironmentUses());
@@ -166,7 +166,7 @@
   // Check whether we can merge the instruction in all its users' shifter operand.
   for (const HUseListNode<HInstruction*>& use : uses) {
     HInstruction* user = use.GetUser();
-    if (!HasShifterOperand(user, kArm)) {
+    if (!HasShifterOperand(user, InstructionSet::kArm)) {
       return false;
     }
     if (!CanMergeIntoShifterOperand(user, bitfield_op)) {
@@ -242,7 +242,7 @@
 }
 
 void InstructionSimplifierArmVisitor::VisitMul(HMul* instruction) {
-  if (TryCombineMultiplyAccumulate(instruction, kArm)) {
+  if (TryCombineMultiplyAccumulate(instruction, InstructionSet::kArm)) {
     RecordSimplification();
   }
 }
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index c0ab68f..69e1463 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -90,7 +90,7 @@
 bool InstructionSimplifierArm64Visitor::TryMergeIntoShifterOperand(HInstruction* use,
                                                                    HInstruction* bitfield_op,
                                                                    bool do_merge) {
-  DCHECK(HasShifterOperand(use, kArm64));
+  DCHECK(HasShifterOperand(use, InstructionSet::kArm64));
   DCHECK(use->IsBinaryOperation() || use->IsNeg());
   DCHECK(CanFitInShifterOperand(bitfield_op));
   DCHECK(!bitfield_op->HasEnvironmentUses());
@@ -170,7 +170,7 @@
   // Check whether we can merge the instruction in all its users' shifter operand.
   for (const HUseListNode<HInstruction*>& use : uses) {
     HInstruction* user = use.GetUser();
-    if (!HasShifterOperand(user, kArm64)) {
+    if (!HasShifterOperand(user, InstructionSet::kArm64)) {
       return false;
     }
     if (!CanMergeIntoShifterOperand(user, bitfield_op)) {
@@ -218,7 +218,7 @@
 }
 
 void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) {
-  if (TryCombineMultiplyAccumulate(instruction, kArm64)) {
+  if (TryCombineMultiplyAccumulate(instruction, InstructionSet::kArm64)) {
     RecordSimplification();
   }
 }
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 1c13084..ccdcb35 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -90,13 +90,13 @@
 bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) {
   DataType::Type type = mul->GetType();
   switch (isa) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       if (type != DataType::Type::kInt32) {
         return false;
       }
       break;
-    case kArm64:
+    case InstructionSet::kArm64:
       if (!DataType::IsIntOrLongType(type)) {
         return false;
       }
@@ -148,7 +148,7 @@
         mul->GetBlock()->RemoveInstruction(mul);
         return true;
       }
-    } else if (use->IsNeg() && isa != kArm) {
+    } else if (use->IsNeg() && isa != InstructionSet::kArm) {
       HMultiplyAccumulate* mulacc =
           new (allocator) HMultiplyAccumulate(type,
                                               HInstruction::kSub,
diff --git a/compiler/optimizing/instruction_simplifier_shared.h b/compiler/optimizing/instruction_simplifier_shared.h
index b016a87..758fc76 100644
--- a/compiler/optimizing/instruction_simplifier_shared.h
+++ b/compiler/optimizing/instruction_simplifier_shared.h
@@ -41,7 +41,8 @@
 inline bool HasShifterOperand(HInstruction* instr, InstructionSet isa) {
   // On ARM64 `neg` instructions are an alias of `sub` using the zero register
   // as the first register input.
-  bool res = instr->IsAdd() || instr->IsAnd() || (isa == kArm64 && instr->IsNeg()) ||
+  bool res = instr->IsAdd() || instr->IsAnd() ||
+      (isa == InstructionSet::kArm64 && instr->IsNeg()) ||
       instr->IsOr() || instr->IsSub() || instr->IsXor();
   return res;
 }
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 74de077..c672dae 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -1414,8 +1414,8 @@
 
 uint32_t HLoopOptimization::GetVectorSizeInBytes() {
   switch (compiler_driver_->GetInstructionSet()) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return 8;  // 64-bit SIMD
     default:
       return 16;  // 128-bit SIMD
@@ -1425,8 +1425,8 @@
 bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) {
   const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures();
   switch (compiler_driver_->GetInstructionSet()) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       // Allow vectorization for all ARM devices, because Android assumes that
       // ARM 32-bit always supports advanced SIMD (64-bit SIMD).
       switch (type) {
@@ -1446,7 +1446,7 @@
           break;
       }
       return false;
-    case kArm64:
+    case InstructionSet::kArm64:
       // Allow vectorization for all ARM devices, because Android assumes that
       // ARMv8 AArch64 always supports advanced SIMD (128-bit SIMD).
       switch (type) {
@@ -1474,8 +1474,8 @@
         default:
           return false;
       }
-    case kX86:
-    case kX86_64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
       // Allow vectorization for SSE4.1-enabled X86 devices only (128-bit SIMD).
       if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) {
         switch (type) {
@@ -1506,7 +1506,7 @@
         }  // switch type
       }
       return false;
-    case kMips:
+    case InstructionSet::kMips:
       if (features->AsMipsInstructionSetFeatures()->HasMsa()) {
         switch (type) {
           case DataType::Type::kBool:
@@ -1535,7 +1535,7 @@
         }  // switch type
       }
       return false;
-    case kMips64:
+    case InstructionSet::kMips64:
       if (features->AsMips64InstructionSetFeatures()->HasMsa()) {
         switch (type) {
           case DataType::Type::kBool:
@@ -2170,7 +2170,7 @@
 uint32_t HLoopOptimization::GetUnrollingFactor(HBasicBlock* block, int64_t trip_count) {
   uint32_t max_peel = MaxNumberPeeled();
   switch (compiler_driver_->GetInstructionSet()) {
-    case kArm64: {
+    case InstructionSet::kArm64: {
       // Don't unroll with insufficient iterations.
       // TODO: Unroll loops with unknown trip count.
       DCHECK_NE(vector_length_, 0u);
@@ -2192,8 +2192,8 @@
       DCHECK_GE(unroll_factor, 1u);
       return unroll_factor;
     }
-    case kX86:
-    case kX86_64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
     default:
       return kNoUnrollingFactor;
   }
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index b7380b0..4ad2996 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -153,15 +153,15 @@
   InternalCodeAllocator code_allocator_;
 };
 
-#define TEST_ISA(isa)                                         \
-  TEST_F(OptimizingCFITest, isa) {                            \
-    std::vector<uint8_t> expected_asm(                        \
-        expected_asm_##isa,                                   \
-        expected_asm_##isa + arraysize(expected_asm_##isa));  \
-    std::vector<uint8_t> expected_cfi(                        \
-        expected_cfi_##isa,                                   \
-        expected_cfi_##isa + arraysize(expected_cfi_##isa));  \
-    TestImpl(isa, #isa, expected_asm, expected_cfi);          \
+#define TEST_ISA(isa)                                                 \
+  TEST_F(OptimizingCFITest, isa) {                                    \
+    std::vector<uint8_t> expected_asm(                                \
+        expected_asm_##isa,                                           \
+        expected_asm_##isa + arraysize(expected_asm_##isa));          \
+    std::vector<uint8_t> expected_cfi(                                \
+        expected_cfi_##isa,                                           \
+        expected_cfi_##isa + arraysize(expected_cfi_##isa));          \
+    TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi);  \
   }
 
 #ifdef ART_ENABLE_CODEGEN_arm
@@ -204,7 +204,7 @@
   std::vector<uint8_t> expected_cfi(
       expected_cfi_kThumb2_adjust,
       expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust));
-  SetUpFrame(kThumb2);
+  SetUpFrame(InstructionSet::kThumb2);
 #define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \
     ->GetAssembler())->GetVIXLAssembler()->
   vixl32::Label target;
@@ -216,7 +216,7 @@
   __ Bind(&target);
 #undef __
   Finish();
-  Check(kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
+  Check(InstructionSet::kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
 }
 #endif
 
@@ -235,7 +235,7 @@
   std::vector<uint8_t> expected_cfi(
       expected_cfi_kMips_adjust,
       expected_cfi_kMips_adjust + arraysize(expected_cfi_kMips_adjust));
-  SetUpFrame(kMips);
+  SetUpFrame(InstructionSet::kMips);
 #define __ down_cast<mips::MipsAssembler*>(GetCodeGenerator()->GetAssembler())->
   mips::MipsLabel target;
   __ Beqz(mips::A0, &target);
@@ -246,7 +246,7 @@
   __ Bind(&target);
 #undef __
   Finish();
-  Check(kMips, "kMips_adjust", expected_asm, expected_cfi);
+  Check(InstructionSet::kMips, "kMips_adjust", expected_asm, expected_cfi);
 }
 #endif
 
@@ -265,7 +265,7 @@
   std::vector<uint8_t> expected_cfi(
       expected_cfi_kMips64_adjust,
       expected_cfi_kMips64_adjust + arraysize(expected_cfi_kMips64_adjust));
-  SetUpFrame(kMips64);
+  SetUpFrame(InstructionSet::kMips64);
 #define __ down_cast<mips64::Mips64Assembler*>(GetCodeGenerator()->GetAssembler())->
   mips64::Mips64Label target;
   __ Beqc(mips64::A1, mips64::A2, &target);
@@ -276,7 +276,7 @@
   __ Bind(&target);
 #undef __
   Finish();
-  Check(kMips64, "kMips64_adjust", expected_asm, expected_cfi);
+  Check(InstructionSet::kMips64, "kMips64_adjust", expected_asm, expected_cfi);
 }
 #endif
 
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 29319f8..9233eb5 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -437,13 +437,13 @@
 }
 
 static bool IsInstructionSetSupported(InstructionSet instruction_set) {
-  return instruction_set == kArm
-      || instruction_set == kArm64
-      || instruction_set == kThumb2
-      || instruction_set == kMips
-      || instruction_set == kMips64
-      || instruction_set == kX86
-      || instruction_set == kX86_64;
+  return instruction_set == InstructionSet::kArm
+      || instruction_set == InstructionSet::kArm64
+      || instruction_set == InstructionSet::kThumb2
+      || instruction_set == InstructionSet::kMips
+      || instruction_set == InstructionSet::kMips64
+      || instruction_set == InstructionSet::kX86
+      || instruction_set == InstructionSet::kX86_64;
 }
 
 // Strip pass name suffix to get optimization name.
@@ -637,8 +637,8 @@
   ArenaAllocator* allocator = graph->GetAllocator();
   switch (instruction_set) {
 #if defined(ART_ENABLE_CODEGEN_arm)
-    case kThumb2:
-    case kArm: {
+    case InstructionSet::kThumb2:
+    case InstructionSet::kArm: {
       arm::InstructionSimplifierArm* simplifier =
           new (allocator) arm::InstructionSimplifierArm(graph, stats);
       SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -657,7 +657,7 @@
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64: {
+    case InstructionSet::kArm64: {
       arm64::InstructionSimplifierArm64* simplifier =
           new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
       SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -676,7 +676,7 @@
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips: {
+    case InstructionSet::kMips: {
       mips::InstructionSimplifierMips* simplifier =
           new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
       SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -695,7 +695,7 @@
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64: {
+    case InstructionSet::kMips64: {
       SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
       GVNOptimization* gvn =
           new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -708,7 +708,7 @@
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86: {
+    case InstructionSet::kX86: {
       SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
       GVNOptimization* gvn =
           new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -727,7 +727,7 @@
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64: {
+    case InstructionSet::kX86_64: {
       SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
       GVNOptimization* gvn =
           new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -949,7 +949,7 @@
 
   // Always use the Thumb-2 assembler: some runtime functionality
   // (like implicit stack overflow checks) assume Thumb-2.
-  DCHECK_NE(instruction_set, kArm);
+  DCHECK_NE(instruction_set, InstructionSet::kArm);
 
   // Do not attempt to compile on architectures we do not support.
   if (!IsInstructionSetSupported(instruction_set)) {
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 86e9713..bad73e1 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -70,13 +70,13 @@
 
 bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
                                                 InstructionSet instruction_set) {
-  return instruction_set == kArm
-      || instruction_set == kArm64
-      || instruction_set == kMips
-      || instruction_set == kMips64
-      || instruction_set == kThumb2
-      || instruction_set == kX86
-      || instruction_set == kX86_64;
+  return instruction_set == InstructionSet::kArm
+      || instruction_set == InstructionSet::kArm64
+      || instruction_set == InstructionSet::kMips
+      || instruction_set == InstructionSet::kMips64
+      || instruction_set == InstructionSet::kThumb2
+      || instruction_set == InstructionSet::kX86
+      || instruction_set == InstructionSet::kX86_64;
 }
 
 class AllRangesIterator : public ValueObject {
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 57eb762..8cc376c 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -796,7 +796,7 @@
 
   switch (instruction_set_) {
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64: {
+    case InstructionSet::kArm64: {
       arm64::HSchedulerARM64 scheduler(&allocator, selector);
       scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
       scheduler.Schedule(graph_);
@@ -804,8 +804,8 @@
     }
 #endif
 #if defined(ART_ENABLE_CODEGEN_arm)
-    case kThumb2:
-    case kArm: {
+    case InstructionSet::kThumb2:
+    case InstructionSet::kArm: {
       arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
       arm::HSchedulerARM scheduler(&allocator, selector, &arm_latency_visitor);
       scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index dfc1633..75dce81 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -43,22 +43,22 @@
   ::std::vector<CodegenTargetConfig> test_config_candidates = {
 #ifdef ART_ENABLE_CODEGEN_arm
     // TODO: Should't this be `kThumb2` instead of `kArm` here?
-    CodegenTargetConfig(kArm, create_codegen_arm_vixl32),
+    CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32),
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-    CodegenTargetConfig(kArm64, create_codegen_arm64),
+    CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64),
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    CodegenTargetConfig(kX86, create_codegen_x86),
+    CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86),
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    CodegenTargetConfig(kX86_64, create_codegen_x86_64),
+    CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    CodegenTargetConfig(kMips, create_codegen_mips),
+    CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    CodegenTargetConfig(kMips64, create_codegen_mips64)
+    CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
 #endif
   };
 
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 91f86d5..7e517f3 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -928,18 +928,24 @@
 
 TEST(StackMapTest, CodeOffsetTest) {
   // Test minimum alignments, encoding, and decoding.
-  CodeOffset offset_thumb2 = CodeOffset::FromOffset(kThumb2InstructionAlignment, kThumb2);
-  CodeOffset offset_arm64 = CodeOffset::FromOffset(kArm64InstructionAlignment, kArm64);
-  CodeOffset offset_x86 = CodeOffset::FromOffset(kX86InstructionAlignment, kX86);
-  CodeOffset offset_x86_64 = CodeOffset::FromOffset(kX86_64InstructionAlignment, kX86_64);
-  CodeOffset offset_mips = CodeOffset::FromOffset(kMipsInstructionAlignment, kMips);
-  CodeOffset offset_mips64 = CodeOffset::FromOffset(kMips64InstructionAlignment, kMips64);
-  EXPECT_EQ(offset_thumb2.Uint32Value(kThumb2), kThumb2InstructionAlignment);
-  EXPECT_EQ(offset_arm64.Uint32Value(kArm64), kArm64InstructionAlignment);
-  EXPECT_EQ(offset_x86.Uint32Value(kX86), kX86InstructionAlignment);
-  EXPECT_EQ(offset_x86_64.Uint32Value(kX86_64), kX86_64InstructionAlignment);
-  EXPECT_EQ(offset_mips.Uint32Value(kMips), kMipsInstructionAlignment);
-  EXPECT_EQ(offset_mips64.Uint32Value(kMips64), kMips64InstructionAlignment);
+  CodeOffset offset_thumb2 =
+      CodeOffset::FromOffset(kThumb2InstructionAlignment, InstructionSet::kThumb2);
+  CodeOffset offset_arm64 =
+      CodeOffset::FromOffset(kArm64InstructionAlignment, InstructionSet::kArm64);
+  CodeOffset offset_x86 =
+      CodeOffset::FromOffset(kX86InstructionAlignment, InstructionSet::kX86);
+  CodeOffset offset_x86_64 =
+      CodeOffset::FromOffset(kX86_64InstructionAlignment, InstructionSet::kX86_64);
+  CodeOffset offset_mips =
+      CodeOffset::FromOffset(kMipsInstructionAlignment, InstructionSet::kMips);
+  CodeOffset offset_mips64 =
+      CodeOffset::FromOffset(kMips64InstructionAlignment, InstructionSet::kMips64);
+  EXPECT_EQ(offset_thumb2.Uint32Value(InstructionSet::kThumb2), kThumb2InstructionAlignment);
+  EXPECT_EQ(offset_arm64.Uint32Value(InstructionSet::kArm64), kArm64InstructionAlignment);
+  EXPECT_EQ(offset_x86.Uint32Value(InstructionSet::kX86), kX86InstructionAlignment);
+  EXPECT_EQ(offset_x86_64.Uint32Value(InstructionSet::kX86_64), kX86_64InstructionAlignment);
+  EXPECT_EQ(offset_mips.Uint32Value(InstructionSet::kMips), kMipsInstructionAlignment);
+  EXPECT_EQ(offset_mips64.Uint32Value(InstructionSet::kMips64), kMips64InstructionAlignment);
 }
 
 TEST(StackMapTest, TestDeduplicateStackMask) {
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 9527a60..921d401 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -247,15 +247,15 @@
   ArenaAllocator allocator(&pool);
   switch (isa) {
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64:
+    case InstructionSet::kArm64:
       return arm64::CreateTrampoline(&allocator, abi, offset);
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64:
+    case InstructionSet::kMips64:
       return mips64::CreateTrampoline(&allocator, abi, offset);
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return x86_64::CreateTrampoline(&allocator, offset);
 #endif
     default:
@@ -273,16 +273,16 @@
   ArenaAllocator allocator(&pool);
   switch (isa) {
 #ifdef ART_ENABLE_CODEGEN_arm
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return arm::CreateTrampoline(&allocator, abi, offset);
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips:
+    case InstructionSet::kMips:
       return mips::CreateTrampoline(&allocator, abi, offset);
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86:
+    case InstructionSet::kX86:
       UNUSED(abi);
       return x86::CreateTrampoline(&allocator, offset);
 #endif
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index e239004..c13c9af 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -232,7 +232,7 @@
 class ArmVIXLJNIMacroLabel FINAL
     : public JNIMacroLabelCommon<ArmVIXLJNIMacroLabel,
                                  vixl32::Label,
-                                 kArm> {
+                                 InstructionSet::kArm> {
  public:
   vixl32::Label* AsArm() {
     return AsPlatformLabel();
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index fda87aa..ce39a13 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -235,7 +235,7 @@
 class Arm64JNIMacroLabel FINAL
     : public JNIMacroLabelCommon<Arm64JNIMacroLabel,
                                  vixl::aarch64::Label,
-                                 kArm64> {
+                                 InstructionSet::kArm64> {
  public:
   vixl::aarch64::Label* AsArm64() {
     return AsPlatformLabel();
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 5307d17..655d17d 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -81,7 +81,7 @@
 
   if (toolsdir.empty()) {
     setup_results();
-    toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(kThumb2);
+    toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(InstructionSet::kThumb2);
     SetAndroidData();
   }
 
@@ -215,10 +215,10 @@
                                    is_synchronized,
                                    is_critical_native,
                                    shorty,
-                                   kThumb2));
+                                   InstructionSet::kThumb2));
   std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
       ManagedRuntimeCallingConvention::Create(
-          &allocator, is_static, is_synchronized, shorty, kThumb2));
+          &allocator, is_static, is_synchronized, shorty, InstructionSet::kThumb2));
   const int frame_size(jni_conv->FrameSize());
   ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();
 
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 0616b35..3f7691b 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -56,12 +56,12 @@
 
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return MacroAsm32UniquePtr(new (allocator) arm::ArmVIXLJNIMacroAssembler(allocator));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips:
+    case InstructionSet::kMips:
       return MacroAsm32UniquePtr(new (allocator) mips::MipsAssembler(
           allocator,
           instruction_set_features != nullptr
@@ -69,7 +69,7 @@
               : nullptr));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
-    case kX86:
+    case InstructionSet::kX86:
       return MacroAsm32UniquePtr(new (allocator) x86::X86JNIMacroAssembler(allocator));
 #endif
     default:
@@ -91,11 +91,11 @@
 
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64:
+    case InstructionSet::kArm64:
       return MacroAsm64UniquePtr(new (allocator) arm64::Arm64JNIMacroAssembler(allocator));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64:
+    case InstructionSet::kMips64:
       return MacroAsm64UniquePtr(new (allocator) mips64::Mips64Assembler(
           allocator,
           instruction_set_features != nullptr
@@ -103,7 +103,7 @@
               : nullptr));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return MacroAsm64UniquePtr(new (allocator) x86_64::X86_64JNIMacroAssembler(allocator));
 #endif
     default:
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 56eaf19..99219d8 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -171,7 +171,7 @@
 class X86JNIMacroLabel FINAL
     : public JNIMacroLabelCommon<X86JNIMacroLabel,
                                  art::Label,
-                                 kX86> {
+                                 InstructionSet::kX86> {
  public:
   art::Label* AsX86() {
     return AsPlatformLabel();
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index d1a3032..d766ad4 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -197,7 +197,7 @@
 class X86_64JNIMacroLabel FINAL
     : public JNIMacroLabelCommon<X86_64JNIMacroLabel,
                                  art::Label,
-                                 kX86_64> {
+                                 InstructionSet::kX86_64> {
  public:
   art::Label* AsX86_64() {
     return AsPlatformLabel();
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 1cd9142..46474d2 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -589,7 +589,7 @@
  public:
   explicit Dex2Oat(TimingLogger* timings) :
       compiler_kind_(Compiler::kOptimizing),
-      instruction_set_(kRuntimeISA == kArm ? kThumb2 : kRuntimeISA),
+      instruction_set_(kRuntimeISA == InstructionSet::kArm ? InstructionSet::kThumb2 : kRuntimeISA),
       // Take the default set of instruction features from the build.
       image_file_location_oat_checksum_(0),
       image_file_location_oat_data_begin_(0),
@@ -893,13 +893,13 @@
     // Checks are all explicit until we know the architecture.
     // Set the compilation target's implicit checks options.
     switch (instruction_set_) {
-      case kArm:
-      case kThumb2:
-      case kArm64:
-      case kX86:
-      case kX86_64:
-      case kMips:
-      case kMips64:
+      case InstructionSet::kArm:
+      case InstructionSet::kThumb2:
+      case InstructionSet::kArm64:
+      case InstructionSet::kX86:
+      case InstructionSet::kX86_64:
+      case InstructionSet::kMips:
+      case InstructionSet::kMips64:
         compiler_options_->implicit_null_checks_ = true;
         compiler_options_->implicit_so_checks_ = true;
         break;
diff --git a/dex2oat/dex2oat_options.cc b/dex2oat/dex2oat_options.cc
index 3606c61..2cf0701 100644
--- a/dex2oat/dex2oat_options.cc
+++ b/dex2oat/dex2oat_options.cc
@@ -27,7 +27,7 @@
 struct CmdlineType<InstructionSet> : CmdlineTypeParser<InstructionSet> {
   Result Parse(const std::string& option) {
     InstructionSet set = GetInstructionSetFromString(option.c_str());
-    if (set == kNone) {
+    if (set == InstructionSet::kNone) {
       return Result::Failure(std::string("Not a valid instruction set: '") + option + "'");
     }
     return Result::Success(set);
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index 93f5a1d..b139a12 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -227,7 +227,8 @@
   if (bss_size_ != 0u) {
     builder_->GetBss()->WriteNoBitsSection(bss_size_);
   }
-  if (builder_->GetIsa() == kMips || builder_->GetIsa() == kMips64) {
+  if (builder_->GetIsa() == InstructionSet::kMips ||
+      builder_->GetIsa() == InstructionSet::kMips64) {
     builder_->WriteMIPSabiflagsSection();
   }
   builder_->WriteDynamicSection();
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 3efebfd..1ee2e4e 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -392,7 +392,7 @@
 
   // TODO: make selectable.
   Compiler::Kind compiler_kind = Compiler::kQuick;
-  InstructionSet insn_set = kIsTargetBuild ? kThumb2 : kX86;
+  InstructionSet insn_set = kIsTargetBuild ? InstructionSet::kThumb2 : InstructionSet::kX86;
   std::string error_msg;
   SetupCompiler(compiler_kind, insn_set, std::vector<std::string>(), /*out*/ &error_msg);
 
@@ -491,7 +491,7 @@
 }
 
 TEST_F(OatTest, OatHeaderIsValid) {
-  InstructionSet insn_set = kX86;
+  InstructionSet insn_set = InstructionSet::kX86;
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> insn_features(
     InstructionSetFeatures::FromVariant(insn_set, "default", &error_msg));
@@ -516,7 +516,7 @@
   // TODO: make selectable.
   Compiler::Kind compiler_kind = Compiler::kQuick;
   InstructionSet insn_set = kRuntimeISA;
-  if (insn_set == kArm) insn_set = kThumb2;
+  if (insn_set == InstructionSet::kArm) insn_set = InstructionSet::kThumb2;
   std::string error_msg;
   std::vector<std::string> compiler_options;
   compiler_options.push_back("--compiler-filter=extract");
@@ -844,7 +844,7 @@
 }
 
 TEST_F(OatTest, UpdateChecksum) {
-  InstructionSet insn_set = kX86;
+  InstructionSet insn_set = InstructionSet::kX86;
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> insn_features(
     InstructionSetFeatures::FromVariant(insn_set, "default", &error_msg));
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
index 2c57e40..7e9ecab 100644
--- a/dexoptanalyzer/dexoptanalyzer.cc
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -161,7 +161,7 @@
       } else if (option.starts_with("--isa=")) {
         std::string isa_str = option.substr(strlen("--isa=")).ToString();
         isa_ = GetInstructionSetFromString(isa_str.c_str());
-        if (isa_ == kNone) {
+        if (isa_ == InstructionSet::kNone) {
           Usage("Invalid isa '%s'", option.data());
         }
       } else if (option.starts_with("--image=")) {
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index 5af51c1..2ed41c8 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -36,17 +36,17 @@
 }
 
 Disassembler* Disassembler::Create(InstructionSet instruction_set, DisassemblerOptions* options) {
-  if (instruction_set == kArm || instruction_set == kThumb2) {
+  if (instruction_set == InstructionSet::kArm || instruction_set == InstructionSet::kThumb2) {
     return new arm::DisassemblerArm(options);
-  } else if (instruction_set == kArm64) {
+  } else if (instruction_set == InstructionSet::kArm64) {
     return new arm64::DisassemblerArm64(options);
-  } else if (instruction_set == kMips) {
+  } else if (instruction_set == InstructionSet::kMips) {
     return new mips::DisassemblerMips(options, /* is_o32_abi */ true);
-  } else if (instruction_set == kMips64) {
+  } else if (instruction_set == InstructionSet::kMips64) {
     return new mips::DisassemblerMips(options, /* is_o32_abi */ false);
-  } else if (instruction_set == kX86) {
+  } else if (instruction_set == InstructionSet::kX86) {
     return new x86::DisassemblerX86(options, false);
-  } else if (instruction_set == kX86_64) {
+  } else if (instruction_set == InstructionSet::kX86_64) {
     return new x86::DisassemblerX86(options, true);
   } else {
     UNIMPLEMENTED(FATAL) << static_cast<uint32_t>(instruction_set);
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 7064fa3..21eb207 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -168,7 +168,7 @@
       bss->WriteNoBitsSection(oat_file_->BssSize());
     }
 
-    if (isa == kMips || isa == kMips64) {
+    if (isa == InstructionSet::kMips || isa == InstructionSet::kMips64) {
       builder_->WriteMIPSabiflagsSection();
     }
     builder_->PrepareDynamicSection(elf_file->GetPath(),
@@ -916,7 +916,7 @@
 
   void AddOffsets(const OatFile::OatMethod& oat_method) {
     uint32_t code_offset = oat_method.GetCodeOffset();
-    if (oat_file_.GetOatHeader().GetInstructionSet() == kThumb2) {
+    if (oat_file_.GetOatHeader().GetInstructionSet() == InstructionSet::kThumb2) {
       code_offset &= ~0x1;
     }
     offsets_.insert(code_offset);
@@ -2269,7 +2269,7 @@
     if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) {
       quick_code = oat_dumper_->GetQuickOatCode(m);
     }
-    if (oat_dumper_->GetInstructionSet() == kThumb2) {
+    if (oat_dumper_->GetInstructionSet() == InstructionSet::kThumb2) {
       quick_code = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(quick_code) & ~0x1);
     }
     return quick_code;
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index edce5b4..ae82d72 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -129,7 +129,7 @@
 
   TimingLogger::ScopedTiming t("Runtime Setup", timings);
 
-  CHECK_NE(isa, kNone);
+  CHECK_NE(isa, InstructionSet::kNone);
   const char* isa_name = GetInstructionSetString(isa);
 
   // Set up the runtime
@@ -807,7 +807,7 @@
 
   // cmd line args
   bool isa_set = false;
-  InstructionSet isa = kNone;
+  InstructionSet isa = InstructionSet::kNone;
   std::string input_image_location;
   std::string output_image_filename;
   off_t base_delta = 0;
@@ -824,7 +824,7 @@
       isa_set = true;
       const char* isa_str = option.substr(strlen("--instruction-set=")).data();
       isa = GetInstructionSetFromString(isa_str);
-      if (isa == kNone) {
+      if (isa == InstructionSet::kNone) {
         Usage("Unknown or invalid instruction set %s", isa_str);
       }
     } else if (option.starts_with("--input-image-location=")) {
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 5c31378..ef2b342 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -59,7 +59,7 @@
   // get the method from the top of the stack.  However it's in r0.
   uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(sc->fault_address);
   uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
-      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kArm));
+      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kArm));
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(sc->arm_r0);
   } else {
@@ -209,7 +209,7 @@
   VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
     ", fault_addr: " << fault_addr;
 
-  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kArm);
+  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kArm);
 
   // Check that the fault address is the value expected for a stack overflow.
   if (fault_addr != overflow_addr) {
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index 2496968..b789fc7 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -266,7 +266,7 @@
 }
 
 bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
-  if (kArm != other->GetInstructionSet()) {
+  if (InstructionSet::kArm != other->GetInstructionSet()) {
     return false;
   }
   const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
@@ -276,7 +276,7 @@
 }
 
 bool ArmInstructionSetFeatures::HasAtLeast(const InstructionSetFeatures* other) const {
-  if (kArm != other->GetInstructionSet()) {
+  if (InstructionSet::kArm != other->GetInstructionSet()) {
     return false;
   }
   const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
index f438a76..f82534b 100644
--- a/runtime/arch/arm/instruction_set_features_arm.h
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -52,7 +52,7 @@
   bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
 
   InstructionSet GetInstructionSet() const OVERRIDE {
-    return kArm;
+    return InstructionSet::kArm;
   }
 
   uint32_t AsBitmap() const OVERRIDE;
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
index 3582351..d9651f9 100644
--- a/runtime/arch/arm/instruction_set_features_arm_test.cc
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -24,10 +24,10 @@
   // Build features for a 32-bit ARM krait processor.
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> krait_features(
-      InstructionSetFeatures::FromVariant(kArm, "krait", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm, "krait", &error_msg));
   ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
 
-  ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
+  ASSERT_EQ(krait_features->GetInstructionSet(), InstructionSet::kArm);
   EXPECT_TRUE(krait_features->Equals(krait_features.get()));
   EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
   EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
@@ -36,10 +36,10 @@
 
   // Build features for a 32-bit ARM kryo processor.
   std::unique_ptr<const InstructionSetFeatures> kryo_features(
-      InstructionSetFeatures::FromVariant(kArm, "kryo", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm, "kryo", &error_msg));
   ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
 
-  ASSERT_EQ(kryo_features->GetInstructionSet(), kArm);
+  ASSERT_EQ(kryo_features->GetInstructionSet(), InstructionSet::kArm);
   EXPECT_TRUE(kryo_features->Equals(kryo_features.get()));
   EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
   EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
@@ -48,7 +48,7 @@
 
   // Build features for a 32-bit ARM denver processor.
   std::unique_ptr<const InstructionSetFeatures> denver_features(
-      InstructionSetFeatures::FromVariant(kArm, "denver", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm, "denver", &error_msg));
   ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
 
   EXPECT_TRUE(denver_features->Equals(denver_features.get()));
@@ -62,7 +62,7 @@
 
   // Build features for a 32-bit ARMv7 processor.
   std::unique_ptr<const InstructionSetFeatures> generic_features(
-      InstructionSetFeatures::FromVariant(kArm, "generic", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm, "generic", &error_msg));
   ASSERT_TRUE(generic_features.get() != nullptr) << error_msg;
 
   EXPECT_TRUE(generic_features->Equals(generic_features.get()));
@@ -75,7 +75,7 @@
 
   // ARM6 is not a supported architecture variant.
   std::unique_ptr<const InstructionSetFeatures> arm6_features(
-      InstructionSetFeatures::FromVariant(kArm, "arm6", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm, "arm6", &error_msg));
   EXPECT_TRUE(arm6_features.get() == nullptr);
   EXPECT_NE(error_msg.size(), 0U);
 }
@@ -83,7 +83,7 @@
 TEST(ArmInstructionSetFeaturesTest, ArmAddFeaturesFromString) {
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> base_features(
-      InstructionSetFeatures::FromVariant(kArm, "generic", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm, "generic", &error_msg));
   ASSERT_TRUE(base_features.get() != nullptr) << error_msg;
 
   // Build features for a 32-bit ARM with LPAE and div processor.
@@ -91,7 +91,7 @@
       base_features->AddFeaturesFromString("atomic_ldrd_strd,div", &error_msg));
   ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
 
-  ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
+  ASSERT_EQ(krait_features->GetInstructionSet(), InstructionSet::kArm);
   EXPECT_TRUE(krait_features->Equals(krait_features.get()));
   EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
   EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
@@ -103,7 +103,7 @@
       base_features->AddFeaturesFromString("atomic_ldrd_strd,div", &error_msg));
   ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
 
-  ASSERT_EQ(kryo_features->GetInstructionSet(), kArm);
+  ASSERT_EQ(kryo_features->GetInstructionSet(), InstructionSet::kArm);
   EXPECT_TRUE(kryo_features->Equals(krait_features.get()));
   EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
   EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index b9f9d55..d535c7e 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -52,7 +52,7 @@
   // get the method from the top of the stack.  However it's in x0.
   uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(sc->fault_address);
   uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
-      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kArm64));
+      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kArm64));
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(sc->regs[0]);
   } else {
@@ -164,7 +164,7 @@
   VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
       ", fault_addr: " << fault_addr;
 
-  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kArm64);
+  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kArm64);
 
   // Check that the fault address is the value expected for a stack overflow.
   if (fault_addr != overflow_addr) {
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index e5f6f11..d830ccf 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -98,7 +98,7 @@
 }
 
 bool Arm64InstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
-  if (kArm64 != other->GetInstructionSet()) {
+  if (InstructionSet::kArm64 != other->GetInstructionSet()) {
     return false;
   }
   const Arm64InstructionSetFeatures* other_as_arm64 = other->AsArm64InstructionSetFeatures();
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index 4243d32..af2d4c7 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -50,7 +50,7 @@
   bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
 
   InstructionSet GetInstructionSet() const OVERRIDE {
-    return kArm64;
+    return InstructionSet::kArm64;
   }
 
   uint32_t AsBitmap() const OVERRIDE;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index 91cb58f..7fd39b6 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -24,41 +24,41 @@
   // Build features for an ARM64 processor.
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> arm64_features(
-      InstructionSetFeatures::FromVariant(kArm64, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "default", &error_msg));
   ASSERT_TRUE(arm64_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(arm64_features->GetInstructionSet(), kArm64);
+  EXPECT_EQ(arm64_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(arm64_features->Equals(arm64_features.get()));
   EXPECT_STREQ("a53", arm64_features->GetFeatureString().c_str());
   EXPECT_EQ(arm64_features->AsBitmap(), 1U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a57_features(
-      InstructionSetFeatures::FromVariant(kArm64, "cortex-a57", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a57", &error_msg));
   ASSERT_TRUE(cortex_a57_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(cortex_a57_features->GetInstructionSet(), kArm64);
+  EXPECT_EQ(cortex_a57_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(cortex_a57_features->Equals(cortex_a57_features.get()));
   EXPECT_STREQ("a53", cortex_a57_features->GetFeatureString().c_str());
   EXPECT_EQ(cortex_a57_features->AsBitmap(), 1U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a73_features(
-      InstructionSetFeatures::FromVariant(kArm64, "cortex-a73", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a73", &error_msg));
   ASSERT_TRUE(cortex_a73_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(cortex_a73_features->GetInstructionSet(), kArm64);
+  EXPECT_EQ(cortex_a73_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(cortex_a73_features->Equals(cortex_a73_features.get()));
   EXPECT_STREQ("a53", cortex_a73_features->GetFeatureString().c_str());
   EXPECT_EQ(cortex_a73_features->AsBitmap(), 1U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a35_features(
-      InstructionSetFeatures::FromVariant(kArm64, "cortex-a35", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a35", &error_msg));
   ASSERT_TRUE(cortex_a35_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(cortex_a35_features->GetInstructionSet(), kArm64);
+  EXPECT_EQ(cortex_a35_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(cortex_a35_features->Equals(cortex_a35_features.get()));
   EXPECT_STREQ("-a53", cortex_a35_features->GetFeatureString().c_str());
   EXPECT_EQ(cortex_a35_features->AsBitmap(), 0U);
 
   std::unique_ptr<const InstructionSetFeatures> kryo_features(
-      InstructionSetFeatures::FromVariant(kArm64, "kryo", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "kryo", &error_msg));
   ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(kryo_features->GetInstructionSet(), kArm64);
+  EXPECT_EQ(kryo_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(kryo_features->Equals(kryo_features.get()));
   EXPECT_TRUE(kryo_features->Equals(cortex_a35_features.get()));
   EXPECT_FALSE(kryo_features->Equals(cortex_a57_features.get()));
diff --git a/runtime/arch/instruction_set.cc b/runtime/arch/instruction_set.cc
index 64af7ec..ecccdcf 100644
--- a/runtime/arch/instruction_set.cc
+++ b/runtime/arch/instruction_set.cc
@@ -26,14 +26,14 @@
 
 void InstructionSetAbort(InstructionSet isa) {
   switch (isa) {
-    case kArm:
-    case kThumb2:
-    case kArm64:
-    case kX86:
-    case kX86_64:
-    case kMips:
-    case kMips64:
-    case kNone:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
+    case InstructionSet::kArm64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
+    case InstructionSet::kNone:
       LOG(FATAL) << "Unsupported instruction set " << isa;
       UNREACHABLE();
   }
@@ -43,20 +43,20 @@
 
 const char* GetInstructionSetString(InstructionSet isa) {
   switch (isa) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return "arm";
-    case kArm64:
+    case InstructionSet::kArm64:
       return "arm64";
-    case kX86:
+    case InstructionSet::kX86:
       return "x86";
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return "x86_64";
-    case kMips:
+    case InstructionSet::kMips:
       return "mips";
-    case kMips64:
+    case InstructionSet::kMips64:
       return "mips64";
-    case kNone:
+    case InstructionSet::kNone:
       return "none";
   }
   LOG(FATAL) << "Unknown ISA " << isa;
@@ -67,62 +67,62 @@
   CHECK(isa_str != nullptr);
 
   if (strcmp("arm", isa_str) == 0) {
-    return kArm;
+    return InstructionSet::kArm;
   } else if (strcmp("arm64", isa_str) == 0) {
-    return kArm64;
+    return InstructionSet::kArm64;
   } else if (strcmp("x86", isa_str) == 0) {
-    return kX86;
+    return InstructionSet::kX86;
   } else if (strcmp("x86_64", isa_str) == 0) {
-    return kX86_64;
+    return InstructionSet::kX86_64;
   } else if (strcmp("mips", isa_str) == 0) {
-    return kMips;
+    return InstructionSet::kMips;
   } else if (strcmp("mips64", isa_str) == 0) {
-    return kMips64;
+    return InstructionSet::kMips64;
   }
 
-  return kNone;
+  return InstructionSet::kNone;
 }
 
 InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags) {
   switch (e_machine) {
     case EM_ARM:
-      return kArm;
+      return InstructionSet::kArm;
     case EM_AARCH64:
-      return kArm64;
+      return InstructionSet::kArm64;
     case EM_386:
-      return kX86;
+      return InstructionSet::kX86;
     case EM_X86_64:
-      return kX86_64;
+      return InstructionSet::kX86_64;
     case EM_MIPS: {
       if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R2 ||
           (e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) {
-        return kMips;
+        return InstructionSet::kMips;
       } else if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6) {
-        return kMips64;
+        return InstructionSet::kMips64;
       }
       break;
     }
   }
-  return kNone;
+  return InstructionSet::kNone;
 }
 
 size_t GetInstructionSetAlignment(InstructionSet isa) {
   switch (isa) {
-    case kArm:
+    case InstructionSet::kArm:
       // Fall-through.
-    case kThumb2:
+    case InstructionSet::kThumb2:
       return kArmAlignment;
-    case kArm64:
+    case InstructionSet::kArm64:
       return kArm64Alignment;
-    case kX86:
+    case InstructionSet::kX86:
       // Fall-through.
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return kX86Alignment;
-    case kMips:
+    case InstructionSet::kMips:
       // Fall-through.
-    case kMips64:
+    case InstructionSet::kMips64:
       return kMipsAlignment;
-    case kNone:
+    case InstructionSet::kNone:
       LOG(FATAL) << "ISA kNone does not have alignment.";
       UNREACHABLE();
   }
@@ -171,26 +171,26 @@
 
 size_t GetStackOverflowReservedBytes(InstructionSet isa) {
   switch (isa) {
-    case kArm:      // Intentional fall-through.
-    case kThumb2:
+    case InstructionSet::kArm:      // Intentional fall-through.
+    case InstructionSet::kThumb2:
       return kArmStackOverflowReservedBytes;
 
-    case kArm64:
+    case InstructionSet::kArm64:
       return kArm64StackOverflowReservedBytes;
 
-    case kMips:
+    case InstructionSet::kMips:
       return kMipsStackOverflowReservedBytes;
 
-    case kMips64:
+    case InstructionSet::kMips64:
       return kMips64StackOverflowReservedBytes;
 
-    case kX86:
+    case InstructionSet::kX86:
       return kX86StackOverflowReservedBytes;
 
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return kX86_64StackOverflowReservedBytes;
 
-    case kNone:
+    case InstructionSet::kNone:
       LOG(FATAL) << "kNone has no stack overflow size";
       UNREACHABLE();
   }
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index 7203b18..6434005 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -25,7 +25,7 @@
 
 namespace art {
 
-enum InstructionSet {
+enum class InstructionSet {
   kNone,
   kArm,
   kArm64,
@@ -33,24 +33,25 @@
   kX86,
   kX86_64,
   kMips,
-  kMips64
+  kMips64,
+  kLast = kMips64
 };
 std::ostream& operator<<(std::ostream& os, const InstructionSet& rhs);
 
 #if defined(__arm__)
-static constexpr InstructionSet kRuntimeISA = kArm;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kArm;
 #elif defined(__aarch64__)
-static constexpr InstructionSet kRuntimeISA = kArm64;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kArm64;
 #elif defined(__mips__) && !defined(__LP64__)
-static constexpr InstructionSet kRuntimeISA = kMips;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kMips;
 #elif defined(__mips__) && defined(__LP64__)
-static constexpr InstructionSet kRuntimeISA = kMips64;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kMips64;
 #elif defined(__i386__)
-static constexpr InstructionSet kRuntimeISA = kX86;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kX86;
 #elif defined(__x86_64__)
-static constexpr InstructionSet kRuntimeISA = kX86_64;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kX86_64;
 #else
-static constexpr InstructionSet kRuntimeISA = kNone;
+static constexpr InstructionSet kRuntimeISA = InstructionSet::kNone;
 #endif
 
 // Architecture-specific pointer sizes
@@ -95,22 +96,22 @@
 
 constexpr PointerSize GetInstructionSetPointerSize(InstructionSet isa) {
   switch (isa) {
-    case kArm:
+    case InstructionSet::kArm:
       // Fall-through.
-    case kThumb2:
+    case InstructionSet::kThumb2:
       return kArmPointerSize;
-    case kArm64:
+    case InstructionSet::kArm64:
       return kArm64PointerSize;
-    case kX86:
+    case InstructionSet::kX86:
       return kX86PointerSize;
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return kX86_64PointerSize;
-    case kMips:
+    case InstructionSet::kMips:
       return kMipsPointerSize;
-    case kMips64:
+    case InstructionSet::kMips64:
       return kMips64PointerSize;
 
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   InstructionSetAbort(isa);
@@ -118,22 +119,22 @@
 
 constexpr size_t GetInstructionSetInstructionAlignment(InstructionSet isa) {
   switch (isa) {
-    case kArm:
+    case InstructionSet::kArm:
       // Fall-through.
-    case kThumb2:
+    case InstructionSet::kThumb2:
       return kThumb2InstructionAlignment;
-    case kArm64:
+    case InstructionSet::kArm64:
       return kArm64InstructionAlignment;
-    case kX86:
+    case InstructionSet::kX86:
       return kX86InstructionAlignment;
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return kX86_64InstructionAlignment;
-    case kMips:
+    case InstructionSet::kMips:
       return kMipsInstructionAlignment;
-    case kMips64:
+    case InstructionSet::kMips64:
       return kMips64InstructionAlignment;
 
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   InstructionSetAbort(isa);
@@ -141,16 +142,16 @@
 
 constexpr bool IsValidInstructionSet(InstructionSet isa) {
   switch (isa) {
-    case kArm:
-    case kThumb2:
-    case kArm64:
-    case kX86:
-    case kX86_64:
-    case kMips:
-    case kMips64:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
+    case InstructionSet::kArm64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
       return true;
 
-    case kNone:
+    case InstructionSet::kNone:
       return false;
   }
   return false;
@@ -160,18 +161,18 @@
 
 constexpr bool Is64BitInstructionSet(InstructionSet isa) {
   switch (isa) {
-    case kArm:
-    case kThumb2:
-    case kX86:
-    case kMips:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
+    case InstructionSet::kX86:
+    case InstructionSet::kMips:
       return false;
 
-    case kArm64:
-    case kX86_64:
-    case kMips64:
+    case InstructionSet::kArm64:
+    case InstructionSet::kX86_64:
+    case InstructionSet::kMips64:
       return true;
 
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   InstructionSetAbort(isa);
@@ -183,22 +184,22 @@
 
 constexpr size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
   switch (isa) {
-    case kArm:
+    case InstructionSet::kArm:
       // Fall-through.
-    case kThumb2:
+    case InstructionSet::kThumb2:
       return 4;
-    case kArm64:
+    case InstructionSet::kArm64:
       return 8;
-    case kX86:
+    case InstructionSet::kX86:
       return 4;
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return 8;
-    case kMips:
+    case InstructionSet::kMips:
       return 4;
-    case kMips64:
+    case InstructionSet::kMips64:
       return 8;
 
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   InstructionSetAbort(isa);
@@ -206,22 +207,22 @@
 
 constexpr size_t GetBytesPerFprSpillLocation(InstructionSet isa) {
   switch (isa) {
-    case kArm:
+    case InstructionSet::kArm:
       // Fall-through.
-    case kThumb2:
+    case InstructionSet::kThumb2:
       return 4;
-    case kArm64:
+    case InstructionSet::kArm64:
       return 8;
-    case kX86:
+    case InstructionSet::kX86:
       return 8;
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return 8;
-    case kMips:
+    case InstructionSet::kMips:
       return 4;
-    case kMips64:
+    case InstructionSet::kMips64:
       return 8;
 
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   InstructionSetAbort(isa);
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index ed8ff60..b6b24c2 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -33,21 +33,21 @@
 std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromVariant(
     InstructionSet isa, const std::string& variant, std::string* error_msg) {
   switch (isa) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return ArmInstructionSetFeatures::FromVariant(variant, error_msg);
-    case kArm64:
+    case InstructionSet::kArm64:
       return Arm64InstructionSetFeatures::FromVariant(variant, error_msg);
-    case kMips:
+    case InstructionSet::kMips:
       return MipsInstructionSetFeatures::FromVariant(variant, error_msg);
-    case kMips64:
+    case InstructionSet::kMips64:
       return Mips64InstructionSetFeatures::FromVariant(variant, error_msg);
-    case kX86:
+    case InstructionSet::kX86:
       return X86InstructionSetFeatures::FromVariant(variant, error_msg);
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return X86_64InstructionSetFeatures::FromVariant(variant, error_msg);
 
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   UNIMPLEMENTED(FATAL) << isa;
@@ -58,27 +58,27 @@
                                                                                  uint32_t bitmap) {
   std::unique_ptr<const InstructionSetFeatures> result;
   switch (isa) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       result = ArmInstructionSetFeatures::FromBitmap(bitmap);
       break;
-    case kArm64:
+    case InstructionSet::kArm64:
       result = Arm64InstructionSetFeatures::FromBitmap(bitmap);
       break;
-    case kMips:
+    case InstructionSet::kMips:
       result = MipsInstructionSetFeatures::FromBitmap(bitmap);
       break;
-    case kMips64:
+    case InstructionSet::kMips64:
       result = Mips64InstructionSetFeatures::FromBitmap(bitmap);
       break;
-    case kX86:
+    case InstructionSet::kX86:
       result = X86InstructionSetFeatures::FromBitmap(bitmap);
       break;
-    case kX86_64:
+    case InstructionSet::kX86_64:
       result = X86_64InstructionSetFeatures::FromBitmap(bitmap);
       break;
 
-    case kNone:
+    case InstructionSet::kNone:
     default:
       UNIMPLEMENTED(FATAL) << isa;
       UNREACHABLE();
@@ -89,21 +89,21 @@
 
 std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCppDefines() {
   switch (kRuntimeISA) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return ArmInstructionSetFeatures::FromCppDefines();
-    case kArm64:
+    case InstructionSet::kArm64:
       return Arm64InstructionSetFeatures::FromCppDefines();
-    case kMips:
+    case InstructionSet::kMips:
       return MipsInstructionSetFeatures::FromCppDefines();
-    case kMips64:
+    case InstructionSet::kMips64:
       return Mips64InstructionSetFeatures::FromCppDefines();
-    case kX86:
+    case InstructionSet::kX86:
       return X86InstructionSetFeatures::FromCppDefines();
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return X86_64InstructionSetFeatures::FromCppDefines();
 
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -113,21 +113,21 @@
 
 std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCpuInfo() {
   switch (kRuntimeISA) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return ArmInstructionSetFeatures::FromCpuInfo();
-    case kArm64:
+    case InstructionSet::kArm64:
       return Arm64InstructionSetFeatures::FromCpuInfo();
-    case kMips:
+    case InstructionSet::kMips:
       return MipsInstructionSetFeatures::FromCpuInfo();
-    case kMips64:
+    case InstructionSet::kMips64:
       return Mips64InstructionSetFeatures::FromCpuInfo();
-    case kX86:
+    case InstructionSet::kX86:
       return X86InstructionSetFeatures::FromCpuInfo();
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return X86_64InstructionSetFeatures::FromCpuInfo();
 
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -136,21 +136,21 @@
 
 std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromHwcap() {
   switch (kRuntimeISA) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return ArmInstructionSetFeatures::FromHwcap();
-    case kArm64:
+    case InstructionSet::kArm64:
       return Arm64InstructionSetFeatures::FromHwcap();
-    case kMips:
+    case InstructionSet::kMips:
       return MipsInstructionSetFeatures::FromHwcap();
-    case kMips64:
+    case InstructionSet::kMips64:
       return Mips64InstructionSetFeatures::FromHwcap();
-    case kX86:
+    case InstructionSet::kX86:
       return X86InstructionSetFeatures::FromHwcap();
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return X86_64InstructionSetFeatures::FromHwcap();
 
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -159,21 +159,21 @@
 
 std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromAssembly() {
   switch (kRuntimeISA) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return ArmInstructionSetFeatures::FromAssembly();
-    case kArm64:
+    case InstructionSet::kArm64:
       return Arm64InstructionSetFeatures::FromAssembly();
-    case kMips:
+    case InstructionSet::kMips:
       return MipsInstructionSetFeatures::FromAssembly();
-    case kMips64:
+    case InstructionSet::kMips64:
       return Mips64InstructionSetFeatures::FromAssembly();
-    case kX86:
+    case InstructionSet::kX86:
       return X86InstructionSetFeatures::FromAssembly();
-    case kX86_64:
+    case InstructionSet::kX86_64:
       return X86_64InstructionSetFeatures::FromAssembly();
 
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -222,32 +222,33 @@
 }
 
 const ArmInstructionSetFeatures* InstructionSetFeatures::AsArmInstructionSetFeatures() const {
-  DCHECK_EQ(kArm, GetInstructionSet());
+  DCHECK_EQ(InstructionSet::kArm, GetInstructionSet());
   return down_cast<const ArmInstructionSetFeatures*>(this);
 }
 
 const Arm64InstructionSetFeatures* InstructionSetFeatures::AsArm64InstructionSetFeatures() const {
-  DCHECK_EQ(kArm64, GetInstructionSet());
+  DCHECK_EQ(InstructionSet::kArm64, GetInstructionSet());
   return down_cast<const Arm64InstructionSetFeatures*>(this);
 }
 
 const MipsInstructionSetFeatures* InstructionSetFeatures::AsMipsInstructionSetFeatures() const {
-  DCHECK_EQ(kMips, GetInstructionSet());
+  DCHECK_EQ(InstructionSet::kMips, GetInstructionSet());
   return down_cast<const MipsInstructionSetFeatures*>(this);
 }
 
 const Mips64InstructionSetFeatures* InstructionSetFeatures::AsMips64InstructionSetFeatures() const {
-  DCHECK_EQ(kMips64, GetInstructionSet());
+  DCHECK_EQ(InstructionSet::kMips64, GetInstructionSet());
   return down_cast<const Mips64InstructionSetFeatures*>(this);
 }
 
 const X86InstructionSetFeatures* InstructionSetFeatures::AsX86InstructionSetFeatures() const {
-  DCHECK(kX86 == GetInstructionSet() || kX86_64 == GetInstructionSet());
+  DCHECK(InstructionSet::kX86 == GetInstructionSet() ||
+         InstructionSet::kX86_64 == GetInstructionSet());
   return down_cast<const X86InstructionSetFeatures*>(this);
 }
 
 const X86_64InstructionSetFeatures* InstructionSetFeatures::AsX86_64InstructionSetFeatures() const {
-  DCHECK_EQ(kX86_64, GetInstructionSet());
+  DCHECK_EQ(InstructionSet::kX86_64, GetInstructionSet());
   return down_cast<const X86_64InstructionSetFeatures*>(this);
 }
 
diff --git a/runtime/arch/instruction_set_test.cc b/runtime/arch/instruction_set_test.cc
index b251b57..12a117d 100644
--- a/runtime/arch/instruction_set_test.cc
+++ b/runtime/arch/instruction_set_test.cc
@@ -23,34 +23,40 @@
 namespace art {
 
 TEST(InstructionSetTest, GetInstructionSetFromString) {
-  EXPECT_EQ(kArm, GetInstructionSetFromString("arm"));
-  EXPECT_EQ(kArm64, GetInstructionSetFromString("arm64"));
-  EXPECT_EQ(kX86, GetInstructionSetFromString("x86"));
-  EXPECT_EQ(kX86_64, GetInstructionSetFromString("x86_64"));
-  EXPECT_EQ(kMips, GetInstructionSetFromString("mips"));
-  EXPECT_EQ(kMips64, GetInstructionSetFromString("mips64"));
-  EXPECT_EQ(kNone, GetInstructionSetFromString("none"));
-  EXPECT_EQ(kNone, GetInstructionSetFromString("random-string"));
+  EXPECT_EQ(InstructionSet::kArm, GetInstructionSetFromString("arm"));
+  EXPECT_EQ(InstructionSet::kArm64, GetInstructionSetFromString("arm64"));
+  EXPECT_EQ(InstructionSet::kX86, GetInstructionSetFromString("x86"));
+  EXPECT_EQ(InstructionSet::kX86_64, GetInstructionSetFromString("x86_64"));
+  EXPECT_EQ(InstructionSet::kMips, GetInstructionSetFromString("mips"));
+  EXPECT_EQ(InstructionSet::kMips64, GetInstructionSetFromString("mips64"));
+  EXPECT_EQ(InstructionSet::kNone, GetInstructionSetFromString("none"));
+  EXPECT_EQ(InstructionSet::kNone, GetInstructionSetFromString("random-string"));
 }
 
 TEST(InstructionSetTest, GetInstructionSetString) {
-  EXPECT_STREQ("arm", GetInstructionSetString(kArm));
-  EXPECT_STREQ("arm", GetInstructionSetString(kThumb2));
-  EXPECT_STREQ("arm64", GetInstructionSetString(kArm64));
-  EXPECT_STREQ("x86", GetInstructionSetString(kX86));
-  EXPECT_STREQ("x86_64", GetInstructionSetString(kX86_64));
-  EXPECT_STREQ("mips", GetInstructionSetString(kMips));
-  EXPECT_STREQ("mips64", GetInstructionSetString(kMips64));
-  EXPECT_STREQ("none", GetInstructionSetString(kNone));
+  EXPECT_STREQ("arm", GetInstructionSetString(InstructionSet::kArm));
+  EXPECT_STREQ("arm", GetInstructionSetString(InstructionSet::kThumb2));
+  EXPECT_STREQ("arm64", GetInstructionSetString(InstructionSet::kArm64));
+  EXPECT_STREQ("x86", GetInstructionSetString(InstructionSet::kX86));
+  EXPECT_STREQ("x86_64", GetInstructionSetString(InstructionSet::kX86_64));
+  EXPECT_STREQ("mips", GetInstructionSetString(InstructionSet::kMips));
+  EXPECT_STREQ("mips64", GetInstructionSetString(InstructionSet::kMips64));
+  EXPECT_STREQ("none", GetInstructionSetString(InstructionSet::kNone));
 }
 
 TEST(InstructionSetTest, GetInstructionSetInstructionAlignment) {
-  EXPECT_EQ(GetInstructionSetInstructionAlignment(kThumb2), kThumb2InstructionAlignment);
-  EXPECT_EQ(GetInstructionSetInstructionAlignment(kArm64), kArm64InstructionAlignment);
-  EXPECT_EQ(GetInstructionSetInstructionAlignment(kX86), kX86InstructionAlignment);
-  EXPECT_EQ(GetInstructionSetInstructionAlignment(kX86_64), kX86_64InstructionAlignment);
-  EXPECT_EQ(GetInstructionSetInstructionAlignment(kMips), kMipsInstructionAlignment);
-  EXPECT_EQ(GetInstructionSetInstructionAlignment(kMips64), kMips64InstructionAlignment);
+  EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kThumb2),
+            kThumb2InstructionAlignment);
+  EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kArm64),
+            kArm64InstructionAlignment);
+  EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kX86),
+            kX86InstructionAlignment);
+  EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kX86_64),
+            kX86_64InstructionAlignment);
+  EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kMips),
+            kMipsInstructionAlignment);
+  EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kMips64),
+            kMips64InstructionAlignment);
 }
 
 TEST(InstructionSetTest, TestRoundTrip) {
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index bf3e96a..6dce54e 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -51,7 +51,7 @@
   // get the method from the top of the stack.  However it's in r0.
   uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr);  // BVA addr
   uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
-      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kMips));
+      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kMips));
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips::A0]);
   } else {
@@ -124,7 +124,7 @@
   VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
     ", fault_addr: " << fault_addr;
 
-  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kMips);
+  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kMips);
 
   // Check that the fault address is the value expected for a stack overflow.
   if (fault_addr != overflow_addr) {
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
index 6540b44..6d4145b 100644
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -168,7 +168,7 @@
 }
 
 bool MipsInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
-  if (kMips != other->GetInstructionSet()) {
+  if (InstructionSet::kMips != other->GetInstructionSet()) {
     return false;
   }
   const MipsInstructionSetFeatures* other_as_mips = other->AsMipsInstructionSetFeatures();
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
index 1cb852e..ee539ed 100644
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -52,7 +52,7 @@
   bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
 
   InstructionSet GetInstructionSet() const OVERRIDE {
-    return kMips;
+    return InstructionSet::kMips;
   }
 
   uint32_t AsBitmap() const OVERRIDE;
diff --git a/runtime/arch/mips/instruction_set_features_mips_test.cc b/runtime/arch/mips/instruction_set_features_mips_test.cc
index 54fd2c9..b7de952 100644
--- a/runtime/arch/mips/instruction_set_features_mips_test.cc
+++ b/runtime/arch/mips/instruction_set_features_mips_test.cc
@@ -23,9 +23,9 @@
 TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromDefaultVariant) {
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> mips_features(
-      InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
   ASSERT_TRUE(mips_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips_features->GetInstructionSet(), kMips);
+  EXPECT_EQ(mips_features->GetInstructionSet(), InstructionSet::kMips);
   EXPECT_TRUE(mips_features->Equals(mips_features.get()));
   EXPECT_STREQ("fpu32,mips2,-msa", mips_features->GetFeatureString().c_str());
   EXPECT_EQ(mips_features->AsBitmap(), 3U);
@@ -34,15 +34,15 @@
 TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR1Variant) {
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
-      InstructionSetFeatures::FromVariant(kMips, "mips32r1", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
   ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips32r1_features->GetInstructionSet(), kMips);
+  EXPECT_EQ(mips32r1_features->GetInstructionSet(), InstructionSet::kMips);
   EXPECT_TRUE(mips32r1_features->Equals(mips32r1_features.get()));
   EXPECT_STREQ("fpu32,-mips2,-msa", mips32r1_features->GetFeatureString().c_str());
   EXPECT_EQ(mips32r1_features->AsBitmap(), 1U);
 
   std::unique_ptr<const InstructionSetFeatures> mips_default_features(
-      InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
   ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
   EXPECT_FALSE(mips32r1_features->Equals(mips_default_features.get()));
 }
@@ -50,20 +50,20 @@
 TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR2Variant) {
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
-      InstructionSetFeatures::FromVariant(kMips, "mips32r2", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r2", &error_msg));
   ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips32r2_features->GetInstructionSet(), kMips);
+  EXPECT_EQ(mips32r2_features->GetInstructionSet(), InstructionSet::kMips);
   EXPECT_TRUE(mips32r2_features->Equals(mips32r2_features.get()));
   EXPECT_STREQ("fpu32,mips2,-msa", mips32r2_features->GetFeatureString().c_str());
   EXPECT_EQ(mips32r2_features->AsBitmap(), 3U);
 
   std::unique_ptr<const InstructionSetFeatures> mips_default_features(
-      InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
   ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
   EXPECT_TRUE(mips32r2_features->Equals(mips_default_features.get()));
 
   std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
-      InstructionSetFeatures::FromVariant(kMips, "mips32r1", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
   ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
   EXPECT_FALSE(mips32r2_features->Equals(mips32r1_features.get()));
 }
@@ -71,25 +71,25 @@
 TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR5Variant) {
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> mips32r5_features(
-      InstructionSetFeatures::FromVariant(kMips, "mips32r5", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r5", &error_msg));
   ASSERT_TRUE(mips32r5_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips32r5_features->GetInstructionSet(), kMips);
+  EXPECT_EQ(mips32r5_features->GetInstructionSet(), InstructionSet::kMips);
   EXPECT_TRUE(mips32r5_features->Equals(mips32r5_features.get()));
   EXPECT_STREQ("-fpu32,mips2,msa", mips32r5_features->GetFeatureString().c_str());
   EXPECT_EQ(mips32r5_features->AsBitmap(), 10U);
 
   std::unique_ptr<const InstructionSetFeatures> mips_default_features(
-      InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
   ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
   EXPECT_FALSE(mips32r5_features->Equals(mips_default_features.get()));
 
   std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
-      InstructionSetFeatures::FromVariant(kMips, "mips32r1", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
   ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
   EXPECT_FALSE(mips32r5_features->Equals(mips32r1_features.get()));
 
   std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
-      InstructionSetFeatures::FromVariant(kMips, "mips32r2", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r2", &error_msg));
   ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
   EXPECT_FALSE(mips32r5_features->Equals(mips32r2_features.get()));
 }
@@ -97,30 +97,30 @@
 TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR6Variant) {
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> mips32r6_features(
-      InstructionSetFeatures::FromVariant(kMips, "mips32r6", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r6", &error_msg));
   ASSERT_TRUE(mips32r6_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips32r6_features->GetInstructionSet(), kMips);
+  EXPECT_EQ(mips32r6_features->GetInstructionSet(), InstructionSet::kMips);
   EXPECT_TRUE(mips32r6_features->Equals(mips32r6_features.get()));
   EXPECT_STREQ("-fpu32,mips2,r6,msa", mips32r6_features->GetFeatureString().c_str());
   EXPECT_EQ(mips32r6_features->AsBitmap(), 14U);
 
   std::unique_ptr<const InstructionSetFeatures> mips_default_features(
-      InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
   ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
   EXPECT_FALSE(mips32r6_features->Equals(mips_default_features.get()));
 
   std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
-      InstructionSetFeatures::FromVariant(kMips, "mips32r1", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
   ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
   EXPECT_FALSE(mips32r6_features->Equals(mips32r1_features.get()));
 
   std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
-      InstructionSetFeatures::FromVariant(kMips, "mips32r2", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r2", &error_msg));
   ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
   EXPECT_FALSE(mips32r6_features->Equals(mips32r2_features.get()));
 
   std::unique_ptr<const InstructionSetFeatures> mips32r5_features(
-      InstructionSetFeatures::FromVariant(kMips, "mips32r5", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r5", &error_msg));
   ASSERT_TRUE(mips32r5_features.get() != nullptr) << error_msg;
   EXPECT_FALSE(mips32r6_features->Equals(mips32r5_features.get()));
 }
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index 9d77ebc..bdce520 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -52,7 +52,7 @@
   // get the method from the top of the stack.  However it's in r0.
   uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr);  // BVA addr
   uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
-      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kMips64));
+      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kMips64));
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips64::A0]);
   } else {
@@ -126,7 +126,7 @@
   VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
     ", fault_addr: " << fault_addr;
 
-  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kMips64);
+  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kMips64);
 
   // Check that the fault address is the value expected for a stack overflow.
   if (fault_addr != overflow_addr) {
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
index 08d0bac..ea9f84b 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64.cc
@@ -89,7 +89,7 @@
 }
 
 bool Mips64InstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
-  if (kMips64 != other->GetInstructionSet()) {
+  if (InstructionSet::kMips64 != other->GetInstructionSet()) {
     return false;
   }
   const Mips64InstructionSetFeatures* other_as_mips64 = other->AsMips64InstructionSetFeatures();
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
index d9f30c7..27e544e 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ b/runtime/arch/mips64/instruction_set_features_mips64.h
@@ -51,7 +51,7 @@
   bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
 
   InstructionSet GetInstructionSet() const OVERRIDE {
-    return kMips64;
+    return InstructionSet::kMips64;
   }
 
   uint32_t AsBitmap() const OVERRIDE;
diff --git a/runtime/arch/mips64/instruction_set_features_mips64_test.cc b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
index 0ba0bd4..933dc66 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64_test.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
@@ -23,9 +23,9 @@
 TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromDefaultVariant) {
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> mips64_features(
-      InstructionSetFeatures::FromVariant(kMips64, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips64, "default", &error_msg));
   ASSERT_TRUE(mips64_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips64_features->GetInstructionSet(), kMips64);
+  EXPECT_EQ(mips64_features->GetInstructionSet(), InstructionSet::kMips64);
   EXPECT_TRUE(mips64_features->Equals(mips64_features.get()));
   EXPECT_STREQ("msa", mips64_features->GetFeatureString().c_str());
   EXPECT_EQ(mips64_features->AsBitmap(), 1U);
@@ -34,15 +34,15 @@
 TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromR6Variant) {
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> mips64r6_features(
-      InstructionSetFeatures::FromVariant(kMips64, "mips64r6", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips64, "mips64r6", &error_msg));
   ASSERT_TRUE(mips64r6_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips64r6_features->GetInstructionSet(), kMips64);
+  EXPECT_EQ(mips64r6_features->GetInstructionSet(), InstructionSet::kMips64);
   EXPECT_TRUE(mips64r6_features->Equals(mips64r6_features.get()));
   EXPECT_STREQ("msa", mips64r6_features->GetFeatureString().c_str());
   EXPECT_EQ(mips64r6_features->AsBitmap(), 1U);
 
   std::unique_ptr<const InstructionSetFeatures> mips64_default_features(
-      InstructionSetFeatures::FromVariant(kMips64, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kMips64, "default", &error_msg));
   ASSERT_TRUE(mips64_default_features.get() != nullptr) << error_msg;
   EXPECT_TRUE(mips64r6_features->Equals(mips64_default_features.get()));
 }
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 349ce3b..527332f 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -291,9 +291,9 @@
   uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr);
   uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
 #if defined(__x86_64__)
-      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86_64));
+      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kX86_64));
 #else
-      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86));
+      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kX86));
 #endif
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(uc->CTX_METHOD);
@@ -445,9 +445,9 @@
     ", fault_addr: " << fault_addr;
 
 #if defined(__x86_64__)
-  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kX86_64);
+  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kX86_64);
 #else
-  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kX86);
+  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kX86);
 #endif
 
   // Check that the fault address is the value expected for a stack overflow.
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 56cb07e..57cf4b2 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -54,7 +54,7 @@
   bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
 
   virtual InstructionSet GetInstructionSet() const OVERRIDE {
-    return kX86;
+    return InstructionSet::kX86;
   }
 
   uint32_t AsBitmap() const OVERRIDE;
diff --git a/runtime/arch/x86/instruction_set_features_x86_test.cc b/runtime/arch/x86/instruction_set_features_x86_test.cc
index c67b4dd..33eac0f 100644
--- a/runtime/arch/x86/instruction_set_features_x86_test.cc
+++ b/runtime/arch/x86/instruction_set_features_x86_test.cc
@@ -23,9 +23,9 @@
 TEST(X86InstructionSetFeaturesTest, X86FeaturesFromDefaultVariant) {
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> x86_features(
-      InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86, "default", &error_msg));
   ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+  EXPECT_EQ(x86_features->GetInstructionSet(), InstructionSet::kX86);
   EXPECT_TRUE(x86_features->Equals(x86_features.get()));
   EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
                x86_features->GetFeatureString().c_str());
@@ -36,9 +36,9 @@
   // Build features for a 32-bit x86 atom processor.
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> x86_features(
-      InstructionSetFeatures::FromVariant(kX86, "atom", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86, "atom", &error_msg));
   ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+  EXPECT_EQ(x86_features->GetInstructionSet(), InstructionSet::kX86);
   EXPECT_TRUE(x86_features->Equals(x86_features.get()));
   EXPECT_STREQ("ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
                x86_features->GetFeatureString().c_str());
@@ -46,9 +46,9 @@
 
   // Build features for a 32-bit x86 default processor.
   std::unique_ptr<const InstructionSetFeatures> x86_default_features(
-      InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86, "default", &error_msg));
   ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
+  EXPECT_EQ(x86_default_features->GetInstructionSet(), InstructionSet::kX86);
   EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
   EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
                x86_default_features->GetFeatureString().c_str());
@@ -56,9 +56,9 @@
 
   // Build features for a 64-bit x86-64 atom processor.
   std::unique_ptr<const InstructionSetFeatures> x86_64_features(
-      InstructionSetFeatures::FromVariant(kX86_64, "atom", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86_64, "atom", &error_msg));
   ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+  EXPECT_EQ(x86_64_features->GetInstructionSet(), InstructionSet::kX86_64);
   EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
   EXPECT_STREQ("ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
                x86_64_features->GetFeatureString().c_str());
@@ -73,9 +73,9 @@
   // Build features for a 32-bit x86 sandybridge processor.
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> x86_features(
-      InstructionSetFeatures::FromVariant(kX86, "sandybridge", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86, "sandybridge", &error_msg));
   ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+  EXPECT_EQ(x86_features->GetInstructionSet(), InstructionSet::kX86);
   EXPECT_TRUE(x86_features->Equals(x86_features.get()));
   EXPECT_STREQ("ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt",
                x86_features->GetFeatureString().c_str());
@@ -83,9 +83,9 @@
 
   // Build features for a 32-bit x86 default processor.
   std::unique_ptr<const InstructionSetFeatures> x86_default_features(
-      InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86, "default", &error_msg));
   ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
+  EXPECT_EQ(x86_default_features->GetInstructionSet(), InstructionSet::kX86);
   EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
   EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
                x86_default_features->GetFeatureString().c_str());
@@ -93,9 +93,9 @@
 
   // Build features for a 64-bit x86-64 sandybridge processor.
   std::unique_ptr<const InstructionSetFeatures> x86_64_features(
-      InstructionSetFeatures::FromVariant(kX86_64, "sandybridge", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86_64, "sandybridge", &error_msg));
   ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+  EXPECT_EQ(x86_64_features->GetInstructionSet(), InstructionSet::kX86_64);
   EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
   EXPECT_STREQ("ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt",
                x86_64_features->GetFeatureString().c_str());
@@ -110,9 +110,9 @@
   // Build features for a 32-bit x86 silvermont processor.
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> x86_features(
-      InstructionSetFeatures::FromVariant(kX86, "silvermont", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86, "silvermont", &error_msg));
   ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+  EXPECT_EQ(x86_features->GetInstructionSet(), InstructionSet::kX86);
   EXPECT_TRUE(x86_features->Equals(x86_features.get()));
   EXPECT_STREQ("ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt",
                x86_features->GetFeatureString().c_str());
@@ -120,9 +120,9 @@
 
   // Build features for a 32-bit x86 default processor.
   std::unique_ptr<const InstructionSetFeatures> x86_default_features(
-      InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86, "default", &error_msg));
   ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
+  EXPECT_EQ(x86_default_features->GetInstructionSet(), InstructionSet::kX86);
   EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
   EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
                x86_default_features->GetFeatureString().c_str());
@@ -130,9 +130,9 @@
 
   // Build features for a 64-bit x86-64 silvermont processor.
   std::unique_ptr<const InstructionSetFeatures> x86_64_features(
-      InstructionSetFeatures::FromVariant(kX86_64, "silvermont", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86_64, "silvermont", &error_msg));
   ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+  EXPECT_EQ(x86_64_features->GetInstructionSet(), InstructionSet::kX86_64);
   EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
   EXPECT_STREQ("ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt",
                x86_64_features->GetFeatureString().c_str());
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
index 83f4093..e76490b 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64.h
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -60,7 +60,7 @@
   }
 
   InstructionSet GetInstructionSet() const OVERRIDE {
-    return kX86_64;
+    return InstructionSet::kX86_64;
   }
 
   virtual ~X86_64InstructionSetFeatures() {}
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc b/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
index 3c2ceac..2b307da 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
@@ -23,9 +23,9 @@
 TEST(X86_64InstructionSetFeaturesTest, X86Features) {
   std::string error_msg;
   std::unique_ptr<const InstructionSetFeatures> x86_64_features(
-      InstructionSetFeatures::FromVariant(kX86_64, "default", &error_msg));
+      InstructionSetFeatures::FromVariant(InstructionSet::kX86_64, "default", &error_msg));
   ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+  EXPECT_EQ(x86_64_features->GetInstructionSet(), InstructionSet::kX86_64);
   EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
   EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt",
                x86_64_features->GetFeatureString().c_str());
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 09eae40..d8621cc 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -47,7 +47,7 @@
 class QuasiAtomic {
   static constexpr bool NeedSwapMutexes(InstructionSet isa) {
     // TODO - mips64 still need this for Cas64 ???
-    return (isa == kMips) || (isa == kMips64);
+    return (isa == InstructionSet::kMips) || (isa == InstructionSet::kMips64);
   }
 
  public:
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 008880c..2408043 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -1021,7 +1021,7 @@
 void Locks::Init() {
   if (logging_lock_ != nullptr) {
     // Already initialized.
-    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
+    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
       DCHECK(modify_ldt_lock_ != nullptr);
     } else {
       DCHECK(modify_ldt_lock_ == nullptr);
@@ -1126,7 +1126,7 @@
     DCHECK(allocated_thread_ids_lock_ == nullptr);
     allocated_thread_ids_lock_ =  new Mutex("allocated thread ids lock", current_lock_level);
 
-    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
+    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
       UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
       DCHECK(modify_ldt_lock_ == nullptr);
       modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index f15acf9..0515bc9 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -334,26 +334,26 @@
 
 std::string CommonRuntimeTestImpl::GetAndroidTargetToolsDir(InstructionSet isa) {
   switch (isa) {
-    case kArm:
-    case kThumb2:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
       return GetAndroidToolsDir("prebuilts/gcc/linux-x86/arm",
                                 "arm-linux-androideabi",
                                 "arm-linux-androideabi");
-    case kArm64:
+    case InstructionSet::kArm64:
       return GetAndroidToolsDir("prebuilts/gcc/linux-x86/aarch64",
                                 "aarch64-linux-android",
                                 "aarch64-linux-android");
-    case kX86:
-    case kX86_64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
       return GetAndroidToolsDir("prebuilts/gcc/linux-x86/x86",
                                 "x86_64-linux-android",
                                 "x86_64-linux-android");
-    case kMips:
-    case kMips64:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
       return GetAndroidToolsDir("prebuilts/gcc/linux-x86/mips",
                                 "mips64el-linux-android",
                                 "mips64el-linux-android");
-    case kNone:
+    case InstructionSet::kNone:
       break;
   }
   ADD_FAILURE() << "Invalid isa " << isa;
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index e2131f1..5be8d5b 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -253,13 +253,13 @@
   }
 
 #define TEST_DISABLED_FOR_MIPS() \
-  if (kRuntimeISA == kMips) { \
+  if (kRuntimeISA == InstructionSet::kMips) { \
     printf("WARNING: TEST DISABLED FOR MIPS\n"); \
     return; \
   }
 
 #define TEST_DISABLED_FOR_X86() \
-  if (kRuntimeISA == kX86) { \
+  if (kRuntimeISA == InstructionSet::kX86) { \
     printf("WARNING: TEST DISABLED FOR X86\n"); \
     return; \
   }
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 1e77753..cd52bb6 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -433,7 +433,8 @@
 static bool IsValidReadBarrierImplicitCheck(uintptr_t addr) {
   DCHECK(kEmitCompilerReadBarrier);
   uint32_t monitor_offset = mirror::Object::MonitorOffset().Uint32Value();
-  if (kUseBakerReadBarrier && (kRuntimeISA == kX86 || kRuntimeISA == kX86_64)) {
+  if (kUseBakerReadBarrier &&
+      (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64)) {
     constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
     monitor_offset += gray_byte_position;
   }
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 69e3fc1..ef27ca3 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -68,28 +68,50 @@
 };
 
 static constexpr size_t GetCalleeSaveFrameSize(InstructionSet isa, CalleeSaveType type) {
-  // constexpr must be a return statement.
-  return (isa == kArm || isa == kThumb2) ? arm::ArmCalleeSaveFrameSize(type) :
-         isa == kArm64 ? arm64::Arm64CalleeSaveFrameSize(type) :
-         isa == kMips ? mips::MipsCalleeSaveFrameSize(type) :
-         isa == kMips64 ? mips64::Mips64CalleeSaveFrameSize(type) :
-         isa == kX86 ? x86::X86CalleeSaveFrameSize(type) :
-         isa == kX86_64 ? x86_64::X86_64CalleeSaveFrameSize(type) :
-         isa == kNone ? (LOG(FATAL) << "kNone has no frame size", 0) :
-         (LOG(FATAL) << "Unknown instruction set" << isa, 0);
+  switch (isa) {
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
+      return arm::ArmCalleeSaveFrameSize(type);
+    case InstructionSet::kArm64:
+      return arm64::Arm64CalleeSaveFrameSize(type);
+    case InstructionSet::kMips:
+      return mips::MipsCalleeSaveFrameSize(type);
+    case InstructionSet::kMips64:
+      return mips64::Mips64CalleeSaveFrameSize(type);
+    case InstructionSet::kX86:
+      return x86::X86CalleeSaveFrameSize(type);
+    case InstructionSet::kX86_64:
+      return x86_64::X86_64CalleeSaveFrameSize(type);
+    case InstructionSet::kNone:
+      LOG(FATAL) << "kNone has no frame size";
+      UNREACHABLE();
+  }
+  LOG(FATAL) << "Unknown ISA " << isa;
+  UNREACHABLE();
 }
 
 // Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
 static constexpr PointerSize GetConstExprPointerSize(InstructionSet isa) {
-  // constexpr must be a return statement.
-  return (isa == kArm || isa == kThumb2) ? kArmPointerSize :
-         isa == kArm64 ? kArm64PointerSize :
-         isa == kMips ? kMipsPointerSize :
-         isa == kMips64 ? kMips64PointerSize :
-         isa == kX86 ? kX86PointerSize :
-         isa == kX86_64 ? kX86_64PointerSize :
-         isa == kNone ? (LOG(FATAL) << "kNone has no pointer size", PointerSize::k32) :
-         (LOG(FATAL) << "Unknown instruction set" << isa, PointerSize::k32);
+  switch (isa) {
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
+      return kArmPointerSize;
+    case InstructionSet::kArm64:
+      return kArm64PointerSize;
+    case InstructionSet::kMips:
+      return kMipsPointerSize;
+    case InstructionSet::kMips64:
+      return kMips64PointerSize;
+    case InstructionSet::kX86:
+      return kX86PointerSize;
+    case InstructionSet::kX86_64:
+      return kX86_64PointerSize;
+    case InstructionSet::kNone:
+      LOG(FATAL) << "kNone has no pointer size";
+      UNREACHABLE();
+  }
+  LOG(FATAL) << "Unknown ISA " << isa;
+  UNREACHABLE();
 }
 
 // Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 6bb67a3..a8d2a34 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -232,7 +232,7 @@
     }
     switch (return_shorty_char) {
       case 'F': {
-        if (kRuntimeISA == kX86) {
+        if (kRuntimeISA == InstructionSet::kX86) {
           // Convert back the result to float.
           double d = bit_cast<double, uint64_t>(result_f);
           return bit_cast<uint32_t, float>(static_cast<float>(d));
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index b692618..77b3132 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -103,23 +103,29 @@
                  GetCalleeSaveFrameSize(                                             \
                      isa, CalleeSaveType::kSaveEverythingForSuspendCheck))
 
-  CHECK_FRAME_SIZE(kArm);
-  CHECK_FRAME_SIZE(kArm64);
-  CHECK_FRAME_SIZE(kMips);
-  CHECK_FRAME_SIZE(kMips64);
-  CHECK_FRAME_SIZE(kX86);
-  CHECK_FRAME_SIZE(kX86_64);
+  CHECK_FRAME_SIZE(InstructionSet::kArm);
+  CHECK_FRAME_SIZE(InstructionSet::kArm64);
+  CHECK_FRAME_SIZE(InstructionSet::kMips);
+  CHECK_FRAME_SIZE(InstructionSet::kMips64);
+  CHECK_FRAME_SIZE(InstructionSet::kX86);
+  CHECK_FRAME_SIZE(InstructionSet::kX86_64);
 }
 
 // This test ensures that GetConstExprPointerSize is correct with respect to
 // GetInstructionSetPointerSize.
 TEST_F(QuickTrampolineEntrypointsTest, PointerSize) {
-  EXPECT_EQ(GetInstructionSetPointerSize(kArm), GetConstExprPointerSize(kArm));
-  EXPECT_EQ(GetInstructionSetPointerSize(kArm64), GetConstExprPointerSize(kArm64));
-  EXPECT_EQ(GetInstructionSetPointerSize(kMips), GetConstExprPointerSize(kMips));
-  EXPECT_EQ(GetInstructionSetPointerSize(kMips64), GetConstExprPointerSize(kMips64));
-  EXPECT_EQ(GetInstructionSetPointerSize(kX86), GetConstExprPointerSize(kX86));
-  EXPECT_EQ(GetInstructionSetPointerSize(kX86_64), GetConstExprPointerSize(kX86_64));
+  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kArm),
+            GetConstExprPointerSize(InstructionSet::kArm));
+  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kArm64),
+            GetConstExprPointerSize(InstructionSet::kArm64));
+  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kMips),
+            GetConstExprPointerSize(InstructionSet::kMips));
+  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kMips64),
+            GetConstExprPointerSize(InstructionSet::kMips64));
+  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kX86),
+            GetConstExprPointerSize(InstructionSet::kX86));
+  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kX86_64),
+            GetConstExprPointerSize(InstructionSet::kX86_64));
 }
 
 // This test ensures that the constexpr specialization of the return PC offset computation in
diff --git a/runtime/gc/space/image_space_fs.h b/runtime/gc/space/image_space_fs.h
index bdb2eda..a0ecb95 100644
--- a/runtime/gc/space/image_space_fs.h
+++ b/runtime/gc/space/image_space_fs.h
@@ -89,7 +89,7 @@
 // Adapted from prune_dex_cache(const char* subdir) in frameworks/native/cmds/installd/commands.c
 // Note this should only be used during first boot.
 static void PruneDalvikCache(InstructionSet isa) {
-  CHECK_NE(isa, kNone);
+  CHECK_NE(isa, InstructionSet::kNone);
   // Prune the base /data/dalvik-cache.
   // Note: GetDalvikCache may return the empty string if the directory doesn't
   // exist. It is safe to pass "" to DeleteDirectoryContents, so this is okay.
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 47615f5..e180752 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1223,8 +1223,8 @@
 }
 
 OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
-  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
-  if (kRuntimeISA == kArm) {
+  static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
+  if (kRuntimeISA == InstructionSet::kArm) {
     // On Thumb-2, the pc is offset by one.
     --pc;
   }
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 7d9d8be..2235563 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -473,7 +473,7 @@
   }
 
   const InstructionSet target_instruction_set = GetInstructionSetFromString(instruction_set);
-  if (target_instruction_set == kNone) {
+  if (target_instruction_set == InstructionSet::kNone) {
     ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
     std::string message(StringPrintf("Instruction set %s is invalid.", instruction_set));
     env->ThrowNew(iae.get(), message.c_str());
@@ -533,7 +533,7 @@
 
   const InstructionSet target_instruction_set = GetInstructionSetFromString(
       instruction_set.c_str());
-  if (target_instruction_set == kNone) {
+  if (target_instruction_set == InstructionSet::kNone) {
     ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
     std::string message(StringPrintf("Instruction set %s is invalid.", instruction_set.c_str()));
     env->ThrowNew(iae.get(), message.c_str());
@@ -706,7 +706,7 @@
 
   const InstructionSet target_instruction_set = GetInstructionSetFromString(
       instruction_set.c_str());
-  if (target_instruction_set == kNone) {
+  if (target_instruction_set == InstructionSet::kNone) {
     ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
     std::string message(StringPrintf("Instruction set %s is invalid.", instruction_set.c_str()));
     env->ThrowNew(iae.get(), message.c_str());
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 0bbd1ec..2d1f886 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -624,7 +624,7 @@
     return JNI_FALSE;
   }
   InstructionSet isa = GetInstructionSetFromString(instruction_set.c_str());
-  if (isa == kNone) {
+  if (isa == InstructionSet::kNone) {
     ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
     std::string message(StringPrintf("Instruction set %s is invalid.", instruction_set.c_str()));
     env->ThrowNew(iae.get(), message.c_str());
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index b436e0d..a7bee39 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -332,7 +332,7 @@
     ScopedUtfChars isa_string(env, instruction_set);
     InstructionSet isa = GetInstructionSetFromString(isa_string.c_str());
     Runtime::NativeBridgeAction action = Runtime::NativeBridgeAction::kUnload;
-    if (isa != kNone && isa != kRuntimeISA) {
+    if (isa != InstructionSet::kNone && isa != kRuntimeISA) {
       action = Runtime::NativeBridgeAction::kInitialize;
     }
     Runtime::Current()->InitNonZygoteOrPostFork(
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 21e20e9..39dc8da 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -94,7 +94,7 @@
   memcpy(magic_, kOatMagic, sizeof(kOatMagic));
   memcpy(version_, kOatVersion, sizeof(kOatVersion));
 
-  CHECK_NE(instruction_set, kNone);
+  CHECK_NE(instruction_set, InstructionSet::kNone);
 
   // Flatten the map. Will also update variable_size_data_size_.
   Flatten(variable_data);
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 0f74ca4..44d8bd8 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -467,7 +467,7 @@
 
   // In a properly constructed OatFileAssistant object, isa_ should be either
   // the 32 or 64 bit variant for the current device.
-  const InstructionSet isa_ = kNone;
+  const InstructionSet isa_ = InstructionSet::kNone;
 
   // Whether we will attempt to load oat files executable.
   bool load_executable_ = false;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 6d14971..d12c331 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -1328,17 +1328,17 @@
   std::string odex_file;
 
   EXPECT_TRUE(OatFileAssistant::DexLocationToOdexFilename(
-        "/foo/bar/baz.jar", kArm, &odex_file, &error_msg)) << error_msg;
+        "/foo/bar/baz.jar", InstructionSet::kArm, &odex_file, &error_msg)) << error_msg;
   EXPECT_EQ("/foo/bar/oat/arm/baz.odex", odex_file);
 
   EXPECT_TRUE(OatFileAssistant::DexLocationToOdexFilename(
-        "/foo/bar/baz.funnyext", kArm, &odex_file, &error_msg)) << error_msg;
+        "/foo/bar/baz.funnyext", InstructionSet::kArm, &odex_file, &error_msg)) << error_msg;
   EXPECT_EQ("/foo/bar/oat/arm/baz.odex", odex_file);
 
   EXPECT_FALSE(OatFileAssistant::DexLocationToOdexFilename(
-        "nopath.jar", kArm, &odex_file, &error_msg));
+        "nopath.jar", InstructionSet::kArm, &odex_file, &error_msg));
   EXPECT_FALSE(OatFileAssistant::DexLocationToOdexFilename(
-        "/foo/bar/baz_noext", kArm, &odex_file, &error_msg));
+        "/foo/bar/baz_noext", InstructionSet::kArm, &odex_file, &error_msg));
 }
 
 // Verify the dexopt status values from dalvik.system.DexFile
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 3625b9e..4443255 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -136,8 +136,8 @@
 
   bool Contains(uintptr_t pc) const {
     uintptr_t code_start = reinterpret_cast<uintptr_t>(code_);
-    static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
-    if (kRuntimeISA == kArm) {
+    static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
+    if (kRuntimeISA == InstructionSet::kArm) {
       // On Thumb-2, the pc is offset by one.
       code_start++;
     }
@@ -149,8 +149,8 @@
     // (not `kThumb2`), *but* we always generate code for the Thumb-2
     // instruction set anyway. Thumb-2 requires the entrypoint to be of
     // offset 1.
-    static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
-    return (kRuntimeISA == kArm)
+    static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
+    return (kRuntimeISA == InstructionSet::kArm)
         ? reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(code_) | 1)
         : code_;
   }
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 526f6d1..cc09a77 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -369,7 +369,7 @@
     } else if (option == "imageinstructionset") {
       const char* isa_str = reinterpret_cast<const char*>(options[i].second);
       auto&& image_isa = GetInstructionSetFromString(isa_str);
-      if (image_isa == kNone) {
+      if (image_isa == InstructionSet::kNone) {
         Usage("%s is not a valid instruction set.", isa_str);
         return false;
       }
diff --git a/runtime/prebuilt_tools_test.cc b/runtime/prebuilt_tools_test.cc
index c2b34c8..6fa9b34 100644
--- a/runtime/prebuilt_tools_test.cc
+++ b/runtime/prebuilt_tools_test.cc
@@ -50,7 +50,7 @@
 
 TEST_F(PrebuiltToolsTest, CheckTargetTools) {
   // Other prebuilts are missing from the build server's repo manifest.
-  InstructionSet isas[] = { kThumb2 };  // NOLINT
+  InstructionSet isas[] = { InstructionSet::kThumb2 };  // NOLINT
   for (InstructionSet isa : isas) {
     std::string tools_dir = GetAndroidTargetToolsDir(isa);
     if (tools_dir.empty()) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 139de2b..f09b6c9 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -208,7 +208,7 @@
     : resolution_method_(nullptr),
       imt_conflict_method_(nullptr),
       imt_unimplemented_method_(nullptr),
-      instruction_set_(kNone),
+      instruction_set_(InstructionSet::kNone),
       compiler_callbacks_(nullptr),
       is_zygote_(false),
       must_relocate_(false),
@@ -1252,13 +1252,13 @@
 
   // Change the implicit checks flags based on runtime architecture.
   switch (kRuntimeISA) {
-    case kArm:
-    case kThumb2:
-    case kX86:
-    case kArm64:
-    case kX86_64:
-    case kMips:
-    case kMips64:
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
+    case InstructionSet::kX86:
+    case InstructionSet::kArm64:
+    case InstructionSet::kX86_64:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
       implicit_null_checks_ = true;
       // Installing stack protection does not play well with valgrind.
       implicit_so_checks_ = !(RUNNING_ON_MEMORY_TOOL && kMemoryToolIsValgrind);
@@ -1969,7 +1969,7 @@
   auto* method = CreateRuntimeMethod(GetClassLinker(), GetLinearAlloc());
   PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
   method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
-  DCHECK_NE(instruction_set_, kNone);
+  DCHECK_NE(instruction_set_, InstructionSet::kNone);
   DCHECK(method->IsRuntimeMethod());
   return method;
 }
@@ -2026,32 +2026,32 @@
 
 void Runtime::SetInstructionSet(InstructionSet instruction_set) {
   instruction_set_ = instruction_set;
-  if ((instruction_set_ == kThumb2) || (instruction_set_ == kArm)) {
+  if ((instruction_set_ == InstructionSet::kThumb2) || (instruction_set_ == InstructionSet::kArm)) {
     for (int i = 0; i != kCalleeSaveSize; ++i) {
       CalleeSaveType type = static_cast<CalleeSaveType>(i);
       callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
     }
-  } else if (instruction_set_ == kMips) {
+  } else if (instruction_set_ == InstructionSet::kMips) {
     for (int i = 0; i != kCalleeSaveSize; ++i) {
       CalleeSaveType type = static_cast<CalleeSaveType>(i);
       callee_save_method_frame_infos_[i] = mips::MipsCalleeSaveMethodFrameInfo(type);
     }
-  } else if (instruction_set_ == kMips64) {
+  } else if (instruction_set_ == InstructionSet::kMips64) {
     for (int i = 0; i != kCalleeSaveSize; ++i) {
       CalleeSaveType type = static_cast<CalleeSaveType>(i);
       callee_save_method_frame_infos_[i] = mips64::Mips64CalleeSaveMethodFrameInfo(type);
     }
-  } else if (instruction_set_ == kX86) {
+  } else if (instruction_set_ == InstructionSet::kX86) {
     for (int i = 0; i != kCalleeSaveSize; ++i) {
       CalleeSaveType type = static_cast<CalleeSaveType>(i);
       callee_save_method_frame_infos_[i] = x86::X86CalleeSaveMethodFrameInfo(type);
     }
-  } else if (instruction_set_ == kX86_64) {
+  } else if (instruction_set_ == InstructionSet::kX86_64) {
     for (int i = 0; i != kCalleeSaveSize; ++i) {
       CalleeSaveType type = static_cast<CalleeSaveType>(i);
       callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
     }
-  } else if (instruction_set_ == kArm64) {
+  } else if (instruction_set_ == InstructionSet::kArm64) {
     for (int i = 0; i != kCalleeSaveSize; ++i) {
       CalleeSaveType type = static_cast<CalleeSaveType>(i);
       callee_save_method_frame_infos_[i] = arm64::Arm64CalleeSaveMethodFrameInfo(type);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 065b6e2..712eabc 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -155,7 +155,7 @@
 }
 
 void Thread::ResetQuickAllocEntryPointsForThread(bool is_marking) {
-  if (kUseReadBarrier && kRuntimeISA != kX86_64) {
+  if (kUseReadBarrier && kRuntimeISA != InstructionSet::kX86_64) {
     // Allocation entrypoint switching is currently only implemented for X86_64.
     is_marking = true;
   }
@@ -1114,7 +1114,7 @@
   // effectively disable stack overflow checks (we'll get segfaults, potentially) by setting
   // stack_begin to 0.
   const bool valgrind_on_arm =
-      (kRuntimeISA == kArm || kRuntimeISA == kArm64) &&
+      (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kArm64) &&
       kMemoryToolIsValgrind &&
       RUNNING_ON_MEMORY_TOOL != 0;
   if (valgrind_on_arm) {
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index efb20ba..1dc4687 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -292,7 +292,7 @@
 
 TEST_F(UtilsTest, GetSystemImageFilename) {
   EXPECT_STREQ("/system/framework/arm/boot.art",
-               GetSystemImageFilename("/system/framework/boot.art", kArm).c_str());
+               GetSystemImageFilename("/system/framework/boot.art", InstructionSet::kArm).c_str());
 }
 
 TEST_F(UtilsTest, ExecSuccess) {
diff --git a/simulator/code_simulator.cc b/simulator/code_simulator.cc
index e653dfc..c04ab1c 100644
--- a/simulator/code_simulator.cc
+++ b/simulator/code_simulator.cc
@@ -22,7 +22,7 @@
 
 CodeSimulator* CodeSimulator::CreateCodeSimulator(InstructionSet target_isa) {
   switch (target_isa) {
-    case kArm64:
+    case InstructionSet::kArm64:
       return arm64::CodeSimulatorArm64::CreateCodeSimulatorArm64();
     default:
       return nullptr;
diff --git a/simulator/code_simulator_arm64.h b/simulator/code_simulator_arm64.h
index 0542593..8b66529 100644
--- a/simulator/code_simulator_arm64.h
+++ b/simulator/code_simulator_arm64.h
@@ -25,6 +25,7 @@
 #include "aarch64/simulator-aarch64.h"
 #pragma GCC diagnostic pop
 
+#include "arch/instruction_set.h"
 #include "code_simulator.h"
 
 namespace art {
@@ -48,7 +49,7 @@
   vixl::aarch64::Simulator* simulator_;
 
   // TODO: Enable CodeSimulatorArm64 for more host ISAs once Simulator supports them.
-  static constexpr bool kCanSimulate = (kRuntimeISA == kX86_64);
+  static constexpr bool kCanSimulate = (kRuntimeISA == InstructionSet::kX86_64);
 
   DISALLOW_COPY_AND_ASSIGN(CodeSimulatorArm64);
 };