author     2017-11-01 14:35:42 +0000
committer  2017-11-02 10:11:02 +0000
commit     33bff25bcd7a02d35c54f63740eadb1a4833fc92 (patch)
tree       553db4f60878acf2a0fa7036a739d406df9a29b7 /compiler
parent     321b3ca9a36d769283c64d4bdee0798db80af524 (diff)
ART: Make InstructionSet an enum class and add kLast.
Adding InstructionSet::kLast shall make it easier to encode
the InstructionSet in fewer bits using BitField<>. However,
introducing `kLast` into the `art` namespace is not a good
idea, so we change the InstructionSet to an enum class.
This also uncovered a case of InstructionSet::kNone being
erroneously used instead of vixl32::Condition::None(), so
it's good to remove `kNone` from the `art` namespace.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Change-Id: I6fa6168dfba4ed6da86d021a69c80224f09997a6
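For context, here is a minimal standalone sketch of why a kLast enumerator makes it possible to pack an InstructionSet into a fixed number of bits. This is not ART's actual arch/instruction_set.h or its BitField<> template; the enumerator order and the helper names (BitsForInstructionSet, Pack, UnpackIsa) are illustrative assumptions chosen for this example.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative enum; the real definition lives in ART's arch/instruction_set.h.
enum class InstructionSet : uint8_t {
  kNone,
  kArm,
  kArm64,
  kThumb2,
  kX86,
  kX86_64,
  kMips,
  kMips64,
  kLast = kMips64  // Upper bound used to size the bit field.
};

// Number of bits needed to represent any enumerator in [kNone, kLast].
constexpr size_t BitsForInstructionSet() {
  size_t bits = 0;
  for (size_t v = static_cast<size_t>(InstructionSet::kLast); v != 0; v >>= 1) {
    ++bits;
  }
  return bits;
}

// Pack the ISA into the low bits of a word, leaving the remaining bits free
// for other fields (the role BitField<> plays in ART's encoding).
constexpr uint32_t Pack(InstructionSet isa, uint32_t other_fields) {
  return (other_fields << BitsForInstructionSet()) | static_cast<uint32_t>(isa);
}

constexpr InstructionSet UnpackIsa(uint32_t packed) {
  return static_cast<InstructionSet>(packed & ((1u << BitsForInstructionSet()) - 1u));
}

int main() {
  static_assert(BitsForInstructionSet() == 3, "kNone plus seven ISAs fit in 3 bits");
  uint32_t packed = Pack(InstructionSet::kThumb2, /* other_fields= */ 42u);
  std::printf("isa bits: %zu, decoded isa: %u\n",
              BitsForInstructionSet(),
              static_cast<unsigned>(UnpackIsa(packed)));
  return 0;
}
```

With an explicit upper bound, the field width can be computed at compile time rather than hard-coded, which is what encoding the InstructionSet via BitField<> relies on; scoping the enumerators inside the enum class is what keeps kLast (and kNone) out of the top-level art namespace.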
Diffstat (limited to 'compiler')
56 files changed, 299 insertions, 275 deletions
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h index 866a4d57a7..29ff235cea 100644 --- a/compiler/cfi_test.h +++ b/compiler/cfi_test.h @@ -68,7 +68,7 @@ class CFITest : public dwarf::DwarfTest { : &Thread::DumpThreadOffset<PointerSize::k32>); std::unique_ptr<Disassembler> disasm(Disassembler::Create(isa, opts)); std::stringstream stream; - const uint8_t* base = actual_asm.data() + (isa == kThumb2 ? 1 : 0); + const uint8_t* base = actual_asm.data() + (isa == InstructionSet::kThumb2 ? 1 : 0); disasm->Dump(stream, base, base + actual_asm.size()); ReformatAsm(&stream, &lines); // Print CFI and assembly interleaved. diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc index 111469fe89..fc6a717aa6 100644 --- a/compiler/compiled_method.cc +++ b/compiler/compiled_method.cc @@ -61,14 +61,14 @@ size_t CompiledCode::CodeDelta() const { size_t CompiledCode::CodeDelta(InstructionSet instruction_set) { switch (instruction_set) { - case kArm: - case kArm64: - case kMips: - case kMips64: - case kX86: - case kX86_64: + case InstructionSet::kArm: + case InstructionSet::kArm64: + case InstructionSet::kMips: + case InstructionSet::kMips64: + case InstructionSet::kX86: + case InstructionSet::kX86_64: return 0; - case kThumb2: { + case InstructionSet::kThumb2: { // +1 to set the low-order bit so a BLX will switch to Thumb mode return 1; } @@ -80,14 +80,14 @@ size_t CompiledCode::CodeDelta(InstructionSet instruction_set) { const void* CompiledCode::CodePointer(const void* code_pointer, InstructionSet instruction_set) { switch (instruction_set) { - case kArm: - case kArm64: - case kMips: - case kMips64: - case kX86: - case kX86_64: + case InstructionSet::kArm: + case InstructionSet::kArm64: + case InstructionSet::kMips: + case InstructionSet::kMips64: + case InstructionSet::kX86: + case InstructionSet::kX86_64: return code_pointer; - case kThumb2: { + case InstructionSet::kThumb2: { uintptr_t address = reinterpret_cast<uintptr_t>(code_pointer); // Set the low-order bit so a BLX will switch to Thumb mode address |= 0x1; diff --git a/compiler/debug/dwarf/dwarf_test.h b/compiler/debug/dwarf/dwarf_test.h index b30ff143d3..5405759c1f 100644 --- a/compiler/debug/dwarf/dwarf_test.h +++ b/compiler/debug/dwarf/dwarf_test.h @@ -60,7 +60,8 @@ class DwarfTest : public CommonRuntimeTest { template<typename ElfTypes> std::vector<std::string> Objdump(const char* args) { // Write simple elf file with just the DWARF sections. - InstructionSet isa = (sizeof(typename ElfTypes::Addr) == 8) ? kX86_64 : kX86; + InstructionSet isa = + (sizeof(typename ElfTypes::Addr) == 8) ? InstructionSet::kX86_64 : InstructionSet::kX86; ScratchFile file; linker::FileOutputStream output_stream(file.GetFile()); linker::ElfBuilder<ElfTypes> builder(isa, nullptr, &output_stream); diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h index 6dacdfa48c..d0c98a7b79 100644 --- a/compiler/debug/elf_debug_frame_writer.h +++ b/compiler/debug/elf_debug_frame_writer.h @@ -37,8 +37,8 @@ static void WriteCIE(InstructionSet isa, // debugger that its value in the previous frame is not recoverable. bool is64bit = Is64BitInstructionSet(isa); switch (isa) { - case kArm: - case kThumb2: { + case InstructionSet::kArm: + case InstructionSet::kThumb2: { dwarf::DebugFrameOpCodeWriter<> opcodes; opcodes.DefCFA(Reg::ArmCore(13), 0); // R13(SP). // core registers. 
@@ -61,7 +61,7 @@ static void WriteCIE(InstructionSet isa, WriteCIE(is64bit, return_reg, opcodes, format, buffer); return; } - case kArm64: { + case InstructionSet::kArm64: { dwarf::DebugFrameOpCodeWriter<> opcodes; opcodes.DefCFA(Reg::Arm64Core(31), 0); // R31(SP). // core registers. @@ -84,8 +84,8 @@ static void WriteCIE(InstructionSet isa, WriteCIE(is64bit, return_reg, opcodes, format, buffer); return; } - case kMips: - case kMips64: { + case InstructionSet::kMips: + case InstructionSet::kMips64: { dwarf::DebugFrameOpCodeWriter<> opcodes; opcodes.DefCFA(Reg::MipsCore(29), 0); // R29(SP). // core registers. @@ -108,7 +108,7 @@ static void WriteCIE(InstructionSet isa, WriteCIE(is64bit, return_reg, opcodes, format, buffer); return; } - case kX86: { + case InstructionSet::kX86: { // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296 constexpr bool generate_opcodes_for_x86_fp = false; dwarf::DebugFrameOpCodeWriter<> opcodes; @@ -134,7 +134,7 @@ static void WriteCIE(InstructionSet isa, WriteCIE(is64bit, return_reg, opcodes, format, buffer); return; } - case kX86_64: { + case InstructionSet::kX86_64: { dwarf::DebugFrameOpCodeWriter<> opcodes; opcodes.DefCFA(Reg::X86_64Core(4), 8); // R4(RSP). opcodes.Offset(Reg::X86_64Core(16), -8); // R16(RIP). @@ -160,7 +160,7 @@ static void WriteCIE(InstructionSet isa, WriteCIE(is64bit, return_reg, opcodes, format, buffer); return; } - case kNone: + case InstructionSet::kNone: break; } LOG(FATAL) << "Cannot write CIE frame for ISA " << isa; diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h index 49d52c45c2..6e72b46174 100644 --- a/compiler/debug/elf_debug_line_writer.h +++ b/compiler/debug/elf_debug_line_writer.h @@ -68,19 +68,19 @@ class ElfDebugLineWriter { int code_factor_bits_ = 0; int dwarf_isa = -1; switch (isa) { - case kArm: // arm actually means thumb2. - case kThumb2: + case InstructionSet::kArm: // arm actually means thumb2. + case InstructionSet::kThumb2: code_factor_bits_ = 1; // 16-bit instuctions dwarf_isa = 1; // DW_ISA_ARM_thumb. 
break; - case kArm64: - case kMips: - case kMips64: + case InstructionSet::kArm64: + case InstructionSet::kMips: + case InstructionSet::kMips64: code_factor_bits_ = 2; // 32-bit instructions break; - case kNone: - case kX86: - case kX86_64: + case InstructionSet::kNone: + case InstructionSet::kX86: + case InstructionSet::kX86_64: break; } std::unordered_set<uint64_t> seen_addresses(compilation_unit.methods.size()); diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h index bf47e8f3d9..bb856b29f4 100644 --- a/compiler/debug/elf_debug_loc_writer.h +++ b/compiler/debug/elf_debug_loc_writer.h @@ -33,20 +33,20 @@ using Reg = dwarf::Reg; static Reg GetDwarfCoreReg(InstructionSet isa, int machine_reg) { switch (isa) { - case kArm: - case kThumb2: + case InstructionSet::kArm: + case InstructionSet::kThumb2: return Reg::ArmCore(machine_reg); - case kArm64: + case InstructionSet::kArm64: return Reg::Arm64Core(machine_reg); - case kX86: + case InstructionSet::kX86: return Reg::X86Core(machine_reg); - case kX86_64: + case InstructionSet::kX86_64: return Reg::X86_64Core(machine_reg); - case kMips: + case InstructionSet::kMips: return Reg::MipsCore(machine_reg); - case kMips64: + case InstructionSet::kMips64: return Reg::Mips64Core(machine_reg); - case kNone: + case InstructionSet::kNone: LOG(FATAL) << "No instruction set"; } UNREACHABLE(); @@ -54,20 +54,20 @@ static Reg GetDwarfCoreReg(InstructionSet isa, int machine_reg) { static Reg GetDwarfFpReg(InstructionSet isa, int machine_reg) { switch (isa) { - case kArm: - case kThumb2: + case InstructionSet::kArm: + case InstructionSet::kThumb2: return Reg::ArmFp(machine_reg); - case kArm64: + case InstructionSet::kArm64: return Reg::Arm64Fp(machine_reg); - case kX86: + case InstructionSet::kX86: return Reg::X86Fp(machine_reg); - case kX86_64: + case InstructionSet::kX86_64: return Reg::X86_64Fp(machine_reg); - case kMips: + case InstructionSet::kMips: return Reg::MipsFp(machine_reg); - case kMips64: + case InstructionSet::kMips64: return Reg::Mips64Fp(machine_reg); - case kNone: + case InstructionSet::kNone: LOG(FATAL) << "No instruction set"; } UNREACHABLE(); @@ -230,7 +230,7 @@ static void WriteDebugLocEntry(const MethodDebugInfo* method_info, break; // the high word is correctly implied by the low word. } } else if (kind == Kind::kInFpuRegister) { - if ((isa == kArm || isa == kThumb2) && + if ((isa == InstructionSet::kArm || isa == InstructionSet::kThumb2) && piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegister && reg_hi.GetValue() == value + 1 && value % 2 == 0) { // Translate S register pair to D register (e.g. S4+S5 to D2). diff --git a/compiler/debug/elf_symtab_writer.h b/compiler/debug/elf_symtab_writer.h index b37f984860..0907e102a0 100644 --- a/compiler/debug/elf_symtab_writer.h +++ b/compiler/debug/elf_symtab_writer.h @@ -89,7 +89,7 @@ static void WriteDebugSymbols(linker::ElfBuilder<ElfTypes>* builder, // instructions, so that disassembler tools can correctly disassemble. // Note that even if we generate just a single mapping symbol, ARM's Streamline // requires it to match function symbol. Just address 0 does not work. 
- if (info.isa == kThumb2) { + if (info.isa == InstructionSet::kThumb2) { if (address < mapping_symbol_address || !kGenerateSingleArmMappingSymbol) { symtab->Add(strtab->Write("$t"), text, address & ~1, 0, STB_LOCAL, STT_NOTYPE); mapping_symbol_address = address; diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc index 7581962a86..a94dbe94ff 100644 --- a/compiler/dex/dex_to_dex_compiler.cc +++ b/compiler/dex/dex_to_dex_compiler.cc @@ -381,9 +381,9 @@ CompiledMethod* ArtCompileDEX( quicken_data.push_back(static_cast<uint8_t>(info.dex_member_index >> 8)); } InstructionSet instruction_set = driver->GetInstructionSet(); - if (instruction_set == kThumb2) { + if (instruction_set == InstructionSet::kThumb2) { // Don't use the thumb2 instruction set to avoid the one off code delta. - instruction_set = kArm; + instruction_set = InstructionSet::kArm; } return CompiledMethod::SwapAllocCompiledMethod( driver, diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc index e1ea6304eb..de481caf07 100644 --- a/compiler/driver/compiled_method_storage_test.cc +++ b/compiler/driver/compiled_method_storage_test.cc @@ -31,7 +31,7 @@ TEST(CompiledMethodStorage, Deduplicate) { CompilerDriver driver(&compiler_options, &verification_results, Compiler::kOptimizing, - /* instruction_set_ */ kNone, + /* instruction_set_ */ InstructionSet::kNone, /* instruction_set_features */ nullptr, /* image_classes */ nullptr, /* compiled_classes */ nullptr, @@ -91,7 +91,7 @@ TEST(CompiledMethodStorage, Deduplicate) { for (auto&& f : cfi_info) { for (auto&& p : patches) { compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod( - &driver, kNone, c, 0u, 0u, 0u, s, v, f, p)); + &driver, InstructionSet::kNone, c, 0u, 0u, 0u, s, v, f, p)); } } } diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 135f9c7b47..3d4da5edcf 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -290,7 +290,8 @@ CompilerDriver::CompilerDriver( verification_results_(verification_results), compiler_(Compiler::Create(this, compiler_kind)), compiler_kind_(compiler_kind), - instruction_set_(instruction_set == kArm ? kThumb2 : instruction_set), + instruction_set_( + instruction_set == InstructionSet::kArm ? InstructionSet::kThumb2 : instruction_set), instruction_set_features_(instruction_set_features), requires_constructor_barrier_lock_("constructor barrier lock"), non_relative_linker_patch_count_(0u), @@ -451,13 +452,13 @@ static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel( // GetQuickGenericJniStub allowing down calls that aren't compiled using a JNI compiler? 
static bool InstructionSetHasGenericJniStub(InstructionSet isa) { switch (isa) { - case kArm: - case kArm64: - case kThumb2: - case kMips: - case kMips64: - case kX86: - case kX86_64: return true; + case InstructionSet::kArm: + case InstructionSet::kArm64: + case InstructionSet::kThumb2: + case InstructionSet::kMips: + case InstructionSet::kMips64: + case InstructionSet::kX86: + case InstructionSet::kX86_64: return true; default: return false; } } diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc index b434e90f0d..897b50bdac 100644 --- a/compiler/exception_test.cc +++ b/compiler/exception_test.cc @@ -98,7 +98,7 @@ class ExceptionTest : public CommonRuntimeTest { static_cast<const void*>(fake_header_code_and_maps_.data() + (fake_header_code_and_maps_.size() - code_size))); - if (kRuntimeISA == kArm) { + if (kRuntimeISA == InstructionSet::kArm) { // Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer(). CHECK_ALIGNED(stack_maps_offset, 2); } diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc index 5b57718456..236b5c0c2e 100644 --- a/compiler/jni/jni_cfi_test.cc +++ b/compiler/jni/jni_cfi_test.cc @@ -102,13 +102,13 @@ class JNICFITest : public CFITest { } }; -#define TEST_ISA(isa) \ - TEST_F(JNICFITest, isa) { \ - std::vector<uint8_t> expected_asm(expected_asm_##isa, \ - expected_asm_##isa + arraysize(expected_asm_##isa)); \ - std::vector<uint8_t> expected_cfi(expected_cfi_##isa, \ - expected_cfi_##isa + arraysize(expected_cfi_##isa)); \ - TestImpl(isa, #isa, expected_asm, expected_cfi); \ +#define TEST_ISA(isa) \ + TEST_F(JNICFITest, isa) { \ + std::vector<uint8_t> expected_asm(expected_asm_##isa, \ + expected_asm_##isa + arraysize(expected_asm_##isa)); \ + std::vector<uint8_t> expected_cfi(expected_cfi_##isa, \ + expected_cfi_##isa + arraysize(expected_cfi_##isa)); \ + TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi); \ } #ifdef ART_ENABLE_CODEGEN_arm diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc index 42a5f86117..55c27d1a6a 100644 --- a/compiler/jni/quick/calling_convention.cc +++ b/compiler/jni/quick/calling_convention.cc @@ -54,38 +54,38 @@ std::unique_ptr<ManagedRuntimeCallingConvention> ManagedRuntimeCallingConvention InstructionSet instruction_set) { switch (instruction_set) { #ifdef ART_ENABLE_CODEGEN_arm - case kArm: - case kThumb2: + case InstructionSet::kArm: + case InstructionSet::kThumb2: return std::unique_ptr<ManagedRuntimeCallingConvention>( new (allocator) arm::ArmManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_arm64 - case kArm64: + case InstructionSet::kArm64: return std::unique_ptr<ManagedRuntimeCallingConvention>( new (allocator) arm64::Arm64ManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_mips - case kMips: + case InstructionSet::kMips: return std::unique_ptr<ManagedRuntimeCallingConvention>( new (allocator) mips::MipsManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_mips64 - case kMips64: + case InstructionSet::kMips64: return std::unique_ptr<ManagedRuntimeCallingConvention>( new (allocator) mips64::Mips64ManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_x86 - case kX86: + case InstructionSet::kX86: return std::unique_ptr<ManagedRuntimeCallingConvention>( new (allocator) x86::X86ManagedRuntimeCallingConvention( is_static, 
is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_x86_64 - case kX86_64: + case InstructionSet::kX86_64: return std::unique_ptr<ManagedRuntimeCallingConvention>( new (allocator) x86_64::X86_64ManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); @@ -156,38 +156,38 @@ std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocato InstructionSet instruction_set) { switch (instruction_set) { #ifdef ART_ENABLE_CODEGEN_arm - case kArm: - case kThumb2: + case InstructionSet::kArm: + case InstructionSet::kThumb2: return std::unique_ptr<JniCallingConvention>( new (allocator) arm::ArmJniCallingConvention( is_static, is_synchronized, is_critical_native, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_arm64 - case kArm64: + case InstructionSet::kArm64: return std::unique_ptr<JniCallingConvention>( new (allocator) arm64::Arm64JniCallingConvention( is_static, is_synchronized, is_critical_native, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_mips - case kMips: + case InstructionSet::kMips: return std::unique_ptr<JniCallingConvention>( new (allocator) mips::MipsJniCallingConvention( is_static, is_synchronized, is_critical_native, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_mips64 - case kMips64: + case InstructionSet::kMips64: return std::unique_ptr<JniCallingConvention>( new (allocator) mips64::Mips64JniCallingConvention( is_static, is_synchronized, is_critical_native, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_x86 - case kX86: + case InstructionSet::kX86: return std::unique_ptr<JniCallingConvention>( new (allocator) x86::X86JniCallingConvention( is_static, is_synchronized, is_critical_native, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_x86_64 - case kX86_64: + case InstructionSet::kX86_64: return std::unique_ptr<JniCallingConvention>( new (allocator) x86_64::X86_64JniCallingConvention( is_static, is_synchronized, is_critical_native, shorty)); diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index e32b681c5b..b3177aa471 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -323,7 +323,7 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, // Note that we always have outgoing param space available for at least two params. if (kUseReadBarrier && is_static && !is_critical_native) { const bool kReadBarrierFastPath = - (instruction_set != kMips) && (instruction_set != kMips64); + (instruction_set != InstructionSet::kMips) && (instruction_set != InstructionSet::kMips64); std::unique_ptr<JNIMacroLabel> skip_cold_path_label; if (kReadBarrierFastPath) { skip_cold_path_label = __ CreateLabel(); @@ -531,7 +531,8 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, if (LIKELY(!is_critical_native)) { // For normal JNI, store the return value on the stack because the call to // JniMethodEnd will clobber the return value. It will be restored in (13). 
- if ((instruction_set == kMips || instruction_set == kMips64) && + if ((instruction_set == InstructionSet::kMips || + instruction_set == InstructionSet::kMips64) && main_jni_conv->GetReturnType() == Primitive::kPrimDouble && return_save_location.Uint32Value() % 8 != 0) { // Ensure doubles are 8-byte aligned for MIPS diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc index 3d5683335a..48747fc379 100644 --- a/compiler/linker/arm/relative_patcher_thumb2.cc +++ b/compiler/linker/arm/relative_patcher_thumb2.cc @@ -47,7 +47,7 @@ constexpr uint32_t kMaxBcondPositiveDisplacement = (1u << 20) - 2u + kPcDisplace constexpr uint32_t kMaxBcondNegativeDisplacement = (1u << 20) - kPcDisplacement; Thumb2RelativePatcher::Thumb2RelativePatcher(RelativePatcherTargetProvider* provider) - : ArmBaseRelativePatcher(provider, kThumb2) { + : ArmBaseRelativePatcher(provider, InstructionSet::kThumb2) { } void Thumb2RelativePatcher::PatchCall(std::vector<uint8_t>* code, diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc index fe76dfe39a..2c22a352c2 100644 --- a/compiler/linker/arm/relative_patcher_thumb2_test.cc +++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc @@ -28,7 +28,7 @@ namespace linker { class Thumb2RelativePatcherTest : public RelativePatcherTest { public: - Thumb2RelativePatcherTest() : RelativePatcherTest(kThumb2, "default") { } + Thumb2RelativePatcherTest() : RelativePatcherTest(InstructionSet::kThumb2, "default") { } protected: static const uint8_t kCallRawCode[]; @@ -173,7 +173,8 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest { return false; // No thunk. } else { uint32_t thunk_end = - CompiledCode::AlignCode(method3_offset - sizeof(OatQuickMethodHeader), kThumb2) + + CompiledCode::AlignCode(method3_offset - sizeof(OatQuickMethodHeader), + InstructionSet::kThumb2) + MethodCallThunkSize(); uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end); CHECK_EQ(result3.second, header_offset + sizeof(OatQuickMethodHeader) + 1 /* thumb mode */); @@ -420,7 +421,8 @@ TEST_F(Thumb2RelativePatcherTest, CallTrampolineTooFar) { // Check linked code. uint32_t method3_offset = GetMethodOffset(3u); - uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2); + uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), + InstructionSet::kThumb2); uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */); ASSERT_EQ(diff & 1u, 0u); ASSERT_LT(diff >> 1, 1u << 8); // Simple encoding, (diff >> 1) fits into 8 bits. @@ -495,8 +497,7 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarAfter) { ASSERT_TRUE(IsAligned<kArmAlignment>(method3_offset)); uint32_t method3_header_offset = method3_offset - sizeof(OatQuickMethodHeader); uint32_t thunk_size = MethodCallThunkSize(); - uint32_t thunk_offset = - RoundDown(method3_header_offset - thunk_size, GetInstructionSetAlignment(kThumb2)); + uint32_t thunk_offset = RoundDown(method3_header_offset - thunk_size, kArmAlignment); DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size), method3_header_offset); ASSERT_TRUE(IsAligned<kArmAlignment>(thunk_offset)); @@ -527,7 +528,8 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarBefore) { // Check linked code. 
uint32_t method3_offset = GetMethodOffset(3u); - uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2); + uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), + InstructionSet::kThumb2); uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */); ASSERT_EQ(diff & 1u, 0u); ASSERT_LT(diff >> 1, 1u << 8); // Simple encoding, (diff >> 1) fits into 8 bits. diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc index 663e43b4ec..52a07965b9 100644 --- a/compiler/linker/arm64/relative_patcher_arm64.cc +++ b/compiler/linker/arm64/relative_patcher_arm64.cc @@ -76,7 +76,8 @@ inline uint32_t MaxExtraSpace(size_t num_adrp, size_t code_size) { if (num_adrp == 0u) { return 0u; } - uint32_t alignment_bytes = CompiledMethod::AlignCode(code_size, kArm64) - code_size; + uint32_t alignment_bytes = + CompiledMethod::AlignCode(code_size, InstructionSet::kArm64) - code_size; return kAdrpThunkSize * num_adrp + alignment_bytes; } @@ -84,7 +85,7 @@ inline uint32_t MaxExtraSpace(size_t num_adrp, size_t code_size) { Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherTargetProvider* provider, const Arm64InstructionSetFeatures* features) - : ArmBaseRelativePatcher(provider, kArm64), + : ArmBaseRelativePatcher(provider, InstructionSet::kArm64), fix_cortex_a53_843419_(features->NeedFixCortexA53_843419()), reserved_adrp_thunks_(0u), processed_adrp_thunks_(0u) { @@ -105,7 +106,8 @@ uint32_t Arm64RelativePatcher::ReserveSpace(uint32_t offset, // Add thunks for previous method if any. if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) { size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_; - offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks; + offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64) + + kAdrpThunkSize * num_adrp_thunks; reserved_adrp_thunks_ = adrp_thunk_locations_.size(); } @@ -149,7 +151,8 @@ uint32_t Arm64RelativePatcher::ReserveSpaceEnd(uint32_t offset) { // Add thunks for the last method if any. 
if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) { size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_; - offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks; + offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64) + + kAdrpThunkSize * num_adrp_thunks; reserved_adrp_thunks_ = adrp_thunk_locations_.size(); } } @@ -159,7 +162,7 @@ uint32_t Arm64RelativePatcher::ReserveSpaceEnd(uint32_t offset) { uint32_t Arm64RelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) { if (fix_cortex_a53_843419_) { if (!current_method_thunks_.empty()) { - uint32_t aligned_offset = CompiledMethod::AlignCode(offset, kArm64); + uint32_t aligned_offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64); if (kIsDebugBuild) { CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize); size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize; diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc index 8a5b4cc8e5..05459a2a82 100644 --- a/compiler/linker/arm64/relative_patcher_arm64_test.cc +++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc @@ -29,7 +29,7 @@ namespace linker { class Arm64RelativePatcherTest : public RelativePatcherTest { public: explicit Arm64RelativePatcherTest(const std::string& variant) - : RelativePatcherTest(kArm64, variant) { } + : RelativePatcherTest(InstructionSet::kArm64, variant) { } protected: static const uint8_t kCallRawCode[]; @@ -153,7 +153,8 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { // There may be a thunk before method2. if (last_result.second != last_method_offset) { // Thunk present. Check that there's only one. - uint32_t thunk_end = CompiledCode::AlignCode(gap_end, kArm64) + MethodCallThunkSize(); + uint32_t thunk_end = + CompiledCode::AlignCode(gap_end, InstructionSet::kArm64) + MethodCallThunkSize(); uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end); CHECK_EQ(last_result.second, header_offset + sizeof(OatQuickMethodHeader)); } @@ -347,7 +348,8 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { CHECK_EQ(compiled_method_refs_[0].index, 1u); CHECK_EQ(compiled_method_refs_.size(), compiled_methods_.size()); uint32_t method1_size = compiled_methods_[0]->GetQuickCode().size(); - uint32_t thunk_offset = CompiledCode::AlignCode(method1_offset + method1_size, kArm64); + uint32_t thunk_offset = + CompiledCode::AlignCode(method1_offset + method1_size, InstructionSet::kArm64); uint32_t b_diff = thunk_offset - (method1_offset + num_nops * 4u); CHECK_ALIGNED(b_diff, 4u); ASSERT_LT(b_diff, 128 * MB); @@ -602,7 +604,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallTrampolineTooFar) { // Check linked code. 
uint32_t thunk_offset = - CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64); + CompiledCode::AlignCode(last_method_offset + last_method_code.size(), InstructionSet::kArm64); uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method); CHECK_ALIGNED(diff, 4u); ASSERT_LT(diff, 128 * MB); @@ -688,8 +690,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarAfter) { ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_offset)); uint32_t last_method_header_offset = last_method_offset - sizeof(OatQuickMethodHeader); uint32_t thunk_size = MethodCallThunkSize(); - uint32_t thunk_offset = - RoundDown(last_method_header_offset - thunk_size, GetInstructionSetAlignment(kArm64)); + uint32_t thunk_offset = RoundDown(last_method_header_offset - thunk_size, kArm64Alignment); DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size), last_method_header_offset); uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1); @@ -721,7 +722,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarBefore) { // Check linked code. uint32_t thunk_offset = - CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64); + CompiledCode::AlignCode(last_method_offset + last_method_code.size(), InstructionSet::kArm64); uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method); CHECK_ALIGNED(diff, 4u); ASSERT_LT(diff, 128 * MB); diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h index 79412370bc..b30b55e9b4 100644 --- a/compiler/linker/elf_builder.h +++ b/compiler/linker/elf_builder.h @@ -417,10 +417,10 @@ class ElfBuilder FINAL { InstructionSet isa, const InstructionSetFeatures* features) : Section(owner, name, type, flags, link, info, align, entsize) { - if (isa == kMips || isa == kMips64) { + if (isa == InstructionSet::kMips || isa == InstructionSet::kMips64) { bool fpu32 = false; // assume mips64 values uint8_t isa_rev = 6; // assume mips64 values - if (isa == kMips) { + if (isa == InstructionSet::kMips) { // adjust for mips32 values fpu32 = features->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint(); isa_rev = features->AsMipsInstructionSetFeatures()->IsR6() @@ -430,14 +430,15 @@ class ElfBuilder FINAL { : 1; } abiflags_.version = 0; // version of flags structure - abiflags_.isa_level = (isa == kMips) ? 32 : 64; + abiflags_.isa_level = (isa == InstructionSet::kMips) ? 32 : 64; abiflags_.isa_rev = isa_rev; - abiflags_.gpr_size = (isa == kMips) ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64; + abiflags_.gpr_size = (isa == InstructionSet::kMips) ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64; abiflags_.cpr1_size = fpu32 ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64; abiflags_.cpr2_size = MIPS_AFL_REG_NONE; // Set the fp_abi to MIPS_ABI_FP_64A for mips32 with 64-bit FPUs (ie: mips32 R5 and R6). // Otherwise set to MIPS_ABI_FP_DOUBLE. - abiflags_.fp_abi = (isa == kMips && !fpu32) ? MIPS_ABI_FP_64A : MIPS_ABI_FP_DOUBLE; + abiflags_.fp_abi = + (isa == InstructionSet::kMips && !fpu32) ? 
MIPS_ABI_FP_64A : MIPS_ABI_FP_DOUBLE; abiflags_.isa_ext = 0; abiflags_.ases = 0; // To keep the code simple, we are not using odd FP reg for single floats for both @@ -689,7 +690,7 @@ class ElfBuilder FINAL { Elf_Word bss_address = RoundUp(text_address + text_size, kPageSize); Elf_Word abiflags_address = RoundUp(bss_address + bss_size, kPageSize); Elf_Word abiflags_size = 0; - if (isa_ == kMips || isa_ == kMips64) { + if (isa_ == InstructionSet::kMips || isa_ == InstructionSet::kMips64) { abiflags_size = abiflags_.GetSize(); } Elf_Word dynstr_address = RoundUp(abiflags_address + abiflags_size, kPageSize); @@ -835,29 +836,29 @@ class ElfBuilder FINAL { static Elf_Ehdr MakeElfHeader(InstructionSet isa, const InstructionSetFeatures* features) { Elf_Ehdr elf_header = Elf_Ehdr(); switch (isa) { - case kArm: + case InstructionSet::kArm: // Fall through. - case kThumb2: { + case InstructionSet::kThumb2: { elf_header.e_machine = EM_ARM; elf_header.e_flags = EF_ARM_EABI_VER5; break; } - case kArm64: { + case InstructionSet::kArm64: { elf_header.e_machine = EM_AARCH64; elf_header.e_flags = 0; break; } - case kX86: { + case InstructionSet::kX86: { elf_header.e_machine = EM_386; elf_header.e_flags = 0; break; } - case kX86_64: { + case InstructionSet::kX86_64: { elf_header.e_machine = EM_X86_64; elf_header.e_flags = 0; break; } - case kMips: { + case InstructionSet::kMips: { elf_header.e_machine = EM_MIPS; elf_header.e_flags = (EF_MIPS_NOREORDER | EF_MIPS_PIC | @@ -868,7 +869,7 @@ class ElfBuilder FINAL { : EF_MIPS_ARCH_32R2)); break; } - case kMips64: { + case InstructionSet::kMips64: { elf_header.e_machine = EM_MIPS; elf_header.e_flags = (EF_MIPS_NOREORDER | EF_MIPS_PIC | @@ -876,7 +877,7 @@ class ElfBuilder FINAL { EF_MIPS_ARCH_64R6); break; } - case kNone: { + case InstructionSet::kNone: { LOG(FATAL) << "No instruction set"; break; } diff --git a/compiler/linker/mips/relative_patcher_mips32r6_test.cc b/compiler/linker/mips/relative_patcher_mips32r6_test.cc index 586e2aa8b2..629fdd535d 100644 --- a/compiler/linker/mips/relative_patcher_mips32r6_test.cc +++ b/compiler/linker/mips/relative_patcher_mips32r6_test.cc @@ -22,7 +22,7 @@ namespace linker { class Mips32r6RelativePatcherTest : public RelativePatcherTest { public: - Mips32r6RelativePatcherTest() : RelativePatcherTest(kMips, "mips32r6") {} + Mips32r6RelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips, "mips32r6") {} protected: static const uint8_t kUnpatchedPcRelativeRawCode[]; diff --git a/compiler/linker/mips/relative_patcher_mips_test.cc b/compiler/linker/mips/relative_patcher_mips_test.cc index ebe5406512..d876c76daa 100644 --- a/compiler/linker/mips/relative_patcher_mips_test.cc +++ b/compiler/linker/mips/relative_patcher_mips_test.cc @@ -23,7 +23,7 @@ namespace linker { class MipsRelativePatcherTest : public RelativePatcherTest { public: - MipsRelativePatcherTest() : RelativePatcherTest(kMips, "mips32r2") {} + MipsRelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips, "mips32r2") {} protected: static const uint8_t kUnpatchedPcRelativeRawCode[]; diff --git a/compiler/linker/mips64/relative_patcher_mips64_test.cc b/compiler/linker/mips64/relative_patcher_mips64_test.cc index 4edcae72f6..a02f5005e8 100644 --- a/compiler/linker/mips64/relative_patcher_mips64_test.cc +++ b/compiler/linker/mips64/relative_patcher_mips64_test.cc @@ -23,7 +23,7 @@ namespace linker { class Mips64RelativePatcherTest : public RelativePatcherTest { public: - Mips64RelativePatcherTest() : RelativePatcherTest(kMips64, "default") {} + 
Mips64RelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips64, "default") {} protected: static const uint8_t kUnpatchedPcRelativeRawCode[]; diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc index dc15bb087e..13877f8f12 100644 --- a/compiler/linker/relative_patcher.cc +++ b/compiler/linker/relative_patcher.cc @@ -95,31 +95,31 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create( UNUSED(provider); switch (instruction_set) { #ifdef ART_ENABLE_CODEGEN_x86 - case kX86: + case InstructionSet::kX86: return std::unique_ptr<RelativePatcher>(new X86RelativePatcher()); #endif #ifdef ART_ENABLE_CODEGEN_x86_64 - case kX86_64: + case InstructionSet::kX86_64: return std::unique_ptr<RelativePatcher>(new X86_64RelativePatcher()); #endif #ifdef ART_ENABLE_CODEGEN_arm - case kArm: + case InstructionSet::kArm: // Fall through: we generate Thumb2 code for "arm". - case kThumb2: + case InstructionSet::kThumb2: return std::unique_ptr<RelativePatcher>(new Thumb2RelativePatcher(provider)); #endif #ifdef ART_ENABLE_CODEGEN_arm64 - case kArm64: + case InstructionSet::kArm64: return std::unique_ptr<RelativePatcher>( new Arm64RelativePatcher(provider, features->AsArm64InstructionSetFeatures())); #endif #ifdef ART_ENABLE_CODEGEN_mips - case kMips: + case InstructionSet::kMips: return std::unique_ptr<RelativePatcher>( new MipsRelativePatcher(features->AsMipsInstructionSetFeatures())); #endif #ifdef ART_ENABLE_CODEGEN_mips64 - case kMips64: + case InstructionSet::kMips64: return std::unique_ptr<RelativePatcher>(new Mips64RelativePatcher()); #endif default: diff --git a/compiler/linker/x86/relative_patcher_x86_test.cc b/compiler/linker/x86/relative_patcher_x86_test.cc index 4f74cee384..b855dec91d 100644 --- a/compiler/linker/x86/relative_patcher_x86_test.cc +++ b/compiler/linker/x86/relative_patcher_x86_test.cc @@ -23,7 +23,7 @@ namespace linker { class X86RelativePatcherTest : public RelativePatcherTest { public: - X86RelativePatcherTest() : RelativePatcherTest(kX86, "default") { } + X86RelativePatcherTest() : RelativePatcherTest(InstructionSet::kX86, "default") { } protected: static const uint8_t kCallRawCode[]; diff --git a/compiler/linker/x86_64/relative_patcher_x86_64_test.cc b/compiler/linker/x86_64/relative_patcher_x86_64_test.cc index ae17aa7a5f..6baa92de36 100644 --- a/compiler/linker/x86_64/relative_patcher_x86_64_test.cc +++ b/compiler/linker/x86_64/relative_patcher_x86_64_test.cc @@ -23,7 +23,7 @@ namespace linker { class X86_64RelativePatcherTest : public RelativePatcherTest { public: - X86_64RelativePatcherTest() : RelativePatcherTest(kX86_64, "default") { } + X86_64RelativePatcherTest() : RelativePatcherTest(InstructionSet::kX86_64, "default") { } protected: static const uint8_t kCallRawCode[]; diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index b8d1f52995..5625f04726 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -786,43 +786,43 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph, ArenaAllocator* allocator = graph->GetAllocator(); switch (instruction_set) { #ifdef ART_ENABLE_CODEGEN_arm - case kArm: - case kThumb2: { + case InstructionSet::kArm: + case InstructionSet::kThumb2: { return std::unique_ptr<CodeGenerator>( new (allocator) arm::CodeGeneratorARMVIXL( graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats)); } #endif #ifdef ART_ENABLE_CODEGEN_arm64 - case kArm64: { + case InstructionSet::kArm64: { return 
std::unique_ptr<CodeGenerator>( new (allocator) arm64::CodeGeneratorARM64( graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats)); } #endif #ifdef ART_ENABLE_CODEGEN_mips - case kMips: { + case InstructionSet::kMips: { return std::unique_ptr<CodeGenerator>( new (allocator) mips::CodeGeneratorMIPS( graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats)); } #endif #ifdef ART_ENABLE_CODEGEN_mips64 - case kMips64: { + case InstructionSet::kMips64: { return std::unique_ptr<CodeGenerator>( new (allocator) mips64::CodeGeneratorMIPS64( graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats)); } #endif #ifdef ART_ENABLE_CODEGEN_x86 - case kX86: { + case InstructionSet::kX86: { return std::unique_ptr<CodeGenerator>( new (allocator) x86::CodeGeneratorX86( graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats)); } #endif #ifdef ART_ENABLE_CODEGEN_x86_64 - case kX86_64: { + case InstructionSet::kX86_64: { return std::unique_ptr<CodeGenerator>( new (allocator) x86_64::CodeGeneratorX86_64( graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats)); diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h index 64c88eb67c..18ad60db87 100644 --- a/compiler/optimizing/code_generator.h +++ b/compiler/optimizing/code_generator.h @@ -626,7 +626,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { bool CallPushesPC() const { InstructionSet instruction_set = GetInstructionSet(); - return instruction_set == kX86 || instruction_set == kX86_64; + return instruction_set == InstructionSet::kX86 || instruction_set == InstructionSet::kX86_64; } // Arm64 has its own type for a label, so we need to templatize these methods diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index c7811ab976..e01b7b78cb 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -1557,12 +1557,13 @@ void CodeGeneratorARM64::GenerateFrameEntry() { MacroAssembler* masm = GetVIXLAssembler(); __ Bind(&frame_entry_label_); - bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod(); + bool do_overflow_check = + FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm64) || !IsLeafMethod(); if (do_overflow_check) { UseScratchRegisterScope temps(masm); Register temp = temps.AcquireX(); DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks()); - __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64))); + __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kArm64))); { // Ensure that between load and RecordPcInfo there are no pools emitted. ExactAssemblyScope eas(GetVIXLAssembler(), diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index 90f3ae8a01..edd307263d 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -2568,7 +2568,7 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() { if (!skip_overflow_check) { UseScratchRegisterScope temps(GetVIXLAssembler()); vixl32::Register temp = temps.Acquire(); - __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(kArm))); + __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(InstructionSet::kArm))); // The load must immediately precede RecordPcInfo. 
ExactAssemblyScope aas(GetVIXLAssembler(), vixl32::kMaxInstructionSizeInBytes, @@ -5303,7 +5303,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) { vixl32::Label less, greater, done; vixl32::Label* final_label = codegen_->GetFinalLabel(compare, &done); DataType::Type type = compare->InputAt(0)->GetType(); - vixl32::Condition less_cond = vixl32::Condition(kNone); + vixl32::Condition less_cond = vixl32::Condition::None(); switch (type) { case DataType::Type::kBool: case DataType::Type::kUint8: diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index 2f65e8c958..b3fed079d8 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -1132,7 +1132,7 @@ void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) { StackMapStream* stack_map_stream = GetStackMapStream(); for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) { uint32_t old_position = - stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips); + stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips); uint32_t new_position = __ GetAdjustedPosition(old_position); DCHECK_GE(new_position, old_position); stack_map_stream->SetStackMapNativePcOffset(i, new_position); @@ -1347,13 +1347,14 @@ static dwarf::Reg DWARFReg(Register reg) { void CodeGeneratorMIPS::GenerateFrameEntry() { __ Bind(&frame_entry_label_); - bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips) || !IsLeafMethod(); + bool do_overflow_check = + FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips) || !IsLeafMethod(); if (do_overflow_check) { __ LoadFromOffset(kLoadWord, ZERO, SP, - -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips))); + -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips))); RecordPcInfo(nullptr, 0); } @@ -1365,8 +1366,9 @@ void CodeGeneratorMIPS::GenerateFrameEntry() { } // Make sure the frame size isn't unreasonably large. - if (GetFrameSize() > GetStackOverflowReservedBytes(kMips)) { - LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips) << " bytes"; + if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips)) { + LOG(FATAL) << "Stack frame larger than " + << GetStackOverflowReservedBytes(InstructionSet::kMips) << " bytes"; } // Spill callee-saved registers. 
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 6cbfa14f15..53a7f26c81 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -1076,7 +1076,7 @@ void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) { StackMapStream* stack_map_stream = GetStackMapStream(); for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) { uint32_t old_position = - stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64); + stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips64); uint32_t new_position = __ GetAdjustedPosition(old_position); DCHECK_GE(new_position, old_position); stack_map_stream->SetStackMapNativePcOffset(i, new_position); @@ -1161,13 +1161,15 @@ static dwarf::Reg DWARFReg(FpuRegister reg) { void CodeGeneratorMIPS64::GenerateFrameEntry() { __ Bind(&frame_entry_label_); - bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod(); + bool do_overflow_check = + FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips64) || !IsLeafMethod(); if (do_overflow_check) { - __ LoadFromOffset(kLoadWord, - ZERO, - SP, - -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64))); + __ LoadFromOffset( + kLoadWord, + ZERO, + SP, + -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips64))); RecordPcInfo(nullptr, 0); } @@ -1176,8 +1178,9 @@ void CodeGeneratorMIPS64::GenerateFrameEntry() { } // Make sure the frame size isn't unreasonably large. - if (GetFrameSize() > GetStackOverflowReservedBytes(kMips64)) { - LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips64) << " bytes"; + if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips64)) { + LOG(FATAL) << "Stack frame larger than " + << GetStackOverflowReservedBytes(InstructionSet::kMips64) << " bytes"; } // Spill callee-saved registers. 
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 44614e1630..f84dd0045e 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -1072,7 +1072,8 @@ void CodeGeneratorX86::GenerateFrameEntry() { DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks()); if (!skip_overflow_check) { - __ testl(EAX, Address(ESP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86)))); + size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86); + __ testl(EAX, Address(ESP, -static_cast<int32_t>(reserved_bytes))); RecordPcInfo(nullptr, 0); } diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 259bb4a9a9..16d1f183a1 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -1277,8 +1277,8 @@ void CodeGeneratorX86_64::GenerateFrameEntry() { DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks()); if (!skip_overflow_check) { - __ testq(CpuRegister(RAX), Address( - CpuRegister(RSP), -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86_64)))); + size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86_64); + __ testq(CpuRegister(RAX), Address(CpuRegister(RSP), -static_cast<int32_t>(reserved_bytes))); RecordPcInfo(nullptr, 0); } diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc index e35c7c734b..ba431a5b08 100644 --- a/compiler/optimizing/codegen_test.cc +++ b/compiler/optimizing/codegen_test.cc @@ -44,22 +44,22 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() { ::std::vector<CodegenTargetConfig> test_config_candidates = { #ifdef ART_ENABLE_CODEGEN_arm // TODO: Should't this be `kThumb2` instead of `kArm` here? - CodegenTargetConfig(kArm, create_codegen_arm_vixl32), + CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32), #endif #ifdef ART_ENABLE_CODEGEN_arm64 - CodegenTargetConfig(kArm64, create_codegen_arm64), + CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64), #endif #ifdef ART_ENABLE_CODEGEN_x86 - CodegenTargetConfig(kX86, create_codegen_x86), + CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86), #endif #ifdef ART_ENABLE_CODEGEN_x86_64 - CodegenTargetConfig(kX86_64, create_codegen_x86_64), + CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64), #endif #ifdef ART_ENABLE_CODEGEN_mips - CodegenTargetConfig(kMips, create_codegen_mips), + CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips), #endif #ifdef ART_ENABLE_CODEGEN_mips64 - CodegenTargetConfig(kMips64, create_codegen_mips64) + CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64) #endif }; @@ -825,7 +825,7 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) { TEST_F(CodegenTest, MipsClobberRA) { std::unique_ptr<const MipsInstructionSetFeatures> features_mips( MipsInstructionSetFeatures::FromCppDefines()); - if (!CanExecute(kMips) || features_mips->IsR6()) { + if (!CanExecute(InstructionSet::kMips) || features_mips->IsR6()) { // HMipsComputeBaseMethodAddress and the NAL instruction behind it // should only be generated on non-R6. 
return; diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h index bcbcc12349..c41c290c8b 100644 --- a/compiler/optimizing/codegen_test_utils.h +++ b/compiler/optimizing/codegen_test_utils.h @@ -207,7 +207,7 @@ class InternalCodeAllocator : public CodeAllocator { static bool CanExecuteOnHardware(InstructionSet target_isa) { return (target_isa == kRuntimeISA) // Handle the special case of ARM, with two instructions sets (ARM32 and Thumb-2). - || (kRuntimeISA == kArm && target_isa == kThumb2); + || (kRuntimeISA == InstructionSet::kArm && target_isa == InstructionSet::kThumb2); } static bool CanExecute(InstructionSet target_isa) { @@ -271,7 +271,7 @@ static void Run(const InternalCodeAllocator& allocator, typedef Expected (*fptr)(); CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize()); fptr f = reinterpret_cast<fptr>(allocator.GetMemory()); - if (target_isa == kThumb2) { + if (target_isa == InstructionSet::kThumb2) { // For thumb we need the bottom bit set. f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1); } diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h index 102acb3423..ed2f8e995d 100644 --- a/compiler/optimizing/common_arm64.h +++ b/compiler/optimizing/common_arm64.h @@ -342,7 +342,7 @@ inline vixl::aarch64::Extend ExtendFromOpKind(HDataProcWithShifterOp::OpKind op_ } inline bool ShifterOperandSupportsExtension(HInstruction* instruction) { - DCHECK(HasShifterOperand(instruction, kArm64)); + DCHECK(HasShifterOperand(instruction, InstructionSet::kArm64)); // Although the `neg` instruction is an alias of the `sub` instruction, `HNeg` // does *not* support extension. This is because the `extended register` form // of the `sub` instruction interprets the left register with code 31 as the diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index f7fd9101fd..12c69889ab 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -153,7 +153,7 @@ class HGraphVisualizerDisassembler { } const uint8_t* base = disassembler_->GetDisassemblerOptions()->base_address_; - if (instruction_set_ == kThumb2) { + if (instruction_set_ == InstructionSet::kThumb2) { // ARM and Thumb-2 use the same disassembler. The bottom bit of the // address is used to distinguish between the two. 
base += 1; diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index 189d5aea56..2bd2d5f0a1 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -250,7 +250,7 @@ bool InstructionSimplifierVisitor::TryCombineVecMultiplyAccumulate(HVecMul* mul) DataType::Type type = mul->GetPackedType(); InstructionSet isa = codegen_->GetInstructionSet(); switch (isa) { - case kArm64: + case InstructionSet::kArm64: if (!(type == DataType::Type::kUint8 || type == DataType::Type::kInt8 || type == DataType::Type::kUint16 || @@ -259,8 +259,8 @@ bool InstructionSimplifierVisitor::TryCombineVecMultiplyAccumulate(HVecMul* mul) return false; } break; - case kMips: - case kMips64: + case InstructionSet::kMips: + case InstructionSet::kMips64: if (!(type == DataType::Type::kUint8 || type == DataType::Type::kInt8 || type == DataType::Type::kUint16 || diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc index 9422f9f30c..d41e49a0f3 100644 --- a/compiler/optimizing/instruction_simplifier_arm.cc +++ b/compiler/optimizing/instruction_simplifier_arm.cc @@ -84,7 +84,7 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor { bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op, bool do_merge) { - DCHECK(HasShifterOperand(use, kArm)); + DCHECK(HasShifterOperand(use, InstructionSet::kArm)); DCHECK(use->IsBinaryOperation()); DCHECK(CanFitInShifterOperand(bitfield_op)); DCHECK(!bitfield_op->HasEnvironmentUses()); @@ -166,7 +166,7 @@ bool InstructionSimplifierArmVisitor::TryMergeIntoUsersShifterOperand(HInstructi // Check whether we can merge the instruction in all its users' shifter operand. for (const HUseListNode<HInstruction*>& use : uses) { HInstruction* user = use.GetUser(); - if (!HasShifterOperand(user, kArm)) { + if (!HasShifterOperand(user, InstructionSet::kArm)) { return false; } if (!CanMergeIntoShifterOperand(user, bitfield_op)) { @@ -242,7 +242,7 @@ void InstructionSimplifierArmVisitor::VisitArraySet(HArraySet* instruction) { } void InstructionSimplifierArmVisitor::VisitMul(HMul* instruction) { - if (TryCombineMultiplyAccumulate(instruction, kArm)) { + if (TryCombineMultiplyAccumulate(instruction, InstructionSet::kArm)) { RecordSimplification(); } } diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc index c0ab68fec2..69e1463ac4 100644 --- a/compiler/optimizing/instruction_simplifier_arm64.cc +++ b/compiler/optimizing/instruction_simplifier_arm64.cc @@ -90,7 +90,7 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor { bool InstructionSimplifierArm64Visitor::TryMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op, bool do_merge) { - DCHECK(HasShifterOperand(use, kArm64)); + DCHECK(HasShifterOperand(use, InstructionSet::kArm64)); DCHECK(use->IsBinaryOperation() || use->IsNeg()); DCHECK(CanFitInShifterOperand(bitfield_op)); DCHECK(!bitfield_op->HasEnvironmentUses()); @@ -170,7 +170,7 @@ bool InstructionSimplifierArm64Visitor::TryMergeIntoUsersShifterOperand(HInstruc // Check whether we can merge the instruction in all its users' shifter operand. 
for (const HUseListNode<HInstruction*>& use : uses) { HInstruction* user = use.GetUser(); - if (!HasShifterOperand(user, kArm64)) { + if (!HasShifterOperand(user, InstructionSet::kArm64)) { return false; } if (!CanMergeIntoShifterOperand(user, bitfield_op)) { @@ -218,7 +218,7 @@ void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) { } void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) { - if (TryCombineMultiplyAccumulate(instruction, kArm64)) { + if (TryCombineMultiplyAccumulate(instruction, InstructionSet::kArm64)) { RecordSimplification(); } } diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc index 1c13084a48..ccdcb3532d 100644 --- a/compiler/optimizing/instruction_simplifier_shared.cc +++ b/compiler/optimizing/instruction_simplifier_shared.cc @@ -90,13 +90,13 @@ bool TrySimpleMultiplyAccumulatePatterns(HMul* mul, bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) { DataType::Type type = mul->GetType(); switch (isa) { - case kArm: - case kThumb2: + case InstructionSet::kArm: + case InstructionSet::kThumb2: if (type != DataType::Type::kInt32) { return false; } break; - case kArm64: + case InstructionSet::kArm64: if (!DataType::IsIntOrLongType(type)) { return false; } @@ -148,7 +148,7 @@ bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) { mul->GetBlock()->RemoveInstruction(mul); return true; } - } else if (use->IsNeg() && isa != kArm) { + } else if (use->IsNeg() && isa != InstructionSet::kArm) { HMultiplyAccumulate* mulacc = new (allocator) HMultiplyAccumulate(type, HInstruction::kSub, diff --git a/compiler/optimizing/instruction_simplifier_shared.h b/compiler/optimizing/instruction_simplifier_shared.h index b016a8769e..758fc7663d 100644 --- a/compiler/optimizing/instruction_simplifier_shared.h +++ b/compiler/optimizing/instruction_simplifier_shared.h @@ -41,7 +41,8 @@ inline bool CanFitInShifterOperand(HInstruction* instruction) { inline bool HasShifterOperand(HInstruction* instr, InstructionSet isa) { // On ARM64 `neg` instructions are an alias of `sub` using the zero register // as the first register input. - bool res = instr->IsAdd() || instr->IsAnd() || (isa == kArm64 && instr->IsNeg()) || + bool res = instr->IsAdd() || instr->IsAnd() || + (isa == InstructionSet::kArm64 && instr->IsNeg()) || instr->IsOr() || instr->IsSub() || instr->IsXor(); return res; } diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc index 74de0773fc..c672dae1d7 100644 --- a/compiler/optimizing/loop_optimization.cc +++ b/compiler/optimizing/loop_optimization.cc @@ -1414,8 +1414,8 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node, uint32_t HLoopOptimization::GetVectorSizeInBytes() { switch (compiler_driver_->GetInstructionSet()) { - case kArm: - case kThumb2: + case InstructionSet::kArm: + case InstructionSet::kThumb2: return 8; // 64-bit SIMD default: return 16; // 128-bit SIMD @@ -1425,8 +1425,8 @@ uint32_t HLoopOptimization::GetVectorSizeInBytes() { bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) { const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures(); switch (compiler_driver_->GetInstructionSet()) { - case kArm: - case kThumb2: + case InstructionSet::kArm: + case InstructionSet::kThumb2: // Allow vectorization for all ARM devices, because Android assumes that // ARM 32-bit always supports advanced SIMD (64-bit SIMD). 
switch (type) { @@ -1446,7 +1446,7 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict break; } return false; - case kArm64: + case InstructionSet::kArm64: // Allow vectorization for all ARM devices, because Android assumes that // ARMv8 AArch64 always supports advanced SIMD (128-bit SIMD). switch (type) { @@ -1474,8 +1474,8 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict default: return false; } - case kX86: - case kX86_64: + case InstructionSet::kX86: + case InstructionSet::kX86_64: // Allow vectorization for SSE4.1-enabled X86 devices only (128-bit SIMD). if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) { switch (type) { @@ -1506,7 +1506,7 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict } // switch type } return false; - case kMips: + case InstructionSet::kMips: if (features->AsMipsInstructionSetFeatures()->HasMsa()) { switch (type) { case DataType::Type::kBool: @@ -1535,7 +1535,7 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict } // switch type } return false; - case kMips64: + case InstructionSet::kMips64: if (features->AsMips64InstructionSetFeatures()->HasMsa()) { switch (type) { case DataType::Type::kBool: @@ -2170,7 +2170,7 @@ static constexpr uint32_t ARM64_SIMD_HEURISTIC_MAX_BODY_SIZE = 50; uint32_t HLoopOptimization::GetUnrollingFactor(HBasicBlock* block, int64_t trip_count) { uint32_t max_peel = MaxNumberPeeled(); switch (compiler_driver_->GetInstructionSet()) { - case kArm64: { + case InstructionSet::kArm64: { // Don't unroll with insufficient iterations. // TODO: Unroll loops with unknown trip count. DCHECK_NE(vector_length_, 0u); @@ -2192,8 +2192,8 @@ uint32_t HLoopOptimization::GetUnrollingFactor(HBasicBlock* block, int64_t trip_ DCHECK_GE(unroll_factor, 1u); return unroll_factor; } - case kX86: - case kX86_64: + case InstructionSet::kX86: + case InstructionSet::kX86_64: default: return kNoUnrollingFactor; } diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc index b7380b0a49..4ad29961be 100644 --- a/compiler/optimizing/optimizing_cfi_test.cc +++ b/compiler/optimizing/optimizing_cfi_test.cc @@ -153,15 +153,15 @@ class OptimizingCFITest : public CFITest { InternalCodeAllocator code_allocator_; }; -#define TEST_ISA(isa) \ - TEST_F(OptimizingCFITest, isa) { \ - std::vector<uint8_t> expected_asm( \ - expected_asm_##isa, \ - expected_asm_##isa + arraysize(expected_asm_##isa)); \ - std::vector<uint8_t> expected_cfi( \ - expected_cfi_##isa, \ - expected_cfi_##isa + arraysize(expected_cfi_##isa)); \ - TestImpl(isa, #isa, expected_asm, expected_cfi); \ +#define TEST_ISA(isa) \ + TEST_F(OptimizingCFITest, isa) { \ + std::vector<uint8_t> expected_asm( \ + expected_asm_##isa, \ + expected_asm_##isa + arraysize(expected_asm_##isa)); \ + std::vector<uint8_t> expected_cfi( \ + expected_cfi_##isa, \ + expected_cfi_##isa + arraysize(expected_cfi_##isa)); \ + TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi); \ } #ifdef ART_ENABLE_CODEGEN_arm @@ -204,7 +204,7 @@ TEST_F(OptimizingCFITest, kThumb2Adjust) { std::vector<uint8_t> expected_cfi( expected_cfi_kThumb2_adjust, expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust)); - SetUpFrame(kThumb2); + SetUpFrame(InstructionSet::kThumb2); #define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \ ->GetAssembler())->GetVIXLAssembler()-> vixl32::Label target; @@ -216,7 +216,7 @@ TEST_F(OptimizingCFITest, kThumb2Adjust) { 
__ Bind(&target); #undef __ Finish(); - Check(kThumb2, "kThumb2_adjust", expected_asm, expected_cfi); + Check(InstructionSet::kThumb2, "kThumb2_adjust", expected_asm, expected_cfi); } #endif @@ -235,7 +235,7 @@ TEST_F(OptimizingCFITest, kMipsAdjust) { std::vector<uint8_t> expected_cfi( expected_cfi_kMips_adjust, expected_cfi_kMips_adjust + arraysize(expected_cfi_kMips_adjust)); - SetUpFrame(kMips); + SetUpFrame(InstructionSet::kMips); #define __ down_cast<mips::MipsAssembler*>(GetCodeGenerator()->GetAssembler())-> mips::MipsLabel target; __ Beqz(mips::A0, &target); @@ -246,7 +246,7 @@ TEST_F(OptimizingCFITest, kMipsAdjust) { __ Bind(&target); #undef __ Finish(); - Check(kMips, "kMips_adjust", expected_asm, expected_cfi); + Check(InstructionSet::kMips, "kMips_adjust", expected_asm, expected_cfi); } #endif @@ -265,7 +265,7 @@ TEST_F(OptimizingCFITest, kMips64Adjust) { std::vector<uint8_t> expected_cfi( expected_cfi_kMips64_adjust, expected_cfi_kMips64_adjust + arraysize(expected_cfi_kMips64_adjust)); - SetUpFrame(kMips64); + SetUpFrame(InstructionSet::kMips64); #define __ down_cast<mips64::Mips64Assembler*>(GetCodeGenerator()->GetAssembler())-> mips64::Mips64Label target; __ Beqc(mips64::A1, mips64::A2, &target); @@ -276,7 +276,7 @@ TEST_F(OptimizingCFITest, kMips64Adjust) { __ Bind(&target); #undef __ Finish(); - Check(kMips64, "kMips64_adjust", expected_asm, expected_cfi); + Check(InstructionSet::kMips64, "kMips64_adjust", expected_asm, expected_cfi); } #endif diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 29319f8c38..9233eb5baf 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -437,13 +437,13 @@ bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED, } static bool IsInstructionSetSupported(InstructionSet instruction_set) { - return instruction_set == kArm - || instruction_set == kArm64 - || instruction_set == kThumb2 - || instruction_set == kMips - || instruction_set == kMips64 - || instruction_set == kX86 - || instruction_set == kX86_64; + return instruction_set == InstructionSet::kArm + || instruction_set == InstructionSet::kArm64 + || instruction_set == InstructionSet::kThumb2 + || instruction_set == InstructionSet::kMips + || instruction_set == InstructionSet::kMips64 + || instruction_set == InstructionSet::kX86 + || instruction_set == InstructionSet::kX86_64; } // Strip pass name suffix to get optimization name. 
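For context on the hunk just above: every rewritten comparison and switch now spells the fully qualified InstructionSet:: name, which is exactly what a scoped enum (enum class) requires, and it is why the conversion touches predicates like IsInstructionSetSupported() in so many files. A minimal, self-contained sketch of that pattern follows; the enumerator list and the IsSupported name are illustrative stand-ins, not the actual ART definitions.

#include <cstdint>

// Illustrative stand-in only: a hypothetical subset of the enumerators
// appearing in the hunks, not the real ART definition.
enum class InstructionSet : uint8_t { kArm, kThumb2, kArm64, kX86, kX86_64 };

// Same shape as the predicates rewritten above: with a scoped enum the
// enumerators must be written fully qualified.
constexpr bool IsSupported(InstructionSet isa) {
  return isa == InstructionSet::kArm
      || isa == InstructionSet::kThumb2
      || isa == InstructionSet::kArm64
      || isa == InstructionSet::kX86
      || isa == InstructionSet::kX86_64;
}

static_assert(IsSupported(InstructionSet::kArm64), "qualified enumerator names resolve");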
@@ -637,8 +637,8 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, ArenaAllocator* allocator = graph->GetAllocator(); switch (instruction_set) { #if defined(ART_ENABLE_CODEGEN_arm) - case kThumb2: - case kArm: { + case InstructionSet::kThumb2: + case InstructionSet::kArm: { arm::InstructionSimplifierArm* simplifier = new (allocator) arm::InstructionSimplifierArm(graph, stats); SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph); @@ -657,7 +657,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, } #endif #ifdef ART_ENABLE_CODEGEN_arm64 - case kArm64: { + case InstructionSet::kArm64: { arm64::InstructionSimplifierArm64* simplifier = new (allocator) arm64::InstructionSimplifierArm64(graph, stats); SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph); @@ -676,7 +676,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, } #endif #ifdef ART_ENABLE_CODEGEN_mips - case kMips: { + case InstructionSet::kMips: { mips::InstructionSimplifierMips* simplifier = new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats); SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph); @@ -695,7 +695,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, } #endif #ifdef ART_ENABLE_CODEGEN_mips64 - case kMips64: { + case InstructionSet::kMips64: { SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph); GVNOptimization* gvn = new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch"); @@ -708,7 +708,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, } #endif #ifdef ART_ENABLE_CODEGEN_x86 - case kX86: { + case InstructionSet::kX86: { SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph); GVNOptimization* gvn = new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch"); @@ -727,7 +727,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set, } #endif #ifdef ART_ENABLE_CODEGEN_x86_64 - case kX86_64: { + case InstructionSet::kX86_64: { SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph); GVNOptimization* gvn = new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch"); @@ -949,7 +949,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator, // Always use the Thumb-2 assembler: some runtime functionality // (like implicit stack overflow checks) assume Thumb-2. - DCHECK_NE(instruction_set, kArm); + DCHECK_NE(instruction_set, InstructionSet::kArm); // Do not attempt to compile on architectures we do not support. 
if (!IsInstructionSetSupported(instruction_set)) { diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc index 86e971353f..bad73e1b61 100644 --- a/compiler/optimizing/register_allocator.cc +++ b/compiler/optimizing/register_allocator.cc @@ -70,13 +70,13 @@ RegisterAllocator::~RegisterAllocator() { bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED, InstructionSet instruction_set) { - return instruction_set == kArm - || instruction_set == kArm64 - || instruction_set == kMips - || instruction_set == kMips64 - || instruction_set == kThumb2 - || instruction_set == kX86 - || instruction_set == kX86_64; + return instruction_set == InstructionSet::kArm + || instruction_set == InstructionSet::kArm64 + || instruction_set == InstructionSet::kMips + || instruction_set == InstructionSet::kMips64 + || instruction_set == InstructionSet::kThumb2 + || instruction_set == InstructionSet::kX86 + || instruction_set == InstructionSet::kX86_64; } class AllRangesIterator : public ValueObject { diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc index 57eb7623b1..8cc376c3a6 100644 --- a/compiler/optimizing/scheduler.cc +++ b/compiler/optimizing/scheduler.cc @@ -796,7 +796,7 @@ void HInstructionScheduling::Run(bool only_optimize_loop_blocks, switch (instruction_set_) { #ifdef ART_ENABLE_CODEGEN_arm64 - case kArm64: { + case InstructionSet::kArm64: { arm64::HSchedulerARM64 scheduler(&allocator, selector); scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks); scheduler.Schedule(graph_); @@ -804,8 +804,8 @@ void HInstructionScheduling::Run(bool only_optimize_loop_blocks, } #endif #if defined(ART_ENABLE_CODEGEN_arm) - case kThumb2: - case kArm: { + case InstructionSet::kThumb2: + case InstructionSet::kArm: { arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_); arm::HSchedulerARM scheduler(&allocator, selector, &arm_latency_visitor); scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks); diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc index dfc1633fe6..75dce81550 100644 --- a/compiler/optimizing/scheduler_test.cc +++ b/compiler/optimizing/scheduler_test.cc @@ -43,22 +43,22 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() { ::std::vector<CodegenTargetConfig> test_config_candidates = { #ifdef ART_ENABLE_CODEGEN_arm // TODO: Should't this be `kThumb2` instead of `kArm` here? 
- CodegenTargetConfig(kArm, create_codegen_arm_vixl32), + CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32), #endif #ifdef ART_ENABLE_CODEGEN_arm64 - CodegenTargetConfig(kArm64, create_codegen_arm64), + CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64), #endif #ifdef ART_ENABLE_CODEGEN_x86 - CodegenTargetConfig(kX86, create_codegen_x86), + CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86), #endif #ifdef ART_ENABLE_CODEGEN_x86_64 - CodegenTargetConfig(kX86_64, create_codegen_x86_64), + CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64), #endif #ifdef ART_ENABLE_CODEGEN_mips - CodegenTargetConfig(kMips, create_codegen_mips), + CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips), #endif #ifdef ART_ENABLE_CODEGEN_mips64 - CodegenTargetConfig(kMips64, create_codegen_mips64) + CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64) #endif }; diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc index 91f86d5c50..7e517f3485 100644 --- a/compiler/optimizing/stack_map_test.cc +++ b/compiler/optimizing/stack_map_test.cc @@ -928,18 +928,24 @@ TEST(StackMapTest, InlineTest) { TEST(StackMapTest, CodeOffsetTest) { // Test minimum alignments, encoding, and decoding. - CodeOffset offset_thumb2 = CodeOffset::FromOffset(kThumb2InstructionAlignment, kThumb2); - CodeOffset offset_arm64 = CodeOffset::FromOffset(kArm64InstructionAlignment, kArm64); - CodeOffset offset_x86 = CodeOffset::FromOffset(kX86InstructionAlignment, kX86); - CodeOffset offset_x86_64 = CodeOffset::FromOffset(kX86_64InstructionAlignment, kX86_64); - CodeOffset offset_mips = CodeOffset::FromOffset(kMipsInstructionAlignment, kMips); - CodeOffset offset_mips64 = CodeOffset::FromOffset(kMips64InstructionAlignment, kMips64); - EXPECT_EQ(offset_thumb2.Uint32Value(kThumb2), kThumb2InstructionAlignment); - EXPECT_EQ(offset_arm64.Uint32Value(kArm64), kArm64InstructionAlignment); - EXPECT_EQ(offset_x86.Uint32Value(kX86), kX86InstructionAlignment); - EXPECT_EQ(offset_x86_64.Uint32Value(kX86_64), kX86_64InstructionAlignment); - EXPECT_EQ(offset_mips.Uint32Value(kMips), kMipsInstructionAlignment); - EXPECT_EQ(offset_mips64.Uint32Value(kMips64), kMips64InstructionAlignment); + CodeOffset offset_thumb2 = + CodeOffset::FromOffset(kThumb2InstructionAlignment, InstructionSet::kThumb2); + CodeOffset offset_arm64 = + CodeOffset::FromOffset(kArm64InstructionAlignment, InstructionSet::kArm64); + CodeOffset offset_x86 = + CodeOffset::FromOffset(kX86InstructionAlignment, InstructionSet::kX86); + CodeOffset offset_x86_64 = + CodeOffset::FromOffset(kX86_64InstructionAlignment, InstructionSet::kX86_64); + CodeOffset offset_mips = + CodeOffset::FromOffset(kMipsInstructionAlignment, InstructionSet::kMips); + CodeOffset offset_mips64 = + CodeOffset::FromOffset(kMips64InstructionAlignment, InstructionSet::kMips64); + EXPECT_EQ(offset_thumb2.Uint32Value(InstructionSet::kThumb2), kThumb2InstructionAlignment); + EXPECT_EQ(offset_arm64.Uint32Value(InstructionSet::kArm64), kArm64InstructionAlignment); + EXPECT_EQ(offset_x86.Uint32Value(InstructionSet::kX86), kX86InstructionAlignment); + EXPECT_EQ(offset_x86_64.Uint32Value(InstructionSet::kX86_64), kX86_64InstructionAlignment); + EXPECT_EQ(offset_mips.Uint32Value(InstructionSet::kMips), kMipsInstructionAlignment); + EXPECT_EQ(offset_mips64.Uint32Value(InstructionSet::kMips64), kMips64InstructionAlignment); } TEST(StackMapTest, TestDeduplicateStackMask) { diff --git 
a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc index 9527a60976..921d401849 100644 --- a/compiler/trampolines/trampoline_compiler.cc +++ b/compiler/trampolines/trampoline_compiler.cc @@ -247,15 +247,15 @@ std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet is ArenaAllocator allocator(&pool); switch (isa) { #ifdef ART_ENABLE_CODEGEN_arm64 - case kArm64: + case InstructionSet::kArm64: return arm64::CreateTrampoline(&allocator, abi, offset); #endif #ifdef ART_ENABLE_CODEGEN_mips64 - case kMips64: + case InstructionSet::kMips64: return mips64::CreateTrampoline(&allocator, abi, offset); #endif #ifdef ART_ENABLE_CODEGEN_x86_64 - case kX86_64: + case InstructionSet::kX86_64: return x86_64::CreateTrampoline(&allocator, offset); #endif default: @@ -273,16 +273,16 @@ std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet is ArenaAllocator allocator(&pool); switch (isa) { #ifdef ART_ENABLE_CODEGEN_arm - case kArm: - case kThumb2: + case InstructionSet::kArm: + case InstructionSet::kThumb2: return arm::CreateTrampoline(&allocator, abi, offset); #endif #ifdef ART_ENABLE_CODEGEN_mips - case kMips: + case InstructionSet::kMips: return mips::CreateTrampoline(&allocator, abi, offset); #endif #ifdef ART_ENABLE_CODEGEN_x86 - case kX86: + case InstructionSet::kX86: UNUSED(abi); return x86::CreateTrampoline(&allocator, offset); #endif diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h index e239004506..c13c9af819 100644 --- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h +++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h @@ -232,7 +232,7 @@ class ArmVIXLJNIMacroAssembler FINAL class ArmVIXLJNIMacroLabel FINAL : public JNIMacroLabelCommon<ArmVIXLJNIMacroLabel, vixl32::Label, - kArm> { + InstructionSet::kArm> { public: vixl32::Label* AsArm() { return AsPlatformLabel(); diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h index fda87aa573..ce39a13692 100644 --- a/compiler/utils/arm64/jni_macro_assembler_arm64.h +++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h @@ -235,7 +235,7 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, class Arm64JNIMacroLabel FINAL : public JNIMacroLabelCommon<Arm64JNIMacroLabel, vixl::aarch64::Label, - kArm64> { + InstructionSet::kArm64> { public: vixl::aarch64::Label* AsArm64() { return AsPlatformLabel(); diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc index 5307d17bb0..655d17d4fb 100644 --- a/compiler/utils/assembler_thumb_test.cc +++ b/compiler/utils/assembler_thumb_test.cc @@ -81,7 +81,7 @@ std::string GetToolsDir() { if (toolsdir.empty()) { setup_results(); - toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(kThumb2); + toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(InstructionSet::kThumb2); SetAndroidData(); } @@ -215,10 +215,10 @@ TEST_F(ArmVIXLAssemblerTest, VixlJniHelpers) { is_synchronized, is_critical_native, shorty, - kThumb2)); + InstructionSet::kThumb2)); std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv( ManagedRuntimeCallingConvention::Create( - &allocator, is_static, is_synchronized, shorty, kThumb2)); + &allocator, is_static, is_synchronized, shorty, InstructionSet::kThumb2)); const int frame_size(jni_conv->FrameSize()); ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters(); diff --git 
a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc index 0616b35a39..3f7691b6a8 100644 --- a/compiler/utils/jni_macro_assembler.cc +++ b/compiler/utils/jni_macro_assembler.cc @@ -56,12 +56,12 @@ MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create( switch (instruction_set) { #ifdef ART_ENABLE_CODEGEN_arm - case kArm: - case kThumb2: + case InstructionSet::kArm: + case InstructionSet::kThumb2: return MacroAsm32UniquePtr(new (allocator) arm::ArmVIXLJNIMacroAssembler(allocator)); #endif #ifdef ART_ENABLE_CODEGEN_mips - case kMips: + case InstructionSet::kMips: return MacroAsm32UniquePtr(new (allocator) mips::MipsAssembler( allocator, instruction_set_features != nullptr @@ -69,7 +69,7 @@ MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create( : nullptr)); #endif #ifdef ART_ENABLE_CODEGEN_x86 - case kX86: + case InstructionSet::kX86: return MacroAsm32UniquePtr(new (allocator) x86::X86JNIMacroAssembler(allocator)); #endif default: @@ -91,11 +91,11 @@ MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create( switch (instruction_set) { #ifdef ART_ENABLE_CODEGEN_arm64 - case kArm64: + case InstructionSet::kArm64: return MacroAsm64UniquePtr(new (allocator) arm64::Arm64JNIMacroAssembler(allocator)); #endif #ifdef ART_ENABLE_CODEGEN_mips64 - case kMips64: + case InstructionSet::kMips64: return MacroAsm64UniquePtr(new (allocator) mips64::Mips64Assembler( allocator, instruction_set_features != nullptr @@ -103,7 +103,7 @@ MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create( : nullptr)); #endif #ifdef ART_ENABLE_CODEGEN_x86_64 - case kX86_64: + case InstructionSet::kX86_64: return MacroAsm64UniquePtr(new (allocator) x86_64::X86_64JNIMacroAssembler(allocator)); #endif default: diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h index 56eaf1951e..99219d8f88 100644 --- a/compiler/utils/x86/jni_macro_assembler_x86.h +++ b/compiler/utils/x86/jni_macro_assembler_x86.h @@ -171,7 +171,7 @@ class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, Poi class X86JNIMacroLabel FINAL : public JNIMacroLabelCommon<X86JNIMacroLabel, art::Label, - kX86> { + InstructionSet::kX86> { public: art::Label* AsX86() { return AsPlatformLabel(); diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h index d1a3032a56..d766ad4716 100644 --- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h +++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h @@ -197,7 +197,7 @@ class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assemble class X86_64JNIMacroLabel FINAL : public JNIMacroLabelCommon<X86_64JNIMacroLabel, art::Label, - kX86_64> { + InstructionSet::kX86_64> { public: art::Label* AsX86_64() { return AsPlatformLabel(); |
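The JNIMacroLabel hunks at the end pass the instruction set as a non-type template argument, for example JNIMacroLabelCommon<..., InstructionSet::kX86_64>. A scoped enumerator remains a valid non-type template parameter, so only its spelling changes. The sketch below illustrates that point under that assumption; PlatformLabel and the main() driver are hypothetical, not ART code.

#include <iostream>

enum class InstructionSet { kArm, kArm64, kX86, kX86_64 };

// Hypothetical analogue of the per-ISA label classes: the ISA travels as a
// non-type template parameter, written with its InstructionSet:: qualifier.
template <InstructionSet kIsa>
struct PlatformLabel {
  static constexpr InstructionSet isa = kIsa;
};

using ArmLabel = PlatformLabel<InstructionSet::kArm>;
using X86_64Label = PlatformLabel<InstructionSet::kX86_64>;

int main() {
  // Both print 1: the qualified enumerators compare exactly as the unscoped ones did.
  std::cout << (ArmLabel::isa == InstructionSet::kArm) << "\n";
  std::cout << (X86_64Label::isa == InstructionSet::kX86_64) << "\n";
  return 0;
}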