ART: Make InstructionSet an enum class and add kLast.
Adding InstructionSet::kLast makes it easier to encode the
InstructionSet in fewer bits using BitField<>. However,
introducing `kLast` into the `art` namespace is not a good
idea, so we change InstructionSet to an enum class.
This also uncovered a case of InstructionSet::kNone being
erroneously used instead of vixl32::Condition::None(), so
removing `kNone` from the `art` namespace pays off as well.
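
For illustration, a minimal sketch of what kLast enables follows.
The stand-ins below only approximate ART's BitField<> (base/bit_field.h)
and MinimumBitsToStore (base/bit_utils.h), and the enumerator list
mirrors the runtime's InstructionSet; treat the exact names and
signatures as assumptions, not as part of this change.

  #include <cstddef>
  #include <cstdint>

  // Stand-in for art::MinimumBitsToStore: number of bits needed to
  // represent `value`.
  constexpr size_t MinimumBitsToStore(size_t value) {
    size_t bits = 0u;
    while (value != 0u) {
      ++bits;
      value >>= 1u;
    }
    return bits;
  }

  // Stand-in for art::BitField<>: packs a T into `kSize` bits at
  // `kPosition` within a uint32_t.
  template <typename T, size_t kPosition, size_t kSize>
  struct BitField {
    static constexpr uint32_t Encode(T value) {
      return static_cast<uint32_t>(value) << kPosition;
    }
    static constexpr T Decode(uint32_t field) {
      return static_cast<T>((field >> kPosition) & ((1u << kSize) - 1u));
    }
  };

  enum class InstructionSet {
    kNone,
    kArm,
    kArm64,
    kThumb2,
    kX86,
    kX86_64,
    kMips,
    kMips64,
    kLast = kMips64  // Scoped by the enum class, so `art` stays clean.
  };

  // Eight enumerators (kNone == 0 .. kMips64 == 7) fit in three bits.
  static constexpr size_t kIsaBits =
      MinimumBitsToStore(static_cast<size_t>(InstructionSet::kLast));
  static_assert(kIsaBits == 3u, "InstructionSet fits in 3 bits");

  using InstructionSetField = BitField<InstructionSet, /* kPosition= */ 0u, kIsaBits>;
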
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Change-Id: I6fa6168dfba4ed6da86d021a69c80224f09997a6
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index b8d1f52..5625f04 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -786,43 +786,43 @@
ArenaAllocator* allocator = graph->GetAllocator();
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
- case kArm:
- case kThumb2: {
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2: {
return std::unique_ptr<CodeGenerator>(
new (allocator) arm::CodeGeneratorARMVIXL(
graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64: {
+ case InstructionSet::kArm64: {
return std::unique_ptr<CodeGenerator>(
new (allocator) arm64::CodeGeneratorARM64(
graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips: {
+ case InstructionSet::kMips: {
return std::unique_ptr<CodeGenerator>(
new (allocator) mips::CodeGeneratorMIPS(
graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64: {
+ case InstructionSet::kMips64: {
return std::unique_ptr<CodeGenerator>(
new (allocator) mips64::CodeGeneratorMIPS64(
graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- case kX86: {
+ case InstructionSet::kX86: {
return std::unique_ptr<CodeGenerator>(
new (allocator) x86::CodeGeneratorX86(
graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64: {
+ case InstructionSet::kX86_64: {
return std::unique_ptr<CodeGenerator>(
new (allocator) x86_64::CodeGeneratorX86_64(
graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats));
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 64c88eb..18ad60d 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -626,7 +626,7 @@
bool CallPushesPC() const {
InstructionSet instruction_set = GetInstructionSet();
- return instruction_set == kX86 || instruction_set == kX86_64;
+ return instruction_set == InstructionSet::kX86 || instruction_set == InstructionSet::kX86_64;
}
// Arm64 has its own type for a label, so we need to templatize these methods
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c7811ab..e01b7b7 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1557,12 +1557,13 @@
MacroAssembler* masm = GetVIXLAssembler();
__ Bind(&frame_entry_label_);
- bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
+ bool do_overflow_check =
+ FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm64) || !IsLeafMethod();
if (do_overflow_check) {
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireX();
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
- __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
+ __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kArm64)));
{
// Ensure that between load and RecordPcInfo there are no pools emitted.
ExactAssemblyScope eas(GetVIXLAssembler(),
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 90f3ae8..edd3072 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2568,7 +2568,7 @@
if (!skip_overflow_check) {
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
- __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(kArm)));
+ __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(InstructionSet::kArm)));
// The load must immediately precede RecordPcInfo.
ExactAssemblyScope aas(GetVIXLAssembler(),
vixl32::kMaxInstructionSizeInBytes,
@@ -5303,7 +5303,7 @@
vixl32::Label less, greater, done;
vixl32::Label* final_label = codegen_->GetFinalLabel(compare, &done);
DataType::Type type = compare->InputAt(0)->GetType();
- vixl32::Condition less_cond = vixl32::Condition(kNone);
+ vixl32::Condition less_cond = vixl32::Condition::None();
switch (type) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 2f65e8c..b3fed07 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1132,7 +1132,7 @@
StackMapStream* stack_map_stream = GetStackMapStream();
for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
uint32_t old_position =
- stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
+ stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
stack_map_stream->SetStackMapNativePcOffset(i, new_position);
@@ -1347,13 +1347,14 @@
void CodeGeneratorMIPS::GenerateFrameEntry() {
__ Bind(&frame_entry_label_);
- bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips) || !IsLeafMethod();
+ bool do_overflow_check =
+ FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips) || !IsLeafMethod();
if (do_overflow_check) {
__ LoadFromOffset(kLoadWord,
ZERO,
SP,
- -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips)));
+ -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips)));
RecordPcInfo(nullptr, 0);
}
@@ -1365,8 +1366,9 @@
}
// Make sure the frame size isn't unreasonably large.
- if (GetFrameSize() > GetStackOverflowReservedBytes(kMips)) {
- LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips) << " bytes";
+ if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips)) {
+ LOG(FATAL) << "Stack frame larger than "
+ << GetStackOverflowReservedBytes(InstructionSet::kMips) << " bytes";
}
// Spill callee-saved registers.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 6cbfa14..53a7f26 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1076,7 +1076,7 @@
StackMapStream* stack_map_stream = GetStackMapStream();
for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
uint32_t old_position =
- stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
+ stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips64);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
stack_map_stream->SetStackMapNativePcOffset(i, new_position);
@@ -1161,13 +1161,15 @@
void CodeGeneratorMIPS64::GenerateFrameEntry() {
__ Bind(&frame_entry_label_);
- bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();
+ bool do_overflow_check =
+ FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips64) || !IsLeafMethod();
if (do_overflow_check) {
- __ LoadFromOffset(kLoadWord,
- ZERO,
- SP,
- -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
+ __ LoadFromOffset(
+ kLoadWord,
+ ZERO,
+ SP,
+ -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips64)));
RecordPcInfo(nullptr, 0);
}
@@ -1176,8 +1178,9 @@
}
// Make sure the frame size isn't unreasonably large.
- if (GetFrameSize() > GetStackOverflowReservedBytes(kMips64)) {
- LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips64) << " bytes";
+ if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips64)) {
+ LOG(FATAL) << "Stack frame larger than "
+ << GetStackOverflowReservedBytes(InstructionSet::kMips64) << " bytes";
}
// Spill callee-saved registers.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 44614e1..f84dd00 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1072,7 +1072,8 @@
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
if (!skip_overflow_check) {
- __ testl(EAX, Address(ESP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86))));
+ size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86);
+ __ testl(EAX, Address(ESP, -static_cast<int32_t>(reserved_bytes)));
RecordPcInfo(nullptr, 0);
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 259bb4a..16d1f18 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1277,8 +1277,8 @@
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
if (!skip_overflow_check) {
- __ testq(CpuRegister(RAX), Address(
- CpuRegister(RSP), -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86_64))));
+ size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86_64);
+ __ testq(CpuRegister(RAX), Address(CpuRegister(RSP), -static_cast<int32_t>(reserved_bytes)));
RecordPcInfo(nullptr, 0);
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index e35c7c7..ba431a5 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -44,22 +44,22 @@
::std::vector<CodegenTargetConfig> test_config_candidates = {
#ifdef ART_ENABLE_CODEGEN_arm
    // TODO: Shouldn't this be `kThumb2` instead of `kArm` here?
- CodegenTargetConfig(kArm, create_codegen_arm_vixl32),
+ CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32),
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- CodegenTargetConfig(kArm64, create_codegen_arm64),
+ CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64),
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- CodegenTargetConfig(kX86, create_codegen_x86),
+ CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86),
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- CodegenTargetConfig(kX86_64, create_codegen_x86_64),
+ CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- CodegenTargetConfig(kMips, create_codegen_mips),
+ CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- CodegenTargetConfig(kMips64, create_codegen_mips64)
+ CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
#endif
};
@@ -825,7 +825,7 @@
TEST_F(CodegenTest, MipsClobberRA) {
std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
MipsInstructionSetFeatures::FromCppDefines());
- if (!CanExecute(kMips) || features_mips->IsR6()) {
+ if (!CanExecute(InstructionSet::kMips) || features_mips->IsR6()) {
// HMipsComputeBaseMethodAddress and the NAL instruction behind it
// should only be generated on non-R6.
return;
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index bcbcc12..c41c290 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -207,7 +207,7 @@
static bool CanExecuteOnHardware(InstructionSet target_isa) {
return (target_isa == kRuntimeISA)
// Handle the special case of ARM, with two instructions sets (ARM32 and Thumb-2).
- || (kRuntimeISA == kArm && target_isa == kThumb2);
+ || (kRuntimeISA == InstructionSet::kArm && target_isa == InstructionSet::kThumb2);
}
static bool CanExecute(InstructionSet target_isa) {
@@ -271,7 +271,7 @@
typedef Expected (*fptr)();
CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
fptr f = reinterpret_cast<fptr>(allocator.GetMemory());
- if (target_isa == kThumb2) {
+ if (target_isa == InstructionSet::kThumb2) {
// For thumb we need the bottom bit set.
f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1);
}
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 102acb3..ed2f8e9 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -342,7 +342,7 @@
}
inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
- DCHECK(HasShifterOperand(instruction, kArm64));
+ DCHECK(HasShifterOperand(instruction, InstructionSet::kArm64));
// Although the `neg` instruction is an alias of the `sub` instruction, `HNeg`
// does *not* support extension. This is because the `extended register` form
// of the `sub` instruction interprets the left register with code 31 as the
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index f7fd910..12c6988 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -153,7 +153,7 @@
}
const uint8_t* base = disassembler_->GetDisassemblerOptions()->base_address_;
- if (instruction_set_ == kThumb2) {
+ if (instruction_set_ == InstructionSet::kThumb2) {
// ARM and Thumb-2 use the same disassembler. The bottom bit of the
// address is used to distinguish between the two.
base += 1;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 189d5ae..2bd2d5f 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -250,7 +250,7 @@
DataType::Type type = mul->GetPackedType();
InstructionSet isa = codegen_->GetInstructionSet();
switch (isa) {
- case kArm64:
+ case InstructionSet::kArm64:
if (!(type == DataType::Type::kUint8 ||
type == DataType::Type::kInt8 ||
type == DataType::Type::kUint16 ||
@@ -259,8 +259,8 @@
return false;
}
break;
- case kMips:
- case kMips64:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
if (!(type == DataType::Type::kUint8 ||
type == DataType::Type::kInt8 ||
type == DataType::Type::kUint16 ||
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 9422f9f..d41e49a 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -84,7 +84,7 @@
bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* use,
HInstruction* bitfield_op,
bool do_merge) {
- DCHECK(HasShifterOperand(use, kArm));
+ DCHECK(HasShifterOperand(use, InstructionSet::kArm));
DCHECK(use->IsBinaryOperation());
DCHECK(CanFitInShifterOperand(bitfield_op));
DCHECK(!bitfield_op->HasEnvironmentUses());
@@ -166,7 +166,7 @@
// Check whether we can merge the instruction in all its users' shifter operand.
for (const HUseListNode<HInstruction*>& use : uses) {
HInstruction* user = use.GetUser();
- if (!HasShifterOperand(user, kArm)) {
+ if (!HasShifterOperand(user, InstructionSet::kArm)) {
return false;
}
if (!CanMergeIntoShifterOperand(user, bitfield_op)) {
@@ -242,7 +242,7 @@
}
void InstructionSimplifierArmVisitor::VisitMul(HMul* instruction) {
- if (TryCombineMultiplyAccumulate(instruction, kArm)) {
+ if (TryCombineMultiplyAccumulate(instruction, InstructionSet::kArm)) {
RecordSimplification();
}
}
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index c0ab68f..69e1463 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -90,7 +90,7 @@
bool InstructionSimplifierArm64Visitor::TryMergeIntoShifterOperand(HInstruction* use,
HInstruction* bitfield_op,
bool do_merge) {
- DCHECK(HasShifterOperand(use, kArm64));
+ DCHECK(HasShifterOperand(use, InstructionSet::kArm64));
DCHECK(use->IsBinaryOperation() || use->IsNeg());
DCHECK(CanFitInShifterOperand(bitfield_op));
DCHECK(!bitfield_op->HasEnvironmentUses());
@@ -170,7 +170,7 @@
// Check whether we can merge the instruction in all its users' shifter operand.
for (const HUseListNode<HInstruction*>& use : uses) {
HInstruction* user = use.GetUser();
- if (!HasShifterOperand(user, kArm64)) {
+ if (!HasShifterOperand(user, InstructionSet::kArm64)) {
return false;
}
if (!CanMergeIntoShifterOperand(user, bitfield_op)) {
@@ -218,7 +218,7 @@
}
void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) {
- if (TryCombineMultiplyAccumulate(instruction, kArm64)) {
+ if (TryCombineMultiplyAccumulate(instruction, InstructionSet::kArm64)) {
RecordSimplification();
}
}
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 1c13084..ccdcb35 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -90,13 +90,13 @@
bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) {
DataType::Type type = mul->GetType();
switch (isa) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
if (type != DataType::Type::kInt32) {
return false;
}
break;
- case kArm64:
+ case InstructionSet::kArm64:
if (!DataType::IsIntOrLongType(type)) {
return false;
}
@@ -148,7 +148,7 @@
mul->GetBlock()->RemoveInstruction(mul);
return true;
}
- } else if (use->IsNeg() && isa != kArm) {
+ } else if (use->IsNeg() && isa != InstructionSet::kArm) {
HMultiplyAccumulate* mulacc =
new (allocator) HMultiplyAccumulate(type,
HInstruction::kSub,
diff --git a/compiler/optimizing/instruction_simplifier_shared.h b/compiler/optimizing/instruction_simplifier_shared.h
index b016a87..758fc76 100644
--- a/compiler/optimizing/instruction_simplifier_shared.h
+++ b/compiler/optimizing/instruction_simplifier_shared.h
@@ -41,7 +41,8 @@
inline bool HasShifterOperand(HInstruction* instr, InstructionSet isa) {
// On ARM64 `neg` instructions are an alias of `sub` using the zero register
// as the first register input.
- bool res = instr->IsAdd() || instr->IsAnd() || (isa == kArm64 && instr->IsNeg()) ||
+ bool res = instr->IsAdd() || instr->IsAnd() ||
+ (isa == InstructionSet::kArm64 && instr->IsNeg()) ||
instr->IsOr() || instr->IsSub() || instr->IsXor();
return res;
}
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 74de077..c672dae 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -1414,8 +1414,8 @@
uint32_t HLoopOptimization::GetVectorSizeInBytes() {
switch (compiler_driver_->GetInstructionSet()) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return 8; // 64-bit SIMD
default:
return 16; // 128-bit SIMD
@@ -1425,8 +1425,8 @@
bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) {
const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures();
switch (compiler_driver_->GetInstructionSet()) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
// Allow vectorization for all ARM devices, because Android assumes that
// ARM 32-bit always supports advanced SIMD (64-bit SIMD).
switch (type) {
@@ -1446,7 +1446,7 @@
break;
}
return false;
- case kArm64:
+ case InstructionSet::kArm64:
// Allow vectorization for all ARM devices, because Android assumes that
// ARMv8 AArch64 always supports advanced SIMD (128-bit SIMD).
switch (type) {
@@ -1474,8 +1474,8 @@
default:
return false;
}
- case kX86:
- case kX86_64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
// Allow vectorization for SSE4.1-enabled X86 devices only (128-bit SIMD).
if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) {
switch (type) {
@@ -1506,7 +1506,7 @@
} // switch type
}
return false;
- case kMips:
+ case InstructionSet::kMips:
if (features->AsMipsInstructionSetFeatures()->HasMsa()) {
switch (type) {
case DataType::Type::kBool:
@@ -1535,7 +1535,7 @@
} // switch type
}
return false;
- case kMips64:
+ case InstructionSet::kMips64:
if (features->AsMips64InstructionSetFeatures()->HasMsa()) {
switch (type) {
case DataType::Type::kBool:
@@ -2170,7 +2170,7 @@
uint32_t HLoopOptimization::GetUnrollingFactor(HBasicBlock* block, int64_t trip_count) {
uint32_t max_peel = MaxNumberPeeled();
switch (compiler_driver_->GetInstructionSet()) {
- case kArm64: {
+ case InstructionSet::kArm64: {
// Don't unroll with insufficient iterations.
// TODO: Unroll loops with unknown trip count.
DCHECK_NE(vector_length_, 0u);
@@ -2192,8 +2192,8 @@
DCHECK_GE(unroll_factor, 1u);
return unroll_factor;
}
- case kX86:
- case kX86_64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
default:
return kNoUnrollingFactor;
}
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index b7380b0..4ad2996 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -153,15 +153,15 @@
InternalCodeAllocator code_allocator_;
};
-#define TEST_ISA(isa) \
- TEST_F(OptimizingCFITest, isa) { \
- std::vector<uint8_t> expected_asm( \
- expected_asm_##isa, \
- expected_asm_##isa + arraysize(expected_asm_##isa)); \
- std::vector<uint8_t> expected_cfi( \
- expected_cfi_##isa, \
- expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
- TestImpl(isa, #isa, expected_asm, expected_cfi); \
+#define TEST_ISA(isa) \
+ TEST_F(OptimizingCFITest, isa) { \
+ std::vector<uint8_t> expected_asm( \
+ expected_asm_##isa, \
+ expected_asm_##isa + arraysize(expected_asm_##isa)); \
+ std::vector<uint8_t> expected_cfi( \
+ expected_cfi_##isa, \
+ expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
+ TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi); \
}
#ifdef ART_ENABLE_CODEGEN_arm
@@ -204,7 +204,7 @@
std::vector<uint8_t> expected_cfi(
expected_cfi_kThumb2_adjust,
expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust));
- SetUpFrame(kThumb2);
+ SetUpFrame(InstructionSet::kThumb2);
#define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \
->GetAssembler())->GetVIXLAssembler()->
vixl32::Label target;
@@ -216,7 +216,7 @@
__ Bind(&target);
#undef __
Finish();
- Check(kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
+ Check(InstructionSet::kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
}
#endif
@@ -235,7 +235,7 @@
std::vector<uint8_t> expected_cfi(
expected_cfi_kMips_adjust,
expected_cfi_kMips_adjust + arraysize(expected_cfi_kMips_adjust));
- SetUpFrame(kMips);
+ SetUpFrame(InstructionSet::kMips);
#define __ down_cast<mips::MipsAssembler*>(GetCodeGenerator()->GetAssembler())->
mips::MipsLabel target;
__ Beqz(mips::A0, &target);
@@ -246,7 +246,7 @@
__ Bind(&target);
#undef __
Finish();
- Check(kMips, "kMips_adjust", expected_asm, expected_cfi);
+ Check(InstructionSet::kMips, "kMips_adjust", expected_asm, expected_cfi);
}
#endif
@@ -265,7 +265,7 @@
std::vector<uint8_t> expected_cfi(
expected_cfi_kMips64_adjust,
expected_cfi_kMips64_adjust + arraysize(expected_cfi_kMips64_adjust));
- SetUpFrame(kMips64);
+ SetUpFrame(InstructionSet::kMips64);
#define __ down_cast<mips64::Mips64Assembler*>(GetCodeGenerator()->GetAssembler())->
mips64::Mips64Label target;
__ Beqc(mips64::A1, mips64::A2, &target);
@@ -276,7 +276,7 @@
__ Bind(&target);
#undef __
Finish();
- Check(kMips64, "kMips64_adjust", expected_asm, expected_cfi);
+ Check(InstructionSet::kMips64, "kMips64_adjust", expected_asm, expected_cfi);
}
#endif
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 29319f8..9233eb5 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -437,13 +437,13 @@
}
static bool IsInstructionSetSupported(InstructionSet instruction_set) {
- return instruction_set == kArm
- || instruction_set == kArm64
- || instruction_set == kThumb2
- || instruction_set == kMips
- || instruction_set == kMips64
- || instruction_set == kX86
- || instruction_set == kX86_64;
+ return instruction_set == InstructionSet::kArm
+ || instruction_set == InstructionSet::kArm64
+ || instruction_set == InstructionSet::kThumb2
+ || instruction_set == InstructionSet::kMips
+ || instruction_set == InstructionSet::kMips64
+ || instruction_set == InstructionSet::kX86
+ || instruction_set == InstructionSet::kX86_64;
}
// Strip pass name suffix to get optimization name.
@@ -637,8 +637,8 @@
ArenaAllocator* allocator = graph->GetAllocator();
switch (instruction_set) {
#if defined(ART_ENABLE_CODEGEN_arm)
- case kThumb2:
- case kArm: {
+ case InstructionSet::kThumb2:
+ case InstructionSet::kArm: {
arm::InstructionSimplifierArm* simplifier =
new (allocator) arm::InstructionSimplifierArm(graph, stats);
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -657,7 +657,7 @@
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64: {
+ case InstructionSet::kArm64: {
arm64::InstructionSimplifierArm64* simplifier =
new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -676,7 +676,7 @@
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips: {
+ case InstructionSet::kMips: {
mips::InstructionSimplifierMips* simplifier =
new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -695,7 +695,7 @@
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64: {
+ case InstructionSet::kMips64: {
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
GVNOptimization* gvn =
new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -708,7 +708,7 @@
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- case kX86: {
+ case InstructionSet::kX86: {
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
GVNOptimization* gvn =
new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -727,7 +727,7 @@
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64: {
+ case InstructionSet::kX86_64: {
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
GVNOptimization* gvn =
new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -949,7 +949,7 @@
// Always use the Thumb-2 assembler: some runtime functionality
// (like implicit stack overflow checks) assume Thumb-2.
- DCHECK_NE(instruction_set, kArm);
+ DCHECK_NE(instruction_set, InstructionSet::kArm);
// Do not attempt to compile on architectures we do not support.
if (!IsInstructionSetSupported(instruction_set)) {
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 86e9713..bad73e1 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -70,13 +70,13 @@
bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
InstructionSet instruction_set) {
- return instruction_set == kArm
- || instruction_set == kArm64
- || instruction_set == kMips
- || instruction_set == kMips64
- || instruction_set == kThumb2
- || instruction_set == kX86
- || instruction_set == kX86_64;
+ return instruction_set == InstructionSet::kArm
+ || instruction_set == InstructionSet::kArm64
+ || instruction_set == InstructionSet::kMips
+ || instruction_set == InstructionSet::kMips64
+ || instruction_set == InstructionSet::kThumb2
+ || instruction_set == InstructionSet::kX86
+ || instruction_set == InstructionSet::kX86_64;
}
class AllRangesIterator : public ValueObject {
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 57eb762..8cc376c 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -796,7 +796,7 @@
switch (instruction_set_) {
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64: {
+ case InstructionSet::kArm64: {
arm64::HSchedulerARM64 scheduler(&allocator, selector);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
scheduler.Schedule(graph_);
@@ -804,8 +804,8 @@
}
#endif
#if defined(ART_ENABLE_CODEGEN_arm)
- case kThumb2:
- case kArm: {
+ case InstructionSet::kThumb2:
+ case InstructionSet::kArm: {
arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
arm::HSchedulerARM scheduler(&allocator, selector, &arm_latency_visitor);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index dfc1633..75dce81 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -43,22 +43,22 @@
::std::vector<CodegenTargetConfig> test_config_candidates = {
#ifdef ART_ENABLE_CODEGEN_arm
    // TODO: Shouldn't this be `kThumb2` instead of `kArm` here?
- CodegenTargetConfig(kArm, create_codegen_arm_vixl32),
+ CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32),
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- CodegenTargetConfig(kArm64, create_codegen_arm64),
+ CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64),
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- CodegenTargetConfig(kX86, create_codegen_x86),
+ CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86),
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- CodegenTargetConfig(kX86_64, create_codegen_x86_64),
+ CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- CodegenTargetConfig(kMips, create_codegen_mips),
+ CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- CodegenTargetConfig(kMips64, create_codegen_mips64)
+ CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
#endif
};
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 91f86d5..7e517f3 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -928,18 +928,24 @@
TEST(StackMapTest, CodeOffsetTest) {
// Test minimum alignments, encoding, and decoding.
- CodeOffset offset_thumb2 = CodeOffset::FromOffset(kThumb2InstructionAlignment, kThumb2);
- CodeOffset offset_arm64 = CodeOffset::FromOffset(kArm64InstructionAlignment, kArm64);
- CodeOffset offset_x86 = CodeOffset::FromOffset(kX86InstructionAlignment, kX86);
- CodeOffset offset_x86_64 = CodeOffset::FromOffset(kX86_64InstructionAlignment, kX86_64);
- CodeOffset offset_mips = CodeOffset::FromOffset(kMipsInstructionAlignment, kMips);
- CodeOffset offset_mips64 = CodeOffset::FromOffset(kMips64InstructionAlignment, kMips64);
- EXPECT_EQ(offset_thumb2.Uint32Value(kThumb2), kThumb2InstructionAlignment);
- EXPECT_EQ(offset_arm64.Uint32Value(kArm64), kArm64InstructionAlignment);
- EXPECT_EQ(offset_x86.Uint32Value(kX86), kX86InstructionAlignment);
- EXPECT_EQ(offset_x86_64.Uint32Value(kX86_64), kX86_64InstructionAlignment);
- EXPECT_EQ(offset_mips.Uint32Value(kMips), kMipsInstructionAlignment);
- EXPECT_EQ(offset_mips64.Uint32Value(kMips64), kMips64InstructionAlignment);
+ CodeOffset offset_thumb2 =
+ CodeOffset::FromOffset(kThumb2InstructionAlignment, InstructionSet::kThumb2);
+ CodeOffset offset_arm64 =
+ CodeOffset::FromOffset(kArm64InstructionAlignment, InstructionSet::kArm64);
+ CodeOffset offset_x86 =
+ CodeOffset::FromOffset(kX86InstructionAlignment, InstructionSet::kX86);
+ CodeOffset offset_x86_64 =
+ CodeOffset::FromOffset(kX86_64InstructionAlignment, InstructionSet::kX86_64);
+ CodeOffset offset_mips =
+ CodeOffset::FromOffset(kMipsInstructionAlignment, InstructionSet::kMips);
+ CodeOffset offset_mips64 =
+ CodeOffset::FromOffset(kMips64InstructionAlignment, InstructionSet::kMips64);
+ EXPECT_EQ(offset_thumb2.Uint32Value(InstructionSet::kThumb2), kThumb2InstructionAlignment);
+ EXPECT_EQ(offset_arm64.Uint32Value(InstructionSet::kArm64), kArm64InstructionAlignment);
+ EXPECT_EQ(offset_x86.Uint32Value(InstructionSet::kX86), kX86InstructionAlignment);
+ EXPECT_EQ(offset_x86_64.Uint32Value(InstructionSet::kX86_64), kX86_64InstructionAlignment);
+ EXPECT_EQ(offset_mips.Uint32Value(InstructionSet::kMips), kMipsInstructionAlignment);
+ EXPECT_EQ(offset_mips64.Uint32Value(InstructionSet::kMips64), kMips64InstructionAlignment);
}
TEST(StackMapTest, TestDeduplicateStackMask) {