Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.cc                  14
-rw-r--r--  compiler/optimizing/code_generator.h                    2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc             5
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc          4
-rw-r--r--  compiler/optimizing/code_generator_mips.cc             12
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc           19
-rw-r--r--  compiler/optimizing/code_generator_x86.cc               3
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc            4
-rw-r--r--  compiler/optimizing/codegen_test.cc                    14
-rw-r--r--  compiler/optimizing/codegen_test_utils.h                4
-rw-r--r--  compiler/optimizing/common_arm64.h                      2
-rw-r--r--  compiler/optimizing/graph_visualizer.cc                 2
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc           6
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm.cc       6
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm64.cc     6
-rw-r--r--  compiler/optimizing/instruction_simplifier_shared.cc    8
-rw-r--r--  compiler/optimizing/instruction_simplifier_shared.h     3
-rw-r--r--  compiler/optimizing/loop_optimization.cc               24
-rw-r--r--  compiler/optimizing/optimizing_cfi_test.cc             30
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc             30
-rw-r--r--  compiler/optimizing/register_allocator.cc              14
-rw-r--r--  compiler/optimizing/scheduler.cc                        6
-rw-r--r--  compiler/optimizing/scheduler_test.cc                  12
-rw-r--r--  compiler/optimizing/stack_map_test.cc                  30
24 files changed, 137 insertions, 123 deletions
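
All of the edits below apply one pattern: uses of the bare InstructionSet enumerators (kArm, kThumb2, kArm64, kMips, kMips64, kX86, kX86_64) are rewritten with an explicit InstructionSet:: qualifier. A minimal sketch of why the qualifier is needed, assuming the enum is (or is being turned into) a scoped enum class; the declaration here is simplified, lists only the members that appear in this diff, and stands in for the real one in ART's instruction_set.h:

// Sketch only: simplified to the enumerators that appear in this change.
enum class InstructionSet {
  kArm,
  kArm64,
  kThumb2,
  kX86,
  kX86_64,
  kMips,
  kMips64,
};

// With a scoped enum the enumerators are not injected into the enclosing
// scope, so every use has to name the type explicitly:
inline bool IsX86Family(InstructionSet isa) {
  return isa == InstructionSet::kX86 || isa == InstructionSet::kX86_64;
}
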
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index b8d1f52995..5625f04726 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -786,43 +786,43 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
ArenaAllocator* allocator = graph->GetAllocator();
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
- case kArm:
- case kThumb2: {
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2: {
return std::unique_ptr<CodeGenerator>(
new (allocator) arm::CodeGeneratorARMVIXL(
graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64: {
+ case InstructionSet::kArm64: {
return std::unique_ptr<CodeGenerator>(
new (allocator) arm64::CodeGeneratorARM64(
graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips: {
+ case InstructionSet::kMips: {
return std::unique_ptr<CodeGenerator>(
new (allocator) mips::CodeGeneratorMIPS(
graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64: {
+ case InstructionSet::kMips64: {
return std::unique_ptr<CodeGenerator>(
new (allocator) mips64::CodeGeneratorMIPS64(
graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- case kX86: {
+ case InstructionSet::kX86: {
return std::unique_ptr<CodeGenerator>(
new (allocator) x86::CodeGeneratorX86(
graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64: {
+ case InstructionSet::kX86_64: {
return std::unique_ptr<CodeGenerator>(
new (allocator) x86_64::CodeGeneratorX86_64(
graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats));
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 64c88eb67c..18ad60db87 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -626,7 +626,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
bool CallPushesPC() const {
InstructionSet instruction_set = GetInstructionSet();
- return instruction_set == kX86 || instruction_set == kX86_64;
+ return instruction_set == InstructionSet::kX86 || instruction_set == InstructionSet::kX86_64;
}
// Arm64 has its own type for a label, so we need to templatize these methods
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c7811ab976..e01b7b78cb 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1557,12 +1557,13 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
MacroAssembler* masm = GetVIXLAssembler();
__ Bind(&frame_entry_label_);
- bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
+ bool do_overflow_check =
+ FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm64) || !IsLeafMethod();
if (do_overflow_check) {
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireX();
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
- __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
+ __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kArm64)));
{
// Ensure that between load and RecordPcInfo there are no pools emitted.
ExactAssemblyScope eas(GetVIXLAssembler(),
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 90f3ae8a01..edd307263d 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2568,7 +2568,7 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
if (!skip_overflow_check) {
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
- __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(kArm)));
+ __ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(InstructionSet::kArm)));
// The load must immediately precede RecordPcInfo.
ExactAssemblyScope aas(GetVIXLAssembler(),
vixl32::kMaxInstructionSizeInBytes,
@@ -5303,7 +5303,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) {
vixl32::Label less, greater, done;
vixl32::Label* final_label = codegen_->GetFinalLabel(compare, &done);
DataType::Type type = compare->InputAt(0)->GetType();
- vixl32::Condition less_cond = vixl32::Condition(kNone);
+ vixl32::Condition less_cond = vixl32::Condition::None();
switch (type) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 2f65e8c958..b3fed079d8 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1132,7 +1132,7 @@ void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
StackMapStream* stack_map_stream = GetStackMapStream();
for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
uint32_t old_position =
- stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
+ stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
stack_map_stream->SetStackMapNativePcOffset(i, new_position);
@@ -1347,13 +1347,14 @@ static dwarf::Reg DWARFReg(Register reg) {
void CodeGeneratorMIPS::GenerateFrameEntry() {
__ Bind(&frame_entry_label_);
- bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips) || !IsLeafMethod();
+ bool do_overflow_check =
+ FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips) || !IsLeafMethod();
if (do_overflow_check) {
__ LoadFromOffset(kLoadWord,
ZERO,
SP,
- -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips)));
+ -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips)));
RecordPcInfo(nullptr, 0);
}
@@ -1365,8 +1366,9 @@ void CodeGeneratorMIPS::GenerateFrameEntry() {
}
// Make sure the frame size isn't unreasonably large.
- if (GetFrameSize() > GetStackOverflowReservedBytes(kMips)) {
- LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips) << " bytes";
+ if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips)) {
+ LOG(FATAL) << "Stack frame larger than "
+ << GetStackOverflowReservedBytes(InstructionSet::kMips) << " bytes";
}
// Spill callee-saved registers.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 6cbfa14f15..53a7f26c81 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1076,7 +1076,7 @@ void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
StackMapStream* stack_map_stream = GetStackMapStream();
for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
uint32_t old_position =
- stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
+ stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips64);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
stack_map_stream->SetStackMapNativePcOffset(i, new_position);
@@ -1161,13 +1161,15 @@ static dwarf::Reg DWARFReg(FpuRegister reg) {
void CodeGeneratorMIPS64::GenerateFrameEntry() {
__ Bind(&frame_entry_label_);
- bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();
+ bool do_overflow_check =
+ FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips64) || !IsLeafMethod();
if (do_overflow_check) {
- __ LoadFromOffset(kLoadWord,
- ZERO,
- SP,
- -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
+ __ LoadFromOffset(
+ kLoadWord,
+ ZERO,
+ SP,
+ -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips64)));
RecordPcInfo(nullptr, 0);
}
@@ -1176,8 +1178,9 @@ void CodeGeneratorMIPS64::GenerateFrameEntry() {
}
// Make sure the frame size isn't unreasonably large.
- if (GetFrameSize() > GetStackOverflowReservedBytes(kMips64)) {
- LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips64) << " bytes";
+ if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips64)) {
+ LOG(FATAL) << "Stack frame larger than "
+ << GetStackOverflowReservedBytes(InstructionSet::kMips64) << " bytes";
}
// Spill callee-saved registers.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 44614e1630..f84dd0045e 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1072,7 +1072,8 @@ void CodeGeneratorX86::GenerateFrameEntry() {
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
if (!skip_overflow_check) {
- __ testl(EAX, Address(ESP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86))));
+ size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86);
+ __ testl(EAX, Address(ESP, -static_cast<int32_t>(reserved_bytes)));
RecordPcInfo(nullptr, 0);
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 259bb4a9a9..16d1f183a1 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1277,8 +1277,8 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
if (!skip_overflow_check) {
- __ testq(CpuRegister(RAX), Address(
- CpuRegister(RSP), -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86_64))));
+ size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86_64);
+ __ testq(CpuRegister(RAX), Address(CpuRegister(RSP), -static_cast<int32_t>(reserved_bytes)));
RecordPcInfo(nullptr, 0);
}
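
Besides the qualifier change, the two x86 hunks hoist the reserved-byte count into a local so the longer qualified call still fits on one line. The check itself is the implicit stack-overflow probe: the emitted test reads a word below the stack pointer, and a fault on the guard region is handled by the runtime and surfaced as a StackOverflowError. A hedged sketch of just the address arithmetic (illustrative helper, not ART code):

#include <cstddef>
#include <cstdint>

// Illustrative only: the probe touches [SP - reserved_bytes]. If that address
// falls in the stack guard region, the load faults before the new frame is
// committed, and the fault handler reports the overflow.
inline uintptr_t OverflowProbeAddress(uintptr_t stack_pointer, size_t reserved_bytes) {
  return stack_pointer - reserved_bytes;
}
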
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index e35c7c734b..ba431a5b08 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -44,22 +44,22 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
::std::vector<CodegenTargetConfig> test_config_candidates = {
#ifdef ART_ENABLE_CODEGEN_arm
// TODO: Shouldn't this be `kThumb2` instead of `kArm` here?
- CodegenTargetConfig(kArm, create_codegen_arm_vixl32),
+ CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32),
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- CodegenTargetConfig(kArm64, create_codegen_arm64),
+ CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64),
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- CodegenTargetConfig(kX86, create_codegen_x86),
+ CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86),
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- CodegenTargetConfig(kX86_64, create_codegen_x86_64),
+ CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- CodegenTargetConfig(kMips, create_codegen_mips),
+ CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- CodegenTargetConfig(kMips64, create_codegen_mips64)
+ CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
#endif
};
@@ -825,7 +825,7 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
TEST_F(CodegenTest, MipsClobberRA) {
std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
MipsInstructionSetFeatures::FromCppDefines());
- if (!CanExecute(kMips) || features_mips->IsR6()) {
+ if (!CanExecute(InstructionSet::kMips) || features_mips->IsR6()) {
// HMipsComputeBaseMethodAddress and the NAL instruction behind it
// should only be generated on non-R6.
return;
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index bcbcc12349..c41c290c8b 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -207,7 +207,7 @@ class InternalCodeAllocator : public CodeAllocator {
static bool CanExecuteOnHardware(InstructionSet target_isa) {
return (target_isa == kRuntimeISA)
// Handle the special case of ARM, with two instructions sets (ARM32 and Thumb-2).
- || (kRuntimeISA == kArm && target_isa == kThumb2);
+ || (kRuntimeISA == InstructionSet::kArm && target_isa == InstructionSet::kThumb2);
}
static bool CanExecute(InstructionSet target_isa) {
@@ -271,7 +271,7 @@ static void Run(const InternalCodeAllocator& allocator,
typedef Expected (*fptr)();
CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
fptr f = reinterpret_cast<fptr>(allocator.GetMemory());
- if (target_isa == kThumb2) {
+ if (target_isa == InstructionSet::kThumb2) {
// For thumb we need the bottom bit set.
f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1);
}
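
The second hunk above also touches the usual Thumb interworking detail: code generated for kThumb2 must be entered through a pointer with bit 0 set. A small self-contained sketch of that fix-up (hypothetical helper, not part of the test utilities):

#include <cstdint>

enum class InstructionSet { kArm, kThumb2 };  // trimmed for the sketch

using EntryPoint = int (*)();

// Mirrors the pointer adjustment in Run() above: Thumb-2 targets need the low
// bit set so the call stays in Thumb state; other ISAs use the raw address.
inline EntryPoint MakeCallable(const void* code, InstructionSet target_isa) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(code);
  if (target_isa == InstructionSet::kThumb2) {
    addr |= 1u;  // Thumb interworking bit.
  }
  return reinterpret_cast<EntryPoint>(addr);
}
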
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 102acb3423..ed2f8e995d 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -342,7 +342,7 @@ inline vixl::aarch64::Extend ExtendFromOpKind(HDataProcWithShifterOp::OpKind op_
}
inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
- DCHECK(HasShifterOperand(instruction, kArm64));
+ DCHECK(HasShifterOperand(instruction, InstructionSet::kArm64));
// Although the `neg` instruction is an alias of the `sub` instruction, `HNeg`
// does *not* support extension. This is because the `extended register` form
// of the `sub` instruction interprets the left register with code 31 as the
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index f7fd9101fd..12c69889ab 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -153,7 +153,7 @@ class HGraphVisualizerDisassembler {
}
const uint8_t* base = disassembler_->GetDisassemblerOptions()->base_address_;
- if (instruction_set_ == kThumb2) {
+ if (instruction_set_ == InstructionSet::kThumb2) {
// ARM and Thumb-2 use the same disassembler. The bottom bit of the
// address is used to distinguish between the two.
base += 1;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 189d5aea56..2bd2d5f0a1 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -250,7 +250,7 @@ bool InstructionSimplifierVisitor::TryCombineVecMultiplyAccumulate(HVecMul* mul)
DataType::Type type = mul->GetPackedType();
InstructionSet isa = codegen_->GetInstructionSet();
switch (isa) {
- case kArm64:
+ case InstructionSet::kArm64:
if (!(type == DataType::Type::kUint8 ||
type == DataType::Type::kInt8 ||
type == DataType::Type::kUint16 ||
@@ -259,8 +259,8 @@ bool InstructionSimplifierVisitor::TryCombineVecMultiplyAccumulate(HVecMul* mul)
return false;
}
break;
- case kMips:
- case kMips64:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
if (!(type == DataType::Type::kUint8 ||
type == DataType::Type::kInt8 ||
type == DataType::Type::kUint16 ||
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 9422f9f30c..d41e49a0f3 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -84,7 +84,7 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor {
bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* use,
HInstruction* bitfield_op,
bool do_merge) {
- DCHECK(HasShifterOperand(use, kArm));
+ DCHECK(HasShifterOperand(use, InstructionSet::kArm));
DCHECK(use->IsBinaryOperation());
DCHECK(CanFitInShifterOperand(bitfield_op));
DCHECK(!bitfield_op->HasEnvironmentUses());
@@ -166,7 +166,7 @@ bool InstructionSimplifierArmVisitor::TryMergeIntoUsersShifterOperand(HInstructi
// Check whether we can merge the instruction in all its users' shifter operand.
for (const HUseListNode<HInstruction*>& use : uses) {
HInstruction* user = use.GetUser();
- if (!HasShifterOperand(user, kArm)) {
+ if (!HasShifterOperand(user, InstructionSet::kArm)) {
return false;
}
if (!CanMergeIntoShifterOperand(user, bitfield_op)) {
@@ -242,7 +242,7 @@ void InstructionSimplifierArmVisitor::VisitArraySet(HArraySet* instruction) {
}
void InstructionSimplifierArmVisitor::VisitMul(HMul* instruction) {
- if (TryCombineMultiplyAccumulate(instruction, kArm)) {
+ if (TryCombineMultiplyAccumulate(instruction, InstructionSet::kArm)) {
RecordSimplification();
}
}
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index c0ab68fec2..69e1463ac4 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -90,7 +90,7 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
bool InstructionSimplifierArm64Visitor::TryMergeIntoShifterOperand(HInstruction* use,
HInstruction* bitfield_op,
bool do_merge) {
- DCHECK(HasShifterOperand(use, kArm64));
+ DCHECK(HasShifterOperand(use, InstructionSet::kArm64));
DCHECK(use->IsBinaryOperation() || use->IsNeg());
DCHECK(CanFitInShifterOperand(bitfield_op));
DCHECK(!bitfield_op->HasEnvironmentUses());
@@ -170,7 +170,7 @@ bool InstructionSimplifierArm64Visitor::TryMergeIntoUsersShifterOperand(HInstruc
// Check whether we can merge the instruction in all its users' shifter operand.
for (const HUseListNode<HInstruction*>& use : uses) {
HInstruction* user = use.GetUser();
- if (!HasShifterOperand(user, kArm64)) {
+ if (!HasShifterOperand(user, InstructionSet::kArm64)) {
return false;
}
if (!CanMergeIntoShifterOperand(user, bitfield_op)) {
@@ -218,7 +218,7 @@ void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) {
}
void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) {
- if (TryCombineMultiplyAccumulate(instruction, kArm64)) {
+ if (TryCombineMultiplyAccumulate(instruction, InstructionSet::kArm64)) {
RecordSimplification();
}
}
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 1c13084a48..ccdcb3532d 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -90,13 +90,13 @@ bool TrySimpleMultiplyAccumulatePatterns(HMul* mul,
bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) {
DataType::Type type = mul->GetType();
switch (isa) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
if (type != DataType::Type::kInt32) {
return false;
}
break;
- case kArm64:
+ case InstructionSet::kArm64:
if (!DataType::IsIntOrLongType(type)) {
return false;
}
@@ -148,7 +148,7 @@ bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) {
mul->GetBlock()->RemoveInstruction(mul);
return true;
}
- } else if (use->IsNeg() && isa != kArm) {
+ } else if (use->IsNeg() && isa != InstructionSet::kArm) {
HMultiplyAccumulate* mulacc =
new (allocator) HMultiplyAccumulate(type,
HInstruction::kSub,
diff --git a/compiler/optimizing/instruction_simplifier_shared.h b/compiler/optimizing/instruction_simplifier_shared.h
index b016a8769e..758fc7663d 100644
--- a/compiler/optimizing/instruction_simplifier_shared.h
+++ b/compiler/optimizing/instruction_simplifier_shared.h
@@ -41,7 +41,8 @@ inline bool CanFitInShifterOperand(HInstruction* instruction) {
inline bool HasShifterOperand(HInstruction* instr, InstructionSet isa) {
// On ARM64 `neg` instructions are an alias of `sub` using the zero register
// as the first register input.
- bool res = instr->IsAdd() || instr->IsAnd() || (isa == kArm64 && instr->IsNeg()) ||
+ bool res = instr->IsAdd() || instr->IsAnd() ||
+ (isa == InstructionSet::kArm64 && instr->IsNeg()) ||
instr->IsOr() || instr->IsSub() || instr->IsXor();
return res;
}
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 74de0773fc..c672dae1d7 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -1414,8 +1414,8 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
uint32_t HLoopOptimization::GetVectorSizeInBytes() {
switch (compiler_driver_->GetInstructionSet()) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
return 8; // 64-bit SIMD
default:
return 16; // 128-bit SIMD
@@ -1425,8 +1425,8 @@ uint32_t HLoopOptimization::GetVectorSizeInBytes() {
bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) {
const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures();
switch (compiler_driver_->GetInstructionSet()) {
- case kArm:
- case kThumb2:
+ case InstructionSet::kArm:
+ case InstructionSet::kThumb2:
// Allow vectorization for all ARM devices, because Android assumes that
// ARM 32-bit always supports advanced SIMD (64-bit SIMD).
switch (type) {
@@ -1446,7 +1446,7 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
break;
}
return false;
- case kArm64:
+ case InstructionSet::kArm64:
// Allow vectorization for all ARM devices, because Android assumes that
// ARMv8 AArch64 always supports advanced SIMD (128-bit SIMD).
switch (type) {
@@ -1474,8 +1474,8 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
default:
return false;
}
- case kX86:
- case kX86_64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
// Allow vectorization for SSE4.1-enabled X86 devices only (128-bit SIMD).
if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) {
switch (type) {
@@ -1506,7 +1506,7 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
} // switch type
}
return false;
- case kMips:
+ case InstructionSet::kMips:
if (features->AsMipsInstructionSetFeatures()->HasMsa()) {
switch (type) {
case DataType::Type::kBool:
@@ -1535,7 +1535,7 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
} // switch type
}
return false;
- case kMips64:
+ case InstructionSet::kMips64:
if (features->AsMips64InstructionSetFeatures()->HasMsa()) {
switch (type) {
case DataType::Type::kBool:
@@ -2170,7 +2170,7 @@ static constexpr uint32_t ARM64_SIMD_HEURISTIC_MAX_BODY_SIZE = 50;
uint32_t HLoopOptimization::GetUnrollingFactor(HBasicBlock* block, int64_t trip_count) {
uint32_t max_peel = MaxNumberPeeled();
switch (compiler_driver_->GetInstructionSet()) {
- case kArm64: {
+ case InstructionSet::kArm64: {
// Don't unroll with insufficient iterations.
// TODO: Unroll loops with unknown trip count.
DCHECK_NE(vector_length_, 0u);
@@ -2192,8 +2192,8 @@ uint32_t HLoopOptimization::GetUnrollingFactor(HBasicBlock* block, int64_t trip_
DCHECK_GE(unroll_factor, 1u);
return unroll_factor;
}
- case kX86:
- case kX86_64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
default:
return kNoUnrollingFactor;
}
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index b7380b0a49..4ad29961be 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -153,15 +153,15 @@ class OptimizingCFITest : public CFITest {
InternalCodeAllocator code_allocator_;
};
-#define TEST_ISA(isa) \
- TEST_F(OptimizingCFITest, isa) { \
- std::vector<uint8_t> expected_asm( \
- expected_asm_##isa, \
- expected_asm_##isa + arraysize(expected_asm_##isa)); \
- std::vector<uint8_t> expected_cfi( \
- expected_cfi_##isa, \
- expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
- TestImpl(isa, #isa, expected_asm, expected_cfi); \
+#define TEST_ISA(isa) \
+ TEST_F(OptimizingCFITest, isa) { \
+ std::vector<uint8_t> expected_asm( \
+ expected_asm_##isa, \
+ expected_asm_##isa + arraysize(expected_asm_##isa)); \
+ std::vector<uint8_t> expected_cfi( \
+ expected_cfi_##isa, \
+ expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
+ TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi); \
}
#ifdef ART_ENABLE_CODEGEN_arm
@@ -204,7 +204,7 @@ TEST_F(OptimizingCFITest, kThumb2Adjust) {
std::vector<uint8_t> expected_cfi(
expected_cfi_kThumb2_adjust,
expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust));
- SetUpFrame(kThumb2);
+ SetUpFrame(InstructionSet::kThumb2);
#define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \
->GetAssembler())->GetVIXLAssembler()->
vixl32::Label target;
@@ -216,7 +216,7 @@ TEST_F(OptimizingCFITest, kThumb2Adjust) {
__ Bind(&target);
#undef __
Finish();
- Check(kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
+ Check(InstructionSet::kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
}
#endif
@@ -235,7 +235,7 @@ TEST_F(OptimizingCFITest, kMipsAdjust) {
std::vector<uint8_t> expected_cfi(
expected_cfi_kMips_adjust,
expected_cfi_kMips_adjust + arraysize(expected_cfi_kMips_adjust));
- SetUpFrame(kMips);
+ SetUpFrame(InstructionSet::kMips);
#define __ down_cast<mips::MipsAssembler*>(GetCodeGenerator()->GetAssembler())->
mips::MipsLabel target;
__ Beqz(mips::A0, &target);
@@ -246,7 +246,7 @@ TEST_F(OptimizingCFITest, kMipsAdjust) {
__ Bind(&target);
#undef __
Finish();
- Check(kMips, "kMips_adjust", expected_asm, expected_cfi);
+ Check(InstructionSet::kMips, "kMips_adjust", expected_asm, expected_cfi);
}
#endif
@@ -265,7 +265,7 @@ TEST_F(OptimizingCFITest, kMips64Adjust) {
std::vector<uint8_t> expected_cfi(
expected_cfi_kMips64_adjust,
expected_cfi_kMips64_adjust + arraysize(expected_cfi_kMips64_adjust));
- SetUpFrame(kMips64);
+ SetUpFrame(InstructionSet::kMips64);
#define __ down_cast<mips64::Mips64Assembler*>(GetCodeGenerator()->GetAssembler())->
mips64::Mips64Label target;
__ Beqc(mips64::A1, mips64::A2, &target);
@@ -276,7 +276,7 @@ TEST_F(OptimizingCFITest, kMips64Adjust) {
__ Bind(&target);
#undef __
Finish();
- Check(kMips64, "kMips64_adjust", expected_asm, expected_cfi);
+ Check(InstructionSet::kMips64, "kMips64_adjust", expected_asm, expected_cfi);
}
#endif
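
For reference, the reworked TEST_ISA macro above pastes the InstructionSet:: qualifier in front of its token argument while still stringizing the bare name, so TEST_ISA(kThumb2) now expands roughly to the following (approximate expansion, not literal preprocessor output):

TEST_F(OptimizingCFITest, kThumb2) {
  std::vector<uint8_t> expected_asm(
      expected_asm_kThumb2,
      expected_asm_kThumb2 + arraysize(expected_asm_kThumb2));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kThumb2,
      expected_cfi_kThumb2 + arraysize(expected_cfi_kThumb2));
  TestImpl(InstructionSet::kThumb2, "kThumb2", expected_asm, expected_cfi);
}
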
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 29319f8c38..9233eb5baf 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -437,13 +437,13 @@ bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
}
static bool IsInstructionSetSupported(InstructionSet instruction_set) {
- return instruction_set == kArm
- || instruction_set == kArm64
- || instruction_set == kThumb2
- || instruction_set == kMips
- || instruction_set == kMips64
- || instruction_set == kX86
- || instruction_set == kX86_64;
+ return instruction_set == InstructionSet::kArm
+ || instruction_set == InstructionSet::kArm64
+ || instruction_set == InstructionSet::kThumb2
+ || instruction_set == InstructionSet::kMips
+ || instruction_set == InstructionSet::kMips64
+ || instruction_set == InstructionSet::kX86
+ || instruction_set == InstructionSet::kX86_64;
}
// Strip pass name suffix to get optimization name.
@@ -637,8 +637,8 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
ArenaAllocator* allocator = graph->GetAllocator();
switch (instruction_set) {
#if defined(ART_ENABLE_CODEGEN_arm)
- case kThumb2:
- case kArm: {
+ case InstructionSet::kThumb2:
+ case InstructionSet::kArm: {
arm::InstructionSimplifierArm* simplifier =
new (allocator) arm::InstructionSimplifierArm(graph, stats);
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -657,7 +657,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64: {
+ case InstructionSet::kArm64: {
arm64::InstructionSimplifierArm64* simplifier =
new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -676,7 +676,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips: {
+ case InstructionSet::kMips: {
mips::InstructionSimplifierMips* simplifier =
new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
@@ -695,7 +695,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64: {
+ case InstructionSet::kMips64: {
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
GVNOptimization* gvn =
new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -708,7 +708,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- case kX86: {
+ case InstructionSet::kX86: {
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
GVNOptimization* gvn =
new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -727,7 +727,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64: {
+ case InstructionSet::kX86_64: {
SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
GVNOptimization* gvn =
new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
@@ -949,7 +949,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
// Always use the Thumb-2 assembler: some runtime functionality
// (like implicit stack overflow checks) assume Thumb-2.
- DCHECK_NE(instruction_set, kArm);
+ DCHECK_NE(instruction_set, InstructionSet::kArm);
// Do not attempt to compile on architectures we do not support.
if (!IsInstructionSetSupported(instruction_set)) {
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 86e971353f..bad73e1b61 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -70,13 +70,13 @@ RegisterAllocator::~RegisterAllocator() {
bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
InstructionSet instruction_set) {
- return instruction_set == kArm
- || instruction_set == kArm64
- || instruction_set == kMips
- || instruction_set == kMips64
- || instruction_set == kThumb2
- || instruction_set == kX86
- || instruction_set == kX86_64;
+ return instruction_set == InstructionSet::kArm
+ || instruction_set == InstructionSet::kArm64
+ || instruction_set == InstructionSet::kMips
+ || instruction_set == InstructionSet::kMips64
+ || instruction_set == InstructionSet::kThumb2
+ || instruction_set == InstructionSet::kX86
+ || instruction_set == InstructionSet::kX86_64;
}
class AllRangesIterator : public ValueObject {
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 57eb7623b1..8cc376c3a6 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -796,7 +796,7 @@ void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
switch (instruction_set_) {
#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64: {
+ case InstructionSet::kArm64: {
arm64::HSchedulerARM64 scheduler(&allocator, selector);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
scheduler.Schedule(graph_);
@@ -804,8 +804,8 @@ void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
}
#endif
#if defined(ART_ENABLE_CODEGEN_arm)
- case kThumb2:
- case kArm: {
+ case InstructionSet::kThumb2:
+ case InstructionSet::kArm: {
arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
arm::HSchedulerARM scheduler(&allocator, selector, &arm_latency_visitor);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index dfc1633fe6..75dce81550 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -43,22 +43,22 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
::std::vector<CodegenTargetConfig> test_config_candidates = {
#ifdef ART_ENABLE_CODEGEN_arm
// TODO: Shouldn't this be `kThumb2` instead of `kArm` here?
- CodegenTargetConfig(kArm, create_codegen_arm_vixl32),
+ CodegenTargetConfig(InstructionSet::kArm, create_codegen_arm_vixl32),
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
- CodegenTargetConfig(kArm64, create_codegen_arm64),
+ CodegenTargetConfig(InstructionSet::kArm64, create_codegen_arm64),
#endif
#ifdef ART_ENABLE_CODEGEN_x86
- CodegenTargetConfig(kX86, create_codegen_x86),
+ CodegenTargetConfig(InstructionSet::kX86, create_codegen_x86),
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
- CodegenTargetConfig(kX86_64, create_codegen_x86_64),
+ CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- CodegenTargetConfig(kMips, create_codegen_mips),
+ CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
- CodegenTargetConfig(kMips64, create_codegen_mips64)
+ CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
#endif
};
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 91f86d5c50..7e517f3485 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -928,18 +928,24 @@ TEST(StackMapTest, InlineTest) {
TEST(StackMapTest, CodeOffsetTest) {
// Test minimum alignments, encoding, and decoding.
- CodeOffset offset_thumb2 = CodeOffset::FromOffset(kThumb2InstructionAlignment, kThumb2);
- CodeOffset offset_arm64 = CodeOffset::FromOffset(kArm64InstructionAlignment, kArm64);
- CodeOffset offset_x86 = CodeOffset::FromOffset(kX86InstructionAlignment, kX86);
- CodeOffset offset_x86_64 = CodeOffset::FromOffset(kX86_64InstructionAlignment, kX86_64);
- CodeOffset offset_mips = CodeOffset::FromOffset(kMipsInstructionAlignment, kMips);
- CodeOffset offset_mips64 = CodeOffset::FromOffset(kMips64InstructionAlignment, kMips64);
- EXPECT_EQ(offset_thumb2.Uint32Value(kThumb2), kThumb2InstructionAlignment);
- EXPECT_EQ(offset_arm64.Uint32Value(kArm64), kArm64InstructionAlignment);
- EXPECT_EQ(offset_x86.Uint32Value(kX86), kX86InstructionAlignment);
- EXPECT_EQ(offset_x86_64.Uint32Value(kX86_64), kX86_64InstructionAlignment);
- EXPECT_EQ(offset_mips.Uint32Value(kMips), kMipsInstructionAlignment);
- EXPECT_EQ(offset_mips64.Uint32Value(kMips64), kMips64InstructionAlignment);
+ CodeOffset offset_thumb2 =
+ CodeOffset::FromOffset(kThumb2InstructionAlignment, InstructionSet::kThumb2);
+ CodeOffset offset_arm64 =
+ CodeOffset::FromOffset(kArm64InstructionAlignment, InstructionSet::kArm64);
+ CodeOffset offset_x86 =
+ CodeOffset::FromOffset(kX86InstructionAlignment, InstructionSet::kX86);
+ CodeOffset offset_x86_64 =
+ CodeOffset::FromOffset(kX86_64InstructionAlignment, InstructionSet::kX86_64);
+ CodeOffset offset_mips =
+ CodeOffset::FromOffset(kMipsInstructionAlignment, InstructionSet::kMips);
+ CodeOffset offset_mips64 =
+ CodeOffset::FromOffset(kMips64InstructionAlignment, InstructionSet::kMips64);
+ EXPECT_EQ(offset_thumb2.Uint32Value(InstructionSet::kThumb2), kThumb2InstructionAlignment);
+ EXPECT_EQ(offset_arm64.Uint32Value(InstructionSet::kArm64), kArm64InstructionAlignment);
+ EXPECT_EQ(offset_x86.Uint32Value(InstructionSet::kX86), kX86InstructionAlignment);
+ EXPECT_EQ(offset_x86_64.Uint32Value(InstructionSet::kX86_64), kX86_64InstructionAlignment);
+ EXPECT_EQ(offset_mips.Uint32Value(InstructionSet::kMips), kMipsInstructionAlignment);
+ EXPECT_EQ(offset_mips64.Uint32Value(InstructionSet::kMips64), kMips64InstructionAlignment);
}
TEST(StackMapTest, TestDeduplicateStackMask) {