author Vladimir Marko <vmarko@google.com> 2023-04-26 09:13:59 +0000
committer Vladimír Marko <vmarko@google.com> 2023-04-26 15:29:51 +0000
commit 5150dbe9bcb1fa950f9f3155430413570aaaaf2d (patch)
tree d1fb11332ee84a9f31b02d042ce2653bbf7bb0ab
parent 8890cdfd96cebe59b62dd010747f9babb8fa005c (diff)
Remove unnecessary `HInstruction::As##type()` calls.
Also do some style cleanup.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Change-Id: I34304acb39bc5197dde03543a6c157b3c319f94f
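The pattern being removed is easiest to see in isolation: the As##type()
helpers downcast an HInstruction* after a kind check, so calling one on a
pointer whose static type is already the derived node class is a no-op round
trip. Below is a minimal sketch of the idea, using simplified stand-in
classes rather than the real ART node definitions in nodes.h:

#include <cassert>

// Simplified stand-ins for the ART instruction hierarchy.
class HCondition;

class HInstruction {
 public:
  virtual ~HInstruction() = default;
  virtual bool IsCondition() const { return false; }
  // Downcast helper: checks the kind, then casts.
  HCondition* AsCondition();
};

class HCondition : public HInstruction {
 public:
  bool IsCondition() const override { return true; }
  int GetCondition() const { return kind_; }
 private:
  int kind_ = 0;
};

HCondition* HInstruction::AsCondition() {
  assert(IsCondition());
  return static_cast<HCondition*>(this);
}

// Before: the parameter already has static type HCondition*, so the
// AsCondition() round trip is redundant.
int GetKindBefore(HCondition* condition) {
  return condition->AsCondition()->GetCondition();
}

// After: call the accessor directly.
int GetKindAfter(HCondition* condition) {
  return condition->GetCondition();
}

int main() {
  HCondition cond;
  assert(GetKindBefore(&cond) == GetKindAfter(&cond));
  return 0;
}

With these stand-ins, GetKindBefore() and GetKindAfter() do the same thing;
dropping the cast removes noise (and a redundant debug-build check) without
changing behavior.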
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc           |  2
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc        |  3
-rw-r--r--  compiler/optimizing/code_generator_x86.cc             |  9
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc          | 10
-rw-r--r--  compiler/optimizing/instruction_builder.cc            |  6
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc                 |  2
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc              |  2
-rw-r--r--  compiler/optimizing/nodes.cc                          |  6
-rw-r--r--  compiler/optimizing/nodes.h                           |  4
-rw-r--r--  compiler/optimizing/register_allocation_resolver.cc   | 23
-rw-r--r--  compiler/optimizing/register_allocator_linear_scan.cc |  3
-rw-r--r--  compiler/optimizing/select_generator.cc               |  3
12 files changed, 37 insertions(+), 36 deletions(-)
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 41db9a2542..5492400b13 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3876,7 +3876,7 @@ static inline bool IsConditionOnFloatingPointValues(HInstruction* condition) {
}
static inline Condition GetConditionForSelect(HCondition* condition) {
- IfCondition cond = condition->AsCondition()->GetCondition();
+ IfCondition cond = condition->GetCondition();
return IsConditionOnFloatingPointValues(condition) ? ARM64FPCondition(cond, condition->IsGtBias())
: ARM64Condition(cond);
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index d69e77045b..1e1aee99aa 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -6104,8 +6104,7 @@ Location LocationsBuilderARMVIXL::ArithmeticZeroOrFpuRegister(HInstruction* inpu
Location LocationsBuilderARMVIXL::ArmEncodableConstantOrRegister(HInstruction* constant,
Opcode opcode) {
DCHECK(!DataType::IsFloatingPointType(constant->GetType()));
- if (constant->IsConstant() &&
- CanEncodeConstantAsImmediate(constant->AsConstant(), opcode)) {
+ if (constant->IsConstant() && CanEncodeConstantAsImmediate(constant->AsConstant(), opcode)) {
return Location::ConstantLocation(constant);
}
return Location::RequiresRegister();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8adfb53681..3dacbdc2c4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -839,7 +839,8 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
(instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile) ||
(instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kJdkUnsafeGetObject) ||
- (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kJdkUnsafeGetObjectVolatile) ||
+ (instruction_->AsInvoke()->GetIntrinsic() ==
+ Intrinsics::kJdkUnsafeGetObjectVolatile) ||
(instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kJdkUnsafeGetObjectAcquire))
<< instruction_->AsInvoke()->GetIntrinsic();
DCHECK_EQ(offset_, 0U);
@@ -8782,13 +8783,15 @@ void InstructionCodeGeneratorX86::VisitX86LoadFromConstantTable(HX86LoadFromCons
case DataType::Type::kFloat32:
__ movss(out.AsFpuRegister<XmmRegister>(),
codegen_->LiteralFloatAddress(
- value->AsFloatConstant()->GetValue(), insn->GetBaseMethodAddress(), const_area));
+ value->AsFloatConstant()->GetValue(), insn->GetBaseMethodAddress(), const_area));
break;
case DataType::Type::kFloat64:
__ movsd(out.AsFpuRegister<XmmRegister>(),
codegen_->LiteralDoubleAddress(
- value->AsDoubleConstant()->GetValue(), insn->GetBaseMethodAddress(), const_area));
+ value->AsDoubleConstant()->GetValue(),
+ insn->GetBaseMethodAddress(),
+ const_area));
break;
case DataType::Type::kInt32:
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f4a7b4463a..df2bef747f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -856,7 +856,8 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode {
DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
(instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile) ||
(instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kJdkUnsafeGetObject) ||
- (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kJdkUnsafeGetObjectVolatile) ||
+ (instruction_->AsInvoke()->GetIntrinsic() ==
+ Intrinsics::kJdkUnsafeGetObjectVolatile) ||
(instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kJdkUnsafeGetObjectAcquire))
<< instruction_->AsInvoke()->GetIntrinsic();
DCHECK_EQ(offset_, 0U);
@@ -2051,7 +2052,7 @@ void InstructionCodeGeneratorX86_64::GenerateCompareTest(HCondition* condition)
} else if (right.IsConstant()) {
__ ucomiss(left.AsFpuRegister<XmmRegister>(),
codegen_->LiteralFloatAddress(
- right.GetConstant()->AsFloatConstant()->GetValue()));
+ right.GetConstant()->AsFloatConstant()->GetValue()));
} else {
DCHECK(right.IsStackSlot());
__ ucomiss(left.AsFpuRegister<XmmRegister>(),
@@ -2065,7 +2066,7 @@ void InstructionCodeGeneratorX86_64::GenerateCompareTest(HCondition* condition)
} else if (right.IsConstant()) {
__ ucomisd(left.AsFpuRegister<XmmRegister>(),
codegen_->LiteralDoubleAddress(
- right.GetConstant()->AsDoubleConstant()->GetValue()));
+ right.GetConstant()->AsDoubleConstant()->GetValue()));
} else {
DCHECK(right.IsDoubleStackSlot());
__ ucomisd(left.AsFpuRegister<XmmRegister>(),
@@ -5930,8 +5931,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
__ movsd(address, value.AsFpuRegister<XmmRegister>());
codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
- int64_t v =
- bit_cast<int64_t, double>(value.GetConstant()->AsDoubleConstant()->GetValue());
+ int64_t v = bit_cast<int64_t, double>(value.GetConstant()->AsDoubleConstant()->GetValue());
Address address_high =
CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset + sizeof(int32_t));
codegen_->MoveInt64ToAddress(address, address_high, v, instruction);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index f9a513804c..e99b8a02d1 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -2348,9 +2348,9 @@ void HInstructionBuilder::BuildCheckedDivRem(uint16_t out_vreg,
second = LoadLocal(second_vreg_or_constant, type);
}
- if (!second_is_constant
- || (type == DataType::Type::kInt32 && second->AsIntConstant()->GetValue() == 0)
- || (type == DataType::Type::kInt64 && second->AsLongConstant()->GetValue() == 0)) {
+ if (!second_is_constant ||
+ (type == DataType::Type::kInt32 && second->AsIntConstant()->GetValue() == 0) ||
+ (type == DataType::Type::kInt64 && second->AsLongConstant()->GetValue() == 0)) {
second = new (allocator_) HDivZeroCheck(second, dex_pc);
AppendInstruction(second);
}
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index f32e153745..75ec85985a 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1205,7 +1205,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
HInstruction* code_point = invoke->InputAt(1);
if (code_point->IsIntConstant()) {
if (static_cast<uint32_t>(code_point->AsIntConstant()->GetValue()) >
- std::numeric_limits<uint16_t>::max()) {
+ std::numeric_limits<uint16_t>::max()) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
// rare, so for simplicity just put the full slow-path down and branch unconditionally.
slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86(invoke);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index c64bb89fea..5406450dec 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1424,7 +1424,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
HInstruction* code_point = invoke->InputAt(1);
if (code_point->IsIntConstant()) {
if (static_cast<uint32_t>(code_point->AsIntConstant()->GetValue()) >
- std::numeric_limits<uint16_t>::max()) {
+ std::numeric_limits<uint16_t>::max()) {
// Always needs the slow-path. We could directly dispatch to it, but this case should be
// rare, so for simplicity just put the full slow-path down and branch unconditionally.
slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathX86_64(invoke);
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index c99cfab05f..8d678cd33d 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -3026,9 +3026,9 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
replacement = outer_graph->GetDoubleConstant(
current->AsDoubleConstant()->GetValue(), current->GetDexPc());
} else if (current->IsParameterValue()) {
- if (kIsDebugBuild
- && invoke->IsInvokeStaticOrDirect()
- && invoke->AsInvokeStaticOrDirect()->IsStaticWithExplicitClinitCheck()) {
+ if (kIsDebugBuild &&
+ invoke->IsInvokeStaticOrDirect() &&
+ invoke->AsInvokeStaticOrDirect()->IsStaticWithExplicitClinitCheck()) {
// Ensure we do not use the last input of `invoke`, as it
// contains a clinit check which is not an actual argument.
size_t last_input_index = invoke->InputCount() - 1;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d13a8c31ac..822ac9a0a1 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1715,7 +1715,7 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
const char* DebugName() const override { return #type; } \
HInstruction* Clone(ArenaAllocator* arena) const override { \
DCHECK(IsClonable()); \
- return new (arena) H##type(*this->As##type()); \
+ return new (arena) H##type(*this); \
} \
void Accept(HGraphVisitor* visitor) override
@@ -3164,7 +3164,7 @@ class HPhi final : public HVariableInputSizeInstruction {
bool IsVRegEquivalentOf(const HInstruction* other) const {
return other != nullptr
&& other->IsPhi()
- && other->AsPhi()->GetBlock() == GetBlock()
+ && other->GetBlock() == GetBlock()
&& other->AsPhi()->GetRegNumber() == GetRegNumber();
}
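For the nodes.h macro change above, the key point is that
DECLARE_INSTRUCTION(type) is expanded inside the H##type class body, so
within Clone() the static type of *this is already H##type and the
As##type() downcast adds nothing. A sketch of how such an expansion behaves
(a simplified macro for illustration, not the real ART one, which also
declares DebugName() and Accept() and allocates from an ArenaAllocator):

class HInstruction {
 public:
  virtual ~HInstruction() = default;
  virtual HInstruction* Clone() const = 0;
};

// Simplified stand-in for the DECLARE_INSTRUCTION macro in nodes.h.
#define DECLARE_INSTRUCTION(type)                            \
  HInstruction* Clone() const override {                     \
    /* `this` already has static type `const H##type*` */    \
    /* here, so the old `this->As##type()` call was an */    \
    /* identity cast and can simply be dropped.        */    \
    return new H##type(*this);                               \
  }

class HAdd : public HInstruction {
 public:
  DECLARE_INSTRUCTION(Add)
};

int main() {
  HAdd add;
  HInstruction* copy = add.Clone();  // Clones via the expanded macro.
  delete copy;
  return 0;
}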
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 53e11f2c3d..982595b8e7 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -531,9 +531,9 @@ void RegisterAllocationResolver::AddInputMoveFor(HInstruction* input,
HInstruction* previous = user->GetPrevious();
HParallelMove* move = nullptr;
- if (previous == nullptr
- || !previous->IsParallelMove()
- || previous->GetLifetimePosition() < user->GetLifetimePosition()) {
+ if (previous == nullptr ||
+ !previous->IsParallelMove() ||
+ previous->GetLifetimePosition() < user->GetLifetimePosition()) {
move = new (allocator_) HParallelMove(allocator_);
move->SetLifetimePosition(user->GetLifetimePosition());
user->GetBlock()->InsertInstructionBefore(move, user);
@@ -604,15 +604,15 @@ void RegisterAllocationResolver::InsertParallelMoveAt(size_t position,
} else {
// Move must happen before the instruction.
HInstruction* previous = at->GetPrevious();
- if (previous == nullptr
- || !previous->IsParallelMove()
- || previous->GetLifetimePosition() != position) {
+ if (previous == nullptr ||
+ !previous->IsParallelMove() ||
+ previous->GetLifetimePosition() != position) {
// If the previous is a parallel move, then its position must be lower
// than the given `position`: it was added just after the non-parallel
// move instruction that precedes `instruction`.
- DCHECK(previous == nullptr
- || !previous->IsParallelMove()
- || previous->GetLifetimePosition() < position);
+ DCHECK(previous == nullptr ||
+ !previous->IsParallelMove() ||
+ previous->GetLifetimePosition() < position);
move = new (allocator_) HParallelMove(allocator_);
move->SetLifetimePosition(position);
at->GetBlock()->InsertInstructionBefore(move, at);
@@ -643,8 +643,9 @@ void RegisterAllocationResolver::InsertParallelMoveAtExitOf(HBasicBlock* block,
// This is a parallel move for connecting blocks. We need to differentiate
// it with moves for connecting siblings in a same block, and output moves.
size_t position = last->GetLifetimePosition();
- if (previous == nullptr || !previous->IsParallelMove()
- || previous->AsParallelMove()->GetLifetimePosition() != position) {
+ if (previous == nullptr ||
+ !previous->IsParallelMove() ||
+ previous->AsParallelMove()->GetLifetimePosition() != position) {
move = new (allocator_) HParallelMove(allocator_);
move->SetLifetimePosition(position);
block->InsertInstructionBefore(move, last);
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index fcdaa2d34f..ffa9937cc5 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -1208,8 +1208,7 @@ void RegisterAllocatorLinearScan::AllocateSpillSlotForCatchPhi(HPhi* phi) {
LiveInterval* interval = phi->GetLiveInterval();
HInstruction* previous_phi = phi->GetPrevious();
- DCHECK(previous_phi == nullptr ||
- previous_phi->AsPhi()->GetRegNumber() <= phi->GetRegNumber())
+ DCHECK(previous_phi == nullptr || previous_phi->AsPhi()->GetRegNumber() <= phi->GetRegNumber())
<< "Phis expected to be sorted by vreg number, so that equivalent phis are adjacent.";
if (phi->IsVRegEquivalentOf(previous_phi)) {
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index 6a10440d11..07065efbb7 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -46,8 +46,7 @@ static bool IsSimpleBlock(HBasicBlock* block) {
} else if (instruction->CanBeMoved() &&
!instruction->HasSideEffects() &&
!instruction->CanThrow()) {
- if (instruction->IsSelect() &&
- instruction->AsSelect()->GetCondition()->GetBlock() == block) {
+ if (instruction->IsSelect() && instruction->AsSelect()->GetCondition()->GetBlock() == block) {
// Count one HCondition and HSelect in the same block as a single instruction.
// This enables finding nested selects.
continue;