summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author Vladimir Marko <vmarko@google.com> 2023-04-06 10:29:19 +0000
committer Vladimír Marko <vmarko@google.com> 2023-04-25 17:02:47 +0000
commitf2eef5f0e445f3dd439f91ee8db5fdfa2129b892 (patch)
tree519b828880ce20c98746c895e5994c1c882d20ca
parentb5fcab944b3786f27ab6b698685109bfc7f785fd (diff)
Use `down_cast<>` in `HInstruction::As##type()`.
One overload used `down_cast<>` and the other used `static_cast<>`, so make it consistent. Also avoid some unnecessary `As##type()` calls and make some style adjustments. Test: m test-art-host-gtest Change-Id: I1f368a0c21647b44fffb7361dbb92d8a09fbe904
-rw-r--r--compiler/optimizing/bounds_check_elimination.cc3
-rw-r--r--compiler/optimizing/code_generator_vector_arm64_neon.cc6
-rw-r--r--compiler/optimizing/code_generator_vector_arm64_sve.cc3
-rw-r--r--compiler/optimizing/code_generator_x86.cc6
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc6
-rw-r--r--compiler/optimizing/common_arm64.h6
-rw-r--r--compiler/optimizing/graph_checker.cc3
-rw-r--r--compiler/optimizing/instruction_simplifier_shared.h2
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc4
-rw-r--r--compiler/optimizing/intrinsics_x86.cc2
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc2
-rw-r--r--compiler/optimizing/load_store_analysis.cc23
-rw-r--r--compiler/optimizing/load_store_analysis_test.cc20
-rw-r--r--compiler/optimizing/load_store_elimination_test.cc99
-rw-r--r--compiler/optimizing/loop_optimization.cc4
-rw-r--r--compiler/optimizing/nodes.h2
-rw-r--r--compiler/optimizing/ssa_builder.cc12
17 files changed, 97 insertions, 106 deletions
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 919abfdc49..89caf32f25 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1378,8 +1378,7 @@ class BCEVisitor final : public HGraphVisitor {
HInstruction* array_length,
HInstruction* base,
int32_t min_c, int32_t max_c) {
- HBoundsCheck* bounds_check =
- first_index_bounds_check_map_.Get(array_length->GetId())->AsBoundsCheck();
+ HBoundsCheck* bounds_check = first_index_bounds_check_map_.Get(array_length->GetId());
// Construct deoptimization on single or double bounds on range [base-min_c,base+max_c],
// for example either for a[0]..a[3] just 3 or for a[base-1]..a[base+3] both base-1
// and base+3, since we made the assumption any in between value may occur too.
diff --git a/compiler/optimizing/code_generator_vector_arm64_neon.cc b/compiler/optimizing/code_generator_vector_arm64_neon.cc
index 6b6e25cf0c..ce02bfa21a 100644
--- a/compiler/optimizing/code_generator_vector_arm64_neon.cc
+++ b/compiler/optimizing/code_generator_vector_arm64_neon.cc
@@ -61,10 +61,8 @@ inline bool NEONCanEncodeConstantAsImmediate(HConstant* constant, HInstruction*
// - constant location - if 'constant' is an actual constant and its value can be
// encoded into the instruction.
// - register location otherwise.
-inline Location NEONEncodableConstantOrRegister(HInstruction* constant,
- HInstruction* instr) {
- if (constant->IsConstant()
- && NEONCanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
+inline Location NEONEncodableConstantOrRegister(HInstruction* constant, HInstruction* instr) {
+ if (constant->IsConstant() && NEONCanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
return Location::ConstantLocation(constant);
}
diff --git a/compiler/optimizing/code_generator_vector_arm64_sve.cc b/compiler/optimizing/code_generator_vector_arm64_sve.cc
index fe15791d3f..4c16c3eb38 100644
--- a/compiler/optimizing/code_generator_vector_arm64_sve.cc
+++ b/compiler/optimizing/code_generator_vector_arm64_sve.cc
@@ -62,8 +62,7 @@ static bool SVECanEncodeConstantAsImmediate(HConstant* constant, HInstruction* i
// encoded into the instruction.
// - register location otherwise.
inline Location SVEEncodableConstantOrRegister(HInstruction* constant, HInstruction* instr) {
- if (constant->IsConstant()
- && SVECanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
+ if (constant->IsConstant() && SVECanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
return Location::ConstantLocation(constant);
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index cb1cecc45a..8adfb53681 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -8968,9 +8968,9 @@ Address CodeGeneratorX86::ArrayAddress(Register obj,
Location index,
ScaleFactor scale,
uint32_t data_offset) {
- return index.IsConstant() ?
- Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << scale) + data_offset) :
- Address(obj, index.AsRegister<Register>(), scale, data_offset);
+ return index.IsConstant()
+ ? Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << scale) + data_offset)
+ : Address(obj, index.AsRegister<Register>(), scale, data_offset);
}
Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr,
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index eea6b204fa..f4a7b4463a 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -8037,9 +8037,9 @@ Address CodeGeneratorX86_64::ArrayAddress(CpuRegister obj,
Location index,
ScaleFactor scale,
uint32_t data_offset) {
- return index.IsConstant() ?
- Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << scale) + data_offset) :
- Address(obj, index.AsRegister<CpuRegister>(), scale, data_offset);
+ return index.IsConstant()
+ ? Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << scale) + data_offset)
+ : Address(obj, index.AsRegister<CpuRegister>(), scale, data_offset);
}
void CodeGeneratorX86_64::Store64BitValueToStack(Location dest, int64_t value) {
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 20b0e38af5..e2ef8d52f2 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -311,10 +311,8 @@ inline bool Arm64CanEncodeConstantAsImmediate(HConstant* constant, HInstruction*
}
}
-inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
- HInstruction* instr) {
- if (constant->IsConstant()
- && Arm64CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
+inline Location ARM64EncodableConstantOrRegister(HInstruction* constant, HInstruction* instr) {
+ if (constant->IsConstant() && Arm64CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
return Location::ConstantLocation(constant);
}
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 190b362145..596049f369 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -944,8 +944,7 @@ static bool IsSameSizeConstant(const HInstruction* insn1, const HInstruction* in
static bool IsConstantEquivalent(const HInstruction* insn1,
const HInstruction* insn2,
BitVector* visited) {
- if (insn1->IsPhi() &&
- insn1->AsPhi()->IsVRegEquivalentOf(insn2)) {
+ if (insn1->IsPhi() && insn1->AsPhi()->IsVRegEquivalentOf(insn2)) {
HConstInputsRef insn1_inputs = insn1->GetInputs();
HConstInputsRef insn2_inputs = insn2->GetInputs();
if (insn1_inputs.size() != insn2_inputs.size()) {
diff --git a/compiler/optimizing/instruction_simplifier_shared.h b/compiler/optimizing/instruction_simplifier_shared.h
index ddc3a867b8..01489f8bcb 100644
--- a/compiler/optimizing/instruction_simplifier_shared.h
+++ b/compiler/optimizing/instruction_simplifier_shared.h
@@ -54,7 +54,7 @@ inline bool HasShifterOperand(HInstruction* instr, InstructionSet isa) {
// t3 = Sub(*, t2)
inline bool IsSubRightSubLeftShl(HSub *sub) {
HInstruction* right = sub->GetRight();
- return right->IsSub() && right->AsSub()->GetLeft()->IsShl();;
+ return right->IsSub() && right->AsSub()->GetLeft()->IsShl();
}
} // namespace helpers
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index d2dbaa32e3..b34f6a0126 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -3009,8 +3009,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
__ B(intrinsic_slow_path->GetEntryLabel(), eq);
}
// Checked when building locations.
- DCHECK(!optimizations.GetDestinationIsSource()
- || (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
+ DCHECK(!optimizations.GetDestinationIsSource() ||
+ (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
} else {
if (!optimizations.GetDestinationIsSource()) {
__ Cmp(src, dest);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index d2072201f8..f32e153745 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1445,7 +1445,7 @@ void IntrinsicCodeGeneratorX86::VisitStringGetCharsNoCheck(HInvoke* invoke) {
Register obj = locations->InAt(0).AsRegister<Register>();
Location srcBegin = locations->InAt(1);
int srcBegin_value =
- srcBegin.IsConstant() ? srcBegin.GetConstant()->AsIntConstant()->GetValue() : 0;
+ srcBegin.IsConstant() ? srcBegin.GetConstant()->AsIntConstant()->GetValue() : 0;
Register srcEnd = locations->InAt(2).AsRegister<Register>();
Register dst = locations->InAt(3).AsRegister<Register>();
Register dstBegin = locations->InAt(4).AsRegister<Register>();
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 9d0d5f155e..c64bb89fea 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1655,7 +1655,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
Location srcBegin = locations->InAt(1);
int srcBegin_value =
- srcBegin.IsConstant() ? srcBegin.GetConstant()->AsIntConstant()->GetValue() : 0;
+ srcBegin.IsConstant() ? srcBegin.GetConstant()->AsIntConstant()->GetValue() : 0;
CpuRegister srcEnd = locations->InAt(2).AsRegister<CpuRegister>();
CpuRegister dst = locations->InAt(3).AsRegister<CpuRegister>();
CpuRegister dstBegin = locations->InAt(4).AsRegister<CpuRegister>();
diff --git a/compiler/optimizing/load_store_analysis.cc b/compiler/optimizing/load_store_analysis.cc
index f1c50ac03c..b46e3e18d9 100644
--- a/compiler/optimizing/load_store_analysis.cc
+++ b/compiler/optimizing/load_store_analysis.cc
@@ -41,7 +41,7 @@ static bool CanBinaryOpAndIndexAlias(const HBinaryOperation* idx1,
// We currently only support Add and Sub operations.
return true;
}
- if (idx1->AsBinaryOperation()->GetLeastConstantLeft() != idx2) {
+ if (idx1->GetLeastConstantLeft() != idx2) {
// Cannot analyze [i+CONST1] and [j].
return true;
}
@@ -51,9 +51,9 @@ static bool CanBinaryOpAndIndexAlias(const HBinaryOperation* idx1,
// Since 'i' are the same in [i+CONST] and [i],
// further compare [CONST] and [0].
- int64_t l1 = idx1->IsAdd() ?
- idx1->GetConstantRight()->AsIntConstant()->GetValue() :
- -idx1->GetConstantRight()->AsIntConstant()->GetValue();
+ int64_t l1 = idx1->IsAdd()
+ ? idx1->GetConstantRight()->AsIntConstant()->GetValue()
+ : -idx1->GetConstantRight()->AsIntConstant()->GetValue();
int64_t l2 = 0;
int64_t h1 = l1 + (vector_length1 - 1);
int64_t h2 = l2 + (vector_length2 - 1);
@@ -68,8 +68,7 @@ static bool CanBinaryOpsAlias(const HBinaryOperation* idx1,
// We currently only support Add and Sub operations.
return true;
}
- if (idx1->AsBinaryOperation()->GetLeastConstantLeft() !=
- idx2->AsBinaryOperation()->GetLeastConstantLeft()) {
+ if (idx1->GetLeastConstantLeft() != idx2->GetLeastConstantLeft()) {
// Cannot analyze [i+CONST1] and [j+CONST2].
return true;
}
@@ -80,12 +79,12 @@ static bool CanBinaryOpsAlias(const HBinaryOperation* idx1,
// Since 'i' are the same in [i+CONST1] and [i+CONST2],
// further compare [CONST1] and [CONST2].
- int64_t l1 = idx1->IsAdd() ?
- idx1->GetConstantRight()->AsIntConstant()->GetValue() :
- -idx1->GetConstantRight()->AsIntConstant()->GetValue();
- int64_t l2 = idx2->IsAdd() ?
- idx2->GetConstantRight()->AsIntConstant()->GetValue() :
- -idx2->GetConstantRight()->AsIntConstant()->GetValue();
+ int64_t l1 = idx1->IsAdd()
+ ? idx1->GetConstantRight()->AsIntConstant()->GetValue()
+ : -idx1->GetConstantRight()->AsIntConstant()->GetValue();
+ int64_t l2 = idx2->IsAdd()
+ ? idx2->GetConstantRight()->AsIntConstant()->GetValue()
+ : -idx2->GetConstantRight()->AsIntConstant()->GetValue();
int64_t h1 = l1 + (vector_length1 - 1);
int64_t h2 = l2 + (vector_length2 - 1);
return CanIntegerRangesOverlap(l1, h1, l2, h2);
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index 17e7f1fb15..635f1b5fb2 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -896,7 +896,7 @@ TEST_F(LoadStoreAnalysisTest, PartialEscape) {
{ nullptr, 0 },
HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
+ call_left->SetRawInputAt(0, new_inst);
left->AddInstruction(call_left);
left->AddInstruction(goto_left);
@@ -1005,7 +1005,7 @@ TEST_F(LoadStoreAnalysisTest, PartialEscape2) {
{ nullptr, 0 },
HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
+ call_left->SetRawInputAt(0, new_inst);
left->AddInstruction(call_left);
left->AddInstruction(goto_left);
@@ -1128,7 +1128,7 @@ TEST_F(LoadStoreAnalysisTest, PartialEscape3) {
{ nullptr, 0 },
HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
+ call_left->SetRawInputAt(0, new_inst);
left->AddInstruction(call_left);
left->AddInstruction(goto_left);
@@ -1408,7 +1408,7 @@ TEST_F(LoadStoreAnalysisTest, TotalEscapeAdjacentNoPredicated) {
{nullptr, 0},
HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
+ call_left->SetRawInputAt(0, new_inst);
left->AddInstruction(call_left);
left->AddInstruction(goto_left);
@@ -1509,7 +1509,7 @@ TEST_F(LoadStoreAnalysisTest, TotalEscapeAdjacent) {
{ nullptr, 0 },
HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
+ call_left->SetRawInputAt(0, new_inst);
left->AddInstruction(call_left);
left->AddInstruction(goto_left);
@@ -1620,7 +1620,7 @@ TEST_F(LoadStoreAnalysisTest, TotalEscape) {
{ nullptr, 0 },
HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
+ call_left->SetRawInputAt(0, new_inst);
left->AddInstruction(call_left);
left->AddInstruction(goto_left);
@@ -1646,7 +1646,7 @@ TEST_F(LoadStoreAnalysisTest, TotalEscape) {
graph_->GetDexFile(),
0);
HInstruction* goto_right = new (GetAllocator()) HGoto();
- call_right->AsInvoke()->SetRawInputAt(0, new_inst);
+ call_right->SetRawInputAt(0, new_inst);
right->AddInstruction(write_right);
right->AddInstruction(call_right);
right->AddInstruction(goto_right);
@@ -1805,7 +1805,7 @@ TEST_F(LoadStoreAnalysisTest, DoubleDiamondEscape) {
{ nullptr, 0 },
HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
+ call_left->SetRawInputAt(0, new_inst);
high_left->AddInstruction(call_left);
high_left->AddInstruction(goto_left);
@@ -1861,7 +1861,7 @@ TEST_F(LoadStoreAnalysisTest, DoubleDiamondEscape) {
{ nullptr, 0 },
HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
HInstruction* goto_low_left = new (GetAllocator()) HGoto();
- call_low_left->AsInvoke()->SetRawInputAt(0, new_inst);
+ call_low_left->SetRawInputAt(0, new_inst);
low_left->AddInstruction(call_low_left);
low_left->AddInstruction(goto_low_left);
@@ -2020,7 +2020,7 @@ TEST_F(LoadStoreAnalysisTest, PartialPhiPropagation1) {
HInstruction* goto_left_merge = new (GetAllocator()) HGoto();
left_phi->SetRawInputAt(0, obj_param);
left_phi->SetRawInputAt(1, new_inst);
- call_left->AsInvoke()->SetRawInputAt(0, left_phi);
+ call_left->SetRawInputAt(0, left_phi);
left_merge->AddPhi(left_phi);
left_merge->AddInstruction(call_left);
left_merge->AddInstruction(goto_left_merge);
diff --git a/compiler/optimizing/load_store_elimination_test.cc b/compiler/optimizing/load_store_elimination_test.cc
index 1ee109980f..98f1260261 100644
--- a/compiler/optimizing/load_store_elimination_test.cc
+++ b/compiler/optimizing/load_store_elimination_test.cc
@@ -701,7 +701,7 @@ TEST_F(LoadStoreEliminationTest, StoreAfterSIMDLoopWithSideEffects) {
// b[phi,phi+1,phi+2,phi+3] = a[phi,phi+1,phi+2,phi+3];
AddVecStore(loop_, array_, phi_);
HInstruction* vload = AddVecLoad(loop_, array_, phi_);
- AddVecStore(loop_, array_b, phi_, vload->AsVecLoad());
+ AddVecStore(loop_, array_b, phi_, vload);
// a[j] = 0;
HInstruction* a_set = AddArraySet(return_block_, array_, j_, c0);
@@ -740,7 +740,7 @@ TEST_F(LoadStoreEliminationTest, LoadAfterSIMDLoopWithSideEffects) {
// b[phi,phi+1,phi+2,phi+3] = a[phi,phi+1,phi+2,phi+3];
AddVecStore(loop_, array_, phi_);
HInstruction* vload = AddVecLoad(loop_, array_, phi_);
- AddVecStore(loop_, array_b, phi_, vload->AsVecLoad());
+ AddVecStore(loop_, array_b, phi_, vload);
// x = a[j];
HInstruction* load = AddArrayGet(return_block_, array_, j_);
@@ -874,7 +874,7 @@ TEST_F(LoadStoreEliminationTest, RedundantVStoreVLoadInLoop) {
// a[i,... i + 3] = [1,...1]
HInstruction* vstore1 = AddVecStore(loop_, array_a, phi_);
HInstruction* vload = AddVecLoad(loop_, array_a, phi_);
- HInstruction* vstore2 = AddVecStore(loop_, array_b, phi_, vload->AsVecLoad());
+ HInstruction* vstore2 = AddVecStore(loop_, array_b, phi_, vload);
HInstruction* vstore3 = AddVecStore(loop_, array_a, phi_, vstore1->InputAt(2));
graph_->SetHasSIMD(true);
@@ -963,7 +963,7 @@ TEST_F(LoadStoreEliminationTest, VLoadDefaultValueInLoopWithoutWriteSideEffects)
// v = a[i,... i + 3]
// array[0,... 3] = v
HInstruction* vload = AddVecLoad(loop_, array_a, phi_);
- HInstruction* vstore = AddVecStore(return_block_, array_, c0, vload->AsVecLoad());
+ HInstruction* vstore = AddVecStore(return_block_, array_, c0, vload);
graph_->SetHasSIMD(true);
PerformLSE();
@@ -987,7 +987,7 @@ TEST_F(LoadStoreEliminationTest, VLoadDefaultValue) {
// v = a[0,... 3]
// array[0,... 3] = v
HInstruction* vload = AddVecLoad(pre_header_, array_a, c0);
- HInstruction* vstore = AddVecStore(return_block_, array_, c0, vload->AsVecLoad());
+ HInstruction* vstore = AddVecStore(return_block_, array_, c0, vload);
graph_->SetHasSIMD(true);
PerformLSE();
@@ -1063,7 +1063,7 @@ TEST_F(LoadStoreEliminationTest, VLoadAndLoadDefaultValueInLoopWithoutWriteSideE
// array[0] = v1
HInstruction* vload = AddVecLoad(loop_, array_a, phi_);
HInstruction* load = AddArrayGet(loop_, array_a, phi_);
- HInstruction* vstore = AddVecStore(return_block_, array_, c0, vload->AsVecLoad());
+ HInstruction* vstore = AddVecStore(return_block_, array_, c0, vload);
HInstruction* store = AddArraySet(return_block_, array_, c0, load);
graph_->SetHasSIMD(true);
@@ -1094,7 +1094,7 @@ TEST_F(LoadStoreEliminationTest, VLoadAndLoadDefaultValue) {
// array[0] = v1
HInstruction* vload = AddVecLoad(pre_header_, array_a, c0);
HInstruction* load = AddArrayGet(pre_header_, array_a, c0);
- HInstruction* vstore = AddVecStore(return_block_, array_, c0, vload->AsVecLoad());
+ HInstruction* vstore = AddVecStore(return_block_, array_, c0, vload);
HInstruction* store = AddArraySet(return_block_, array_, c0, load);
graph_->SetHasSIMD(true);
@@ -1126,8 +1126,8 @@ TEST_F(LoadStoreEliminationTest, VLoadDefaultValueAndVLoadInLoopWithoutWriteSide
// array[128,... 131] = v1
HInstruction* vload1 = AddVecLoad(loop_, array_a, phi_);
HInstruction* vload2 = AddVecLoad(loop_, array_a, phi_);
- HInstruction* vstore1 = AddVecStore(return_block_, array_, c0, vload1->AsVecLoad());
- HInstruction* vstore2 = AddVecStore(return_block_, array_, c128, vload2->AsVecLoad());
+ HInstruction* vstore1 = AddVecStore(return_block_, array_, c0, vload1);
+ HInstruction* vstore2 = AddVecStore(return_block_, array_, c128, vload2);
graph_->SetHasSIMD(true);
PerformLSE();
@@ -1157,8 +1157,8 @@ TEST_F(LoadStoreEliminationTest, VLoadDefaultValueAndVLoad) {
// array[128,... 131] = v1
HInstruction* vload1 = AddVecLoad(pre_header_, array_a, c0);
HInstruction* vload2 = AddVecLoad(pre_header_, array_a, c0);
- HInstruction* vstore1 = AddVecStore(return_block_, array_, c0, vload1->AsVecLoad());
- HInstruction* vstore2 = AddVecStore(return_block_, array_, c128, vload2->AsVecLoad());
+ HInstruction* vstore1 = AddVecStore(return_block_, array_, c0, vload1);
+ HInstruction* vstore2 = AddVecStore(return_block_, array_, c128, vload2);
graph_->SetHasSIMD(true);
PerformLSE();
@@ -2139,9 +2139,9 @@ TEST_F(LoadStoreEliminationTest, PartialLoadElimination) {
right->AddInstruction(read_right);
right->AddInstruction(goto_right);
- HInstruction* phi_final = MakePhi({read_left, read_right});
+ HPhi* phi_final = MakePhi({read_left, read_right});
HInstruction* return_exit = new (GetAllocator()) HReturn(phi_final);
- exit->AddPhi(phi_final->AsPhi());
+ exit->AddPhi(phi_final);
exit->AddInstruction(return_exit);
// PerformLSE expects this to be empty.
@@ -5153,7 +5153,7 @@ TEST_P(PartialComparisonTestGroup, PartialComparisonAfterCohort) {
CheckFinalInstruction(if_merge->InputAt(0), ComparisonPlacement::kAfterEscape);
EXPECT_INS_EQ(init_set->InputAt(1), c3);
ASSERT_TRUE(write_partial->InputAt(0)->IsPhi());
- EXPECT_INS_EQ(write_partial->InputAt(0)->AsPhi()->InputAt(0), init_set->InputAt(0));
+ EXPECT_INS_EQ(write_partial->InputAt(0)->InputAt(0), init_set->InputAt(0));
EXPECT_INS_EQ(write_partial->InputAt(1), c4);
EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return);
@@ -5225,14 +5225,14 @@ TEST_P(PartialComparisonTestGroup, PartialComparisonInCohortAfterEscape) {
HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
ComparisonInstructions cmp_instructions = GetComparisonInstructions(new_inst);
- HInstruction* if_left = new (GetAllocator()) HIf(cmp_instructions.cmp_);
+ HIf* if_left = new (GetAllocator()) HIf(cmp_instructions.cmp_);
left->AddInstruction(call_left);
cmp_instructions.AddSetup(left);
left->AddInstruction(cmp_instructions.cmp_);
left->AddInstruction(if_left);
call_left->CopyEnvironmentFrom(cls->GetEnvironment());
cmp_instructions.AddEnvironment(cls->GetEnvironment());
- if (if_left->AsIf()->IfTrueSuccessor() != partial) {
+ if (if_left->IfTrueSuccessor() != partial) {
left->SwapSuccessors();
}
@@ -5381,7 +5381,7 @@ TEST_F(LoadStoreEliminationTest, PredicatedStore1) {
right->AddInstruction(write_right);
right->AddInstruction(goto_right);
- HInstruction* write_bottom = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstanceFieldSet* write_bottom = MakeIFieldSet(new_inst, c3, MemberOffset(32));
HInstruction* return_exit = new (GetAllocator()) HReturnVoid();
breturn->AddInstruction(write_bottom);
breturn->AddInstruction(return_exit);
@@ -5391,7 +5391,7 @@ TEST_F(LoadStoreEliminationTest, PredicatedStore1) {
PerformLSEWithPartial(blks);
EXPECT_INS_RETAINED(write_bottom);
- EXPECT_TRUE(write_bottom->AsInstanceFieldSet()->GetIsPredicatedSet());
+ EXPECT_TRUE(write_bottom->GetIsPredicatedSet());
EXPECT_INS_REMOVED(write_right);
EXPECT_INS_RETAINED(call_left);
HPhi* merge_alloc = FindSingleInstruction<HPhi>(graph_, breturn);
@@ -5491,7 +5491,7 @@ TEST_F(LoadStoreEliminationTest, PredicatedStore2) {
non_escape->AddInstruction(non_escape_goto);
non_escape_call->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_bottom = MakeIFieldSet(new_inst, c4, MemberOffset(32));
+ HInstanceFieldSet* write_bottom = MakeIFieldSet(new_inst, c4, MemberOffset(32));
HInstruction* return_exit = new (GetAllocator()) HReturnVoid();
breturn->AddInstruction(write_bottom);
breturn->AddInstruction(return_exit);
@@ -5501,7 +5501,7 @@ TEST_F(LoadStoreEliminationTest, PredicatedStore2) {
PerformLSEWithPartial(blks);
EXPECT_INS_RETAINED(write_bottom);
- EXPECT_TRUE(write_bottom->AsInstanceFieldSet()->GetIsPredicatedSet()) << *write_bottom;
+ EXPECT_TRUE(write_bottom->GetIsPredicatedSet()) << *write_bottom;
EXPECT_INS_REMOVED(write_right);
EXPECT_INS_RETAINED(call_left);
HInstanceFieldSet* pred_set = FindSingleInstruction<HInstanceFieldSet>(graph_, breturn);
@@ -7213,7 +7213,7 @@ TEST_F(LoadStoreEliminationTest, PartialLoopPhis4) {
HInstruction* goto_no_escape = new (GetAllocator()) HGoto();
no_escape->AddInstruction(goto_no_escape);
- HInstruction* write_pre_header = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstanceFieldSet* write_pre_header = MakeIFieldSet(new_inst, c3, MemberOffset(32));
HInstruction* goto_preheader = new (GetAllocator()) HGoto();
loop_pre_header->AddInstruction(write_pre_header);
loop_pre_header->AddInstruction(goto_preheader);
@@ -7236,7 +7236,7 @@ TEST_F(LoadStoreEliminationTest, PartialLoopPhis4) {
HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
loop_if_left->AddInstruction(goto_loop_left);
- HInstruction* write_loop_right = MakeIFieldSet(new_inst, c5, MemberOffset(32));
+ HInstanceFieldSet* write_loop_right = MakeIFieldSet(new_inst, c5, MemberOffset(32));
HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
loop_if_right->AddInstruction(write_loop_right);
loop_if_right->AddInstruction(goto_loop_right);
@@ -7272,9 +7272,9 @@ TEST_F(LoadStoreEliminationTest, PartialLoopPhis4) {
EXPECT_INS_EQ(loop_merge_phi->InputAt(0), loop_header_phi);
EXPECT_INS_EQ(loop_merge_phi->InputAt(1), c5);
EXPECT_INS_RETAINED(write_loop_right) << *write_loop_right;
- EXPECT_TRUE(write_loop_right->AsInstanceFieldSet()->GetIsPredicatedSet()) << *write_loop_right;
+ EXPECT_TRUE(write_loop_right->GetIsPredicatedSet()) << *write_loop_right;
EXPECT_INS_RETAINED(write_pre_header) << *write_pre_header;
- EXPECT_TRUE(write_pre_header->AsInstanceFieldSet()->GetIsPredicatedSet()) << *write_pre_header;
+ EXPECT_TRUE(write_pre_header->GetIsPredicatedSet()) << *write_pre_header;
}
// // ENTRY
@@ -8268,13 +8268,13 @@ TEST_P(UsesOrderDependentTestGroup, RecordPredicatedReplacements1) {
FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, middle);
ASSERT_NE(replacement_middle_read, nullptr);
ASSERT_TRUE(replacement_middle_read->GetTarget()->IsPhi());
- ASSERT_EQ(2u, replacement_middle_read->GetTarget()->AsPhi()->InputCount());
- ASSERT_INS_EQ(replacement_middle_read->GetTarget()->AsPhi()->InputAt(0), replacement_new_inst);
- ASSERT_INS_EQ(replacement_middle_read->GetTarget()->AsPhi()->InputAt(1), cnull);
+ ASSERT_EQ(2u, replacement_middle_read->GetTarget()->InputCount());
+ ASSERT_INS_EQ(replacement_middle_read->GetTarget()->InputAt(0), replacement_new_inst);
+ ASSERT_INS_EQ(replacement_middle_read->GetTarget()->InputAt(1), cnull);
ASSERT_TRUE(replacement_middle_read->GetDefaultValue()->IsPhi());
- ASSERT_EQ(2u, replacement_middle_read->GetDefaultValue()->AsPhi()->InputCount());
- ASSERT_INS_EQ(replacement_middle_read->GetDefaultValue()->AsPhi()->InputAt(0), c0);
- ASSERT_INS_EQ(replacement_middle_read->GetDefaultValue()->AsPhi()->InputAt(1), c11);
+ ASSERT_EQ(2u, replacement_middle_read->GetDefaultValue()->InputCount());
+ ASSERT_INS_EQ(replacement_middle_read->GetDefaultValue()->InputAt(0), c0);
+ ASSERT_INS_EQ(replacement_middle_read->GetDefaultValue()->InputAt(1), c11);
EXPECT_INS_RETAINED(left2_write);
ASSERT_TRUE(left2_write->GetIsPredicatedSet());
@@ -8285,9 +8285,9 @@ TEST_P(UsesOrderDependentTestGroup, RecordPredicatedReplacements1) {
ASSERT_NE(replacement_breturn_read, nullptr);
ASSERT_INS_EQ(replacement_breturn_read->GetTarget(), replacement_middle_read->GetTarget());
ASSERT_TRUE(replacement_breturn_read->GetDefaultValue()->IsPhi());
- ASSERT_EQ(2u, replacement_breturn_read->GetDefaultValue()->AsPhi()->InputCount());
- ASSERT_INS_EQ(replacement_breturn_read->GetDefaultValue()->AsPhi()->InputAt(0), c33);
- HInstruction* other_input = replacement_breturn_read->GetDefaultValue()->AsPhi()->InputAt(1);
+ ASSERT_EQ(2u, replacement_breturn_read->GetDefaultValue()->InputCount());
+ ASSERT_INS_EQ(replacement_breturn_read->GetDefaultValue()->InputAt(0), c33);
+ HInstruction* other_input = replacement_breturn_read->GetDefaultValue()->InputAt(1);
ASSERT_NE(other_input->GetBlock(), nullptr) << GetParam();
ASSERT_INS_EQ(other_input, replacement_middle_read);
}
@@ -8423,13 +8423,13 @@ TEST_P(UsesOrderDependentTestGroup, RecordPredicatedReplacements2) {
FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, middle);
ASSERT_NE(replacement_middle_read, nullptr);
ASSERT_TRUE(replacement_middle_read->GetTarget()->IsPhi());
- ASSERT_EQ(2u, replacement_middle_read->GetTarget()->AsPhi()->InputCount());
- ASSERT_INS_EQ(replacement_middle_read->GetTarget()->AsPhi()->InputAt(0), replacement_new_inst);
- ASSERT_INS_EQ(replacement_middle_read->GetTarget()->AsPhi()->InputAt(1), cnull);
+ ASSERT_EQ(2u, replacement_middle_read->GetTarget()->InputCount());
+ ASSERT_INS_EQ(replacement_middle_read->GetTarget()->InputAt(0), replacement_new_inst);
+ ASSERT_INS_EQ(replacement_middle_read->GetTarget()->InputAt(1), cnull);
ASSERT_TRUE(replacement_middle_read->GetDefaultValue()->IsPhi());
- ASSERT_EQ(2u, replacement_middle_read->GetDefaultValue()->AsPhi()->InputCount());
- ASSERT_INS_EQ(replacement_middle_read->GetDefaultValue()->AsPhi()->InputAt(0), c0);
- ASSERT_INS_EQ(replacement_middle_read->GetDefaultValue()->AsPhi()->InputAt(1), c11);
+ ASSERT_EQ(2u, replacement_middle_read->GetDefaultValue()->InputCount());
+ ASSERT_INS_EQ(replacement_middle_read->GetDefaultValue()->InputAt(0), c0);
+ ASSERT_INS_EQ(replacement_middle_read->GetDefaultValue()->InputAt(1), c11);
EXPECT_INS_RETAINED(left2_call);
@@ -8627,13 +8627,13 @@ TEST_P(UsesOrderDependentTestGroupForThreeItems, RecordPredicatedReplacements3)
FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, middle1);
ASSERT_NE(replacement_middle1_read, nullptr);
ASSERT_TRUE(replacement_middle1_read->GetTarget()->IsPhi());
- ASSERT_EQ(2u, replacement_middle1_read->GetTarget()->AsPhi()->InputCount());
- ASSERT_INS_EQ(replacement_middle1_read->GetTarget()->AsPhi()->InputAt(0), replacement_new_inst);
- ASSERT_INS_EQ(replacement_middle1_read->GetTarget()->AsPhi()->InputAt(1), cnull);
+ ASSERT_EQ(2u, replacement_middle1_read->GetTarget()->InputCount());
+ ASSERT_INS_EQ(replacement_middle1_read->GetTarget()->InputAt(0), replacement_new_inst);
+ ASSERT_INS_EQ(replacement_middle1_read->GetTarget()->InputAt(1), cnull);
ASSERT_TRUE(replacement_middle1_read->GetDefaultValue()->IsPhi());
- ASSERT_EQ(2u, replacement_middle1_read->GetDefaultValue()->AsPhi()->InputCount());
- ASSERT_INS_EQ(replacement_middle1_read->GetDefaultValue()->AsPhi()->InputAt(0), c0);
- ASSERT_INS_EQ(replacement_middle1_read->GetDefaultValue()->AsPhi()->InputAt(1), c11);
+ ASSERT_EQ(2u, replacement_middle1_read->GetDefaultValue()->InputCount());
+ ASSERT_INS_EQ(replacement_middle1_read->GetDefaultValue()->InputAt(0), c0);
+ ASSERT_INS_EQ(replacement_middle1_read->GetDefaultValue()->InputAt(1), c11);
EXPECT_INS_RETAINED(left2_call);
@@ -8652,11 +8652,10 @@ TEST_P(UsesOrderDependentTestGroupForThreeItems, RecordPredicatedReplacements3)
FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
ASSERT_NE(replacement_breturn_read, nullptr);
ASSERT_INS_EQ(replacement_breturn_read->GetTarget(), replacement_middle1_read->GetTarget());
- ASSERT_EQ(2u, replacement_breturn_read->GetDefaultValue()->AsPhi()->InputCount());
- ASSERT_INS_EQ(replacement_breturn_read->GetDefaultValue()->AsPhi()->InputAt(0),
- replacement_left3_read);
- ASSERT_INS_EQ(replacement_breturn_read->GetDefaultValue()->AsPhi()->InputAt(1),
- replacement_middle1_read);
+ ASSERT_TRUE(replacement_breturn_read->GetDefaultValue()->IsPhi());
+ ASSERT_EQ(2u, replacement_breturn_read->GetDefaultValue()->InputCount());
+ ASSERT_INS_EQ(replacement_breturn_read->GetDefaultValue()->InputAt(0), replacement_left3_read);
+ ASSERT_INS_EQ(replacement_breturn_read->GetDefaultValue()->InputAt(1), replacement_middle1_read);
EXPECT_INS_RETAINED(breturn_add1);
ASSERT_INS_EQ(breturn_add1->InputAt(0), replacement_middle1_read);
ASSERT_INS_EQ(breturn_add1->InputAt(1), replacement_breturn_read);
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 7a52502562..d5e34634c8 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -2026,7 +2026,7 @@ HInstruction* HLoopOptimization::ReduceAndExtractIfNeeded(HInstruction* instruct
// x = REDUCE( [x_1, .., x_n] )
// y = x_1
// along the exit of the defining loop.
- HInstruction* reduce = new (global_allocator_) HVecReduce(
+ HVecReduce* reduce = new (global_allocator_) HVecReduce(
global_allocator_, instruction, type, vector_length, kind, kNoDexPc);
exit->InsertInstructionBefore(reduce, exit->GetFirstInstruction());
instruction = new (global_allocator_) HVecExtractScalar(
@@ -2040,7 +2040,7 @@ HInstruction* HLoopOptimization::ReduceAndExtractIfNeeded(HInstruction* instruct
vector_length,
0u);
exit->InsertInstructionBefore(set_pred, reduce);
- reduce->AsVecOperation()->SetMergingGoverningPredicate(set_pred);
+ reduce->SetMergingGoverningPredicate(set_pred);
instruction->AsVecOperation()->SetMergingGoverningPredicate(set_pred);
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index fc5d2196da..e562d7e901 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -8743,7 +8743,7 @@ inline bool IsZeroBitPattern(HInstruction* instruction) {
return Is##type() ? down_cast<const H##type*>(this) : nullptr; \
} \
inline H##type* HInstruction::As##type() { \
- return Is##type() ? static_cast<H##type*>(this) : nullptr; \
+ return Is##type() ? down_cast<H##type*>(this) : nullptr; \
}
FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CAST)
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index a658252e69..08ccbeee0d 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -652,16 +652,16 @@ HPhi* SsaBuilder::GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, DataType::
// We place the floating point /reference phi next to this phi.
HInstruction* next = phi->GetNext();
- if (next != nullptr
- && next->AsPhi()->GetRegNumber() == phi->GetRegNumber()
- && next->GetType() != type) {
+ if (next != nullptr &&
+ next->AsPhi()->GetRegNumber() == phi->GetRegNumber() &&
+ next->GetType() != type) {
// Move to the next phi to see if it is the one we are looking for.
next = next->GetNext();
}
- if (next == nullptr
- || (next->AsPhi()->GetRegNumber() != phi->GetRegNumber())
- || (next->GetType() != type)) {
+ if (next == nullptr ||
+ (next->AsPhi()->GetRegNumber() != phi->GetRegNumber()) ||
+ (next->GetType() != type)) {
ArenaAllocator* allocator = graph_->GetAllocator();
HInputsRef inputs = phi->GetInputs();
HPhi* new_phi = new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type);