-rw-r--r--  compiler/Android.bp                                    |     3
-rw-r--r--  compiler/optimizing/code_generator.cc                  |     2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc            |    65
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc         |    55
-rw-r--r--  compiler/optimizing/code_generator_riscv64.cc          |    65
-rw-r--r--  compiler/optimizing/code_generator_x86.cc              |    64
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc           |    60
-rw-r--r--  compiler/optimizing/code_sinking.cc                    |     1
-rw-r--r--  compiler/optimizing/execution_subgraph.cc              |   359
-rw-r--r--  compiler/optimizing/execution_subgraph.h               |   365
-rw-r--r--  compiler/optimizing/execution_subgraph_test.cc         |   975
-rw-r--r--  compiler/optimizing/execution_subgraph_test.h          |    40
-rw-r--r--  compiler/optimizing/graph_visualizer.cc                |     9
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc          |    65
-rw-r--r--  compiler/optimizing/instruction_simplifier_test.cc     |   254
-rw-r--r--  compiler/optimizing/load_store_analysis.cc             |    76
-rw-r--r--  compiler/optimizing/load_store_analysis.h              |   102
-rw-r--r--  compiler/optimizing/load_store_analysis_test.cc        |   787
-rw-r--r--  compiler/optimizing/load_store_elimination.cc          |  1206
-rw-r--r--  compiler/optimizing/load_store_elimination.h           |    11
-rw-r--r--  compiler/optimizing/load_store_elimination_test.cc     |  6274
-rw-r--r--  compiler/optimizing/loop_analysis.cc                   |     3
-rw-r--r--  compiler/optimizing/nodes.h                            |    97
-rw-r--r--  compiler/optimizing/optimizing_compiler_stats.h        |     2
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc      |     8
-rw-r--r--  compiler/optimizing/scheduler.cc                       |    10
-rw-r--r--  compiler/optimizing/scheduler_arm.cc                   |     9
-rw-r--r--  compiler/optimizing/scheduler_arm.h                    |     1
-rw-r--r--  compiler/optimizing/scheduler_test.cc                  |     3
-rw-r--r--  test/530-checker-instance-of-simplifier/jasmin/Main.j  |     1
30 files changed, 111 insertions, 10861 deletions
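
The lowering removed from every backend below follows one shape: a predicated instance-field get null-checks its receiver (input 1) and, when the receiver is null, skips the load so the output keeps the default value passed as input 0 (which is why the deleted location code uses Location::SameAsFirstInput()). The following is a minimal, hedged C++ sketch of those semantics only; Obj and PredicatedGet are illustrative names, not part of ART:

    #include <cstdint>

    struct Obj { int32_t field; };  // stand-in for a managed object

    // out = (receiver == nullptr) ? default_value : receiver->field
    // Mirrors the deleted lowering: Cbz/Beqz/testl+j(kZero) on the receiver,
    // then the ordinary field load, then the 'finish' label.
    static int32_t PredicatedGet(const Obj* receiver, int32_t default_value) {
      if (receiver == nullptr) {
        return default_value;  // output register already holds input 0
      }
      return receiver->field;  // regular HandleFieldGet path
    }
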
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 110785dc6b..4427cd0f22 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -155,7 +155,6 @@ art_cc_defaults {
"optimizing/data_type.cc",
"optimizing/dead_code_elimination.cc",
"optimizing/escape.cc",
- "optimizing/execution_subgraph.cc",
"optimizing/graph_checker.cc",
"optimizing/graph_visualizer.cc",
"optimizing/gvn.cc",
@@ -459,7 +458,6 @@ art_cc_defaults {
"jni/jni_compiler_test.cc",
"optimizing/codegen_test.cc",
"optimizing/constant_folding_test.cc",
- "optimizing/execution_subgraph_test.cc",
"optimizing/induction_var_range_test.cc",
"optimizing/load_store_elimination_test.cc",
"optimizing/ssa_test.cc",
@@ -505,7 +503,6 @@ art_cc_defaults {
"utils/dedupe_set_test.cc",
"optimizing/codegen_test.cc",
- "optimizing/execution_subgraph_test.cc",
"optimizing/instruction_simplifier_test.cc",
"optimizing/load_store_analysis_test.cc",
"optimizing/load_store_elimination_test.cc",
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 80c0f84d6e..05aebee2ee 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1642,7 +1642,6 @@ void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
// GC.
(EmitNonBakerReadBarrier() &&
(instruction->IsInstanceFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsStaticFieldGet() ||
instruction->IsArrayGet() ||
instruction->IsLoadClass() ||
@@ -1679,7 +1678,6 @@ void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* in
// PC-related information.
DCHECK(kUseBakerReadBarrier);
DCHECK(instruction->IsInstanceFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsStaticFieldGet() ||
instruction->IsArrayGet() ||
instruction->IsArraySet() ||
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index bd1f71f344..0e791b19ba 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -608,7 +608,6 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg()));
DCHECK(instruction_->IsInstanceFieldGet() ||
- instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
@@ -2169,11 +2168,7 @@ void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet());
-
- bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
bool object_field_get_with_read_barrier =
(instruction->GetType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
@@ -2193,37 +2188,24 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction,
}
}
// Input for object receiver.
- locations->SetInAt(is_predicated ? 1 : 0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
if (DataType::IsFloatingPointType(instruction->GetType())) {
- if (is_predicated) {
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetOut(Location::SameAsFirstInput());
- } else {
- locations->SetOut(Location::RequiresFpuRegister());
- }
+ locations->SetOut(Location::RequiresFpuRegister());
} else {
- if (is_predicated) {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
- } else {
- // The output overlaps for an object field get when read barriers
- // are enabled: we do not want the load to overwrite the object's
- // location, as we need it to emit the read barrier.
- locations->SetOut(Location::RequiresRegister(),
- object_field_get_with_read_barrier ? Location::kOutputOverlap
- : Location::kNoOutputOverlap);
- }
+ // The output overlaps for an object field get when read barriers
+ // are enabled: we do not want the load to overwrite the object's
+ // location, as we need it to emit the read barrier.
+ locations->SetOut(
+ Location::RequiresRegister(),
+ object_field_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
}
}
void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet());
- bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
LocationSummary* locations = instruction->GetLocations();
- uint32_t receiver_input = is_predicated ? 1 : 0;
+ uint32_t receiver_input = 0;
Location base_loc = locations->InAt(receiver_input);
Location out = locations->Out();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
@@ -2293,20 +2275,12 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
bool value_can_be_null,
WriteBarrierKind write_barrier_kind) {
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
- bool is_predicated =
- instruction->IsInstanceFieldSet() && instruction->AsInstanceFieldSet()->GetIsPredicatedSet();
Register obj = InputRegisterAt(instruction, 0);
CPURegister value = InputCPURegisterOrZeroRegAt(instruction, 1);
CPURegister source = value;
Offset offset = field_info.GetFieldOffset();
DataType::Type field_type = field_info.GetFieldType();
- std::optional<vixl::aarch64::Label> pred_is_null;
- if (is_predicated) {
- pred_is_null.emplace();
- __ Cbz(obj, &*pred_is_null);
- }
-
{
// We use a block to end the scratch scope before the write barrier, thus
// freeing the temporary registers so they can be used in `MarkGCCard`.
@@ -2338,10 +2312,6 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
Register(value),
value_can_be_null && write_barrier_kind == WriteBarrierKind::kEmitWithNullCheck);
}
-
- if (is_predicated) {
- __ Bind(&*pred_is_null);
- }
}
void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
@@ -4031,23 +4001,10 @@ void CodeGeneratorARM64::GenerateNop() {
__ Nop();
}
-void LocationsBuilderARM64::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* instruction) {
- HandleFieldGet(instruction, instruction->GetFieldInfo());
-}
-
void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
-void InstructionCodeGeneratorARM64::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* instruction) {
- vixl::aarch64::Label finish;
- __ Cbz(InputRegisterAt(instruction, 1), &finish);
- HandleFieldGet(instruction, instruction->GetFieldInfo());
- __ Bind(&finish);
-}
-
void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
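
The store side, deleted from the HandleFieldSet implementations, is the mirror image: when the receiver is null, the store and its write barrier are skipped outright rather than throwing. A hedged sketch, reusing the illustrative Obj type from the sketch after the diffstat:

    // if (receiver != nullptr) receiver->field = value;  -- no exception.
    // Mirrors the deleted code: branch to pred_is_null over the store and
    // MarkGCCard, then Bind(pred_is_null).
    static void PredicatedSet(Obj* receiver, int32_t value) {
      if (receiver == nullptr) {
        return;  // pred_is_null: nothing is written
      }
      receiver->field = value;
    }
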
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 25331dfcc9..c9e454e858 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -741,7 +741,6 @@ class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL {
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out.GetCode()));
DCHECK(instruction_->IsInstanceFieldGet() ||
- instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
@@ -5955,10 +5954,7 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
LocationSummary* locations = instruction->GetLocations();
vixl32::Register base = InputRegisterAt(instruction, 0);
Location value = locations->InAt(1);
- std::optional<vixl::aarch32::Label> pred_is_null;
- bool is_predicated =
- instruction->IsInstanceFieldSet() && instruction->AsInstanceFieldSet()->GetIsPredicatedSet();
bool is_volatile = field_info.IsVolatile();
bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
DataType::Type field_type = field_info.GetFieldType();
@@ -5966,11 +5962,6 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
- if (is_predicated) {
- pred_is_null.emplace();
- __ CompareAndBranchIfZero(base, &*pred_is_null, /* is_far_target= */ false);
- }
-
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
}
@@ -6080,21 +6071,14 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
-
- if (is_predicated) {
- __ Bind(&*pred_is_null);
- }
}
void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
bool object_field_get_with_read_barrier =
(field_info.GetFieldType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
- bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_field_get_with_read_barrier
@@ -6104,7 +6088,7 @@ void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction,
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
// Input for object receiver.
- locations->SetInAt(is_predicated ? 1 : 0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
bool volatile_for_double = field_info.IsVolatile()
&& (field_info.GetFieldType() == DataType::Type::kFloat64)
@@ -6119,20 +6103,10 @@ void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction,
object_field_get_with_read_barrier;
if (DataType::IsFloatingPointType(instruction->GetType())) {
- if (is_predicated) {
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetOut(Location::SameAsFirstInput());
- } else {
- locations->SetOut(Location::RequiresFpuRegister());
- }
+ locations->SetOut(Location::RequiresFpuRegister());
} else {
- if (is_predicated) {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
- } else {
- locations->SetOut(Location::RequiresRegister(),
- (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
- }
+ locations->SetOut(Location::RequiresRegister(),
+ (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
}
if (volatile_for_double) {
// The ARM encoding has some additional constraints for ldrexd/strexd:
@@ -6232,12 +6206,10 @@ bool LocationsBuilderARMVIXL::CanEncodeConstantAsImmediate(HConstant* input_cst,
void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
LocationSummary* locations = instruction->GetLocations();
- uint32_t receiver_input = instruction->IsPredicatedInstanceFieldGet() ? 1 : 0;
+ uint32_t receiver_input = 0;
vixl32::Register base = InputRegisterAt(instruction, receiver_input);
Location out = locations->Out();
bool is_volatile = field_info.IsVolatile();
@@ -6360,19 +6332,6 @@ void LocationsBuilderARMVIXL::VisitInstanceFieldGet(HInstanceFieldGet* instructi
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
-void LocationsBuilderARMVIXL::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* instruction) {
- HandleFieldGet(instruction, instruction->GetFieldInfo());
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* instruction) {
- vixl::aarch32::Label finish;
- __ CompareAndBranchIfZero(InputRegisterAt(instruction, 1), &finish, false);
- HandleFieldGet(instruction, instruction->GetFieldInfo());
- __ Bind(&finish);
-}
-
void InstructionCodeGeneratorARMVIXL::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 92ed3f1d4f..3decee3be0 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -684,7 +684,6 @@ class ReadBarrierMarkSlowPathRISCV64 : public SlowPathCodeRISCV64 {
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
DCHECK(instruction_->IsInstanceFieldGet() ||
- instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsArraySet() ||
@@ -2464,13 +2463,6 @@ void InstructionCodeGeneratorRISCV64::HandleFieldSet(HInstruction* instruction,
DCHECK_IMPLIES(value.IsConstant(), IsZeroBitPattern(value.GetConstant()));
bool is_volatile = field_info.IsVolatile();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
- bool is_predicated =
- instruction->IsInstanceFieldSet() && instruction->AsInstanceFieldSet()->GetIsPredicatedSet();
-
- Riscv64Label pred_is_null;
- if (is_predicated) {
- __ Beqz(obj, &pred_is_null);
- }
if (is_volatile) {
StoreSeqCst(value, obj, offset, type, instruction);
@@ -2486,18 +2478,10 @@ void InstructionCodeGeneratorRISCV64::HandleFieldSet(HInstruction* instruction,
value.AsRegister<XRegister>(),
value_can_be_null && write_barrier_kind == WriteBarrierKind::kEmitWithNullCheck);
}
-
- if (is_predicated) {
- __ Bind(&pred_is_null);
- }
}
void LocationsBuilderRISCV64::HandleFieldGet(HInstruction* instruction) {
- DCHECK(instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet());
-
- bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
bool object_field_get_with_read_barrier =
(instruction->GetType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
@@ -2508,27 +2492,17 @@ void LocationsBuilderRISCV64::HandleFieldGet(HInstruction* instruction) {
: LocationSummary::kNoCall);
// Input for object receiver.
- locations->SetInAt(is_predicated ? 1 : 0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
if (DataType::IsFloatingPointType(instruction->GetType())) {
- if (is_predicated) {
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetOut(Location::SameAsFirstInput());
- } else {
- locations->SetOut(Location::RequiresFpuRegister());
- }
+ locations->SetOut(Location::RequiresFpuRegister());
} else {
- if (is_predicated) {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::SameAsFirstInput());
- } else {
- // The output overlaps for an object field get when read barriers
- // are enabled: we do not want the load to overwrite the object's
- // location, as we need it to emit the read barrier.
- locations->SetOut(Location::RequiresRegister(),
- object_field_get_with_read_barrier ? Location::kOutputOverlap
- : Location::kNoOutputOverlap);
- }
+ // The output overlaps for an object field get when read barriers
+ // are enabled: we do not want the load to overwrite the object's
+ // location, as we need it to emit the read barrier.
+ locations->SetOut(
+ Location::RequiresRegister(),
+ object_field_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
}
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
@@ -2541,13 +2515,11 @@ void LocationsBuilderRISCV64::HandleFieldGet(HInstruction* instruction) {
void InstructionCodeGeneratorRISCV64::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType()));
DataType::Type type = instruction->GetType();
LocationSummary* locations = instruction->GetLocations();
- Location obj_loc = locations->InAt(instruction->IsPredicatedInstanceFieldGet() ? 1 : 0);
+ Location obj_loc = locations->InAt(0);
XRegister obj = obj_loc.AsRegister<XRegister>();
Location dst_loc = locations->Out();
bool is_volatile = field_info.IsVolatile();
@@ -3778,21 +3750,6 @@ void InstructionCodeGeneratorRISCV64::VisitInstanceFieldSet(HInstanceFieldSet* i
instruction->GetWriteBarrierKind());
}
-void LocationsBuilderRISCV64::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* instruction) {
- HandleFieldGet(instruction);
-}
-
-void InstructionCodeGeneratorRISCV64::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* instruction) {
- Riscv64Label finish;
- LocationSummary* locations = instruction->GetLocations();
- XRegister target = locations->InAt(1).AsRegister<XRegister>();
- __ Beqz(target, &finish);
- HandleFieldGet(instruction, instruction->GetFieldInfo());
- __ Bind(&finish);
-}
-
void LocationsBuilderRISCV64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1dfeb536f8..959de47b8d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -516,7 +516,6 @@ class ReadBarrierMarkSlowPathX86 : public SlowPathCode {
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
DCHECK(instruction_->IsInstanceFieldGet() ||
- instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsArraySet() ||
@@ -769,7 +768,6 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
DCHECK(instruction_->IsInstanceFieldGet() ||
- instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
@@ -5901,13 +5899,10 @@ void CodeGeneratorX86::MarkGCCard(
}
void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
bool object_field_get_with_read_barrier =
(instruction->GetType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
- bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
codegen_->EmitReadBarrier()
@@ -5917,29 +5912,20 @@ void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldI
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
// receiver_input
- locations->SetInAt(is_predicated ? 1 : 0, Location::RequiresRegister());
- if (is_predicated) {
- if (DataType::IsFloatingPointType(instruction->GetType())) {
- locations->SetInAt(0, Location::RequiresFpuRegister());
- } else {
- locations->SetInAt(0, Location::RequiresRegister());
- }
- }
+ locations->SetInAt(0, Location::RequiresRegister());
if (DataType::IsFloatingPointType(instruction->GetType())) {
- locations->SetOut(is_predicated ? Location::SameAsFirstInput()
- : Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
} else {
// The output overlaps in case of long: we don't want the low move
// to overwrite the object's location. Likewise, in the case of
// an object field get with read barriers enabled, we do not want
// the move to overwrite the object's location, as we need it to emit
// the read barrier.
- locations->SetOut(is_predicated ? Location::SameAsFirstInput() : Location::RequiresRegister(),
- (object_field_get_with_read_barrier ||
- instruction->GetType() == DataType::Type::kInt64 ||
- is_predicated)
- ? Location::kOutputOverlap
- : Location::kNoOutputOverlap);
+ locations->SetOut(
+ Location::RequiresRegister(),
+ (object_field_get_with_read_barrier || instruction->GetType() == DataType::Type::kInt64)
+ ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
}
if (field_info.IsVolatile() && (field_info.GetFieldType() == DataType::Type::kInt64)) {
@@ -5953,12 +5939,10 @@ void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldI
void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
LocationSummary* locations = instruction->GetLocations();
- Location base_loc = locations->InAt(instruction->IsPredicatedInstanceFieldGet() ? 1 : 0);
+ Location base_loc = locations->InAt(0);
Register base = base_loc.AsRegister<Register>();
Location out = locations->Out();
bool is_volatile = field_info.IsVolatile();
@@ -6196,17 +6180,8 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction,
bool is_volatile = field_info.IsVolatile();
DataType::Type field_type = field_info.GetFieldType();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
- bool is_predicated =
- instruction->IsInstanceFieldSet() && instruction->AsInstanceFieldSet()->GetIsPredicatedSet();
-
Address field_addr(base, offset);
- NearLabel pred_is_null;
- if (is_predicated) {
- __ testl(base, base);
- __ j(kEqual, &pred_is_null);
- }
-
HandleFieldSet(instruction,
/* value_index= */ 1,
field_type,
@@ -6215,10 +6190,6 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction,
is_volatile,
value_can_be_null,
write_barrier_kind);
-
- if (is_predicated) {
- __ Bind(&pred_is_null);
- }
}
void LocationsBuilderX86::VisitStaticFieldGet(HStaticFieldGet* instruction) {
@@ -6251,25 +6222,10 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldSet(HInstanceFieldSet* instr
instruction->GetWriteBarrierKind());
}
-void LocationsBuilderX86::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* instruction) {
- HandleFieldGet(instruction, instruction->GetFieldInfo());
-}
-
void LocationsBuilderX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
-void InstructionCodeGeneratorX86::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* instruction) {
- NearLabel finish;
- LocationSummary* locations = instruction->GetLocations();
- Register recv = locations->InAt(1).AsRegister<Register>();
- __ testl(recv, recv);
- __ j(kZero, &finish);
- HandleFieldGet(instruction, instruction->GetFieldInfo());
- __ Bind(&finish);
-}
void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 560c4e1371..4f81c00741 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -524,7 +524,6 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
DCHECK(instruction_->IsInstanceFieldGet() ||
- instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsArraySet() ||
@@ -786,7 +785,6 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode {
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out.AsRegister())) << out_;
DCHECK(instruction_->IsInstanceFieldGet() ||
- instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
@@ -5185,13 +5183,10 @@ void CodeGeneratorX86_64::GenerateMemoryBarrier(MemBarrierKind kind) {
}
void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) {
- DCHECK(instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
bool object_field_get_with_read_barrier =
(instruction->GetType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
- bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_field_get_with_read_barrier
@@ -5201,37 +5196,26 @@ void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
// receiver_input
- locations->SetInAt(is_predicated ? 1 : 0, Location::RequiresRegister());
- if (is_predicated) {
- if (DataType::IsFloatingPointType(instruction->GetType())) {
- locations->SetInAt(0, Location::RequiresFpuRegister());
- } else {
- locations->SetInAt(0, Location::RequiresRegister());
- }
- }
+ locations->SetInAt(0, Location::RequiresRegister());
if (DataType::IsFloatingPointType(instruction->GetType())) {
- locations->SetOut(is_predicated ? Location::SameAsFirstInput()
- : Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
} else {
// The output overlaps for an object field get when read barriers are
// enabled: we do not want the move to overwrite the object's location, as
// we need it to emit the read barrier.
- locations->SetOut(is_predicated ? Location::SameAsFirstInput() : Location::RequiresRegister(),
- object_field_get_with_read_barrier || is_predicated
- ? Location::kOutputOverlap
- : Location::kNoOutputOverlap);
+ locations->SetOut(
+ Location::RequiresRegister(),
+ object_field_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
}
}
void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
LocationSummary* locations = instruction->GetLocations();
- Location base_loc = locations->InAt(instruction->IsPredicatedInstanceFieldGet() ? 1 : 0);
+ Location base_loc = locations->InAt(0);
CpuRegister base = base_loc.AsRegister<CpuRegister>();
Location out = locations->Out();
bool is_volatile = field_info.IsVolatile();
@@ -5507,14 +5491,6 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction,
bool is_volatile = field_info.IsVolatile();
DataType::Type field_type = field_info.GetFieldType();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
- bool is_predicated =
- instruction->IsInstanceFieldSet() && instruction->AsInstanceFieldSet()->GetIsPredicatedSet();
-
- NearLabel pred_is_null;
- if (is_predicated) {
- __ testl(base, base);
- __ j(kZero, &pred_is_null);
- }
HandleFieldSet(instruction,
/*value_index=*/ 1,
@@ -5527,10 +5503,6 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction,
value_can_be_null,
/*byte_swap=*/ false,
write_barrier_kind);
-
- if (is_predicated) {
- __ Bind(&pred_is_null);
- }
}
void LocationsBuilderX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
@@ -5544,26 +5516,10 @@ void InstructionCodeGeneratorX86_64::VisitInstanceFieldSet(HInstanceFieldSet* in
instruction->GetWriteBarrierKind());
}
-void LocationsBuilderX86_64::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* instruction) {
- HandleFieldGet(instruction);
-}
-
void LocationsBuilderX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction);
}
-void InstructionCodeGeneratorX86_64::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* instruction) {
- NearLabel finish;
- LocationSummary* locations = instruction->GetLocations();
- CpuRegister target = locations->InAt(1).AsRegister<CpuRegister>();
- __ testl(target, target);
- __ j(kZero, &finish);
- HandleFieldGet(instruction, instruction->GetFieldInfo());
- __ Bind(&finish);
-}
-
void InstructionCodeGeneratorX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index 33b5bd5169..a2371817ee 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -137,7 +137,6 @@ static bool IsInterestingInstruction(HInstruction* instruction) {
// hard to test, as LSE removes them.
if (instruction->IsStaticFieldGet() ||
instruction->IsInstanceFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsArrayGet()) {
return false;
}
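
The two files deleted next implement ExecutionSubgraph, the CFG view that partial LSE used to track escapes. Per the deleted header, its canonical state is one fixed-width bitset of allowed outgoing edges per block (kMaxFilterableSuccessors = 8); removing a block only clears bits on its predecessors and defers cleanup to Prune(). A minimal sketch of just that representation, with illustrative names rather than the deleted ART classes:

    #include <bitset>
    #include <cstdint>
    #include <vector>

    constexpr uint32_t kMaxFilterableSuccessors = 8;  // fixed branching factor

    class SubgraphSketch {
     public:
      // Every edge starts out allowed, as in the deleted constructor.
      explicit SubgraphSketch(size_t num_blocks)
          : allowed_(num_blocks, ~std::bitset<kMaxFilterableSuccessors>{}) {}

      // Restricting can only clear bits; a later prune pass propagates the
      // effect by dropping edges with no remaining path to the exit.
      void LimitSuccessors(uint32_t block_id,
                           std::bitset<kMaxFilterableSuccessors> allowed) {
        allowed_[block_id] &= allowed;
      }

      bool EdgeAllowed(uint32_t block_id, uint32_t succ_index) const {
        return allowed_[block_id].test(succ_index);
      }

     private:
      std::vector<std::bitset<kMaxFilterableSuccessors>> allowed_;
    };
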
diff --git a/compiler/optimizing/execution_subgraph.cc b/compiler/optimizing/execution_subgraph.cc
deleted file mode 100644
index 06aabbe040..0000000000
--- a/compiler/optimizing/execution_subgraph.cc
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "execution_subgraph.h"
-
-#include <algorithm>
-#include <unordered_set>
-
-#include "android-base/macros.h"
-#include "base/arena_allocator.h"
-#include "base/arena_bit_vector.h"
-#include "base/globals.h"
-#include "base/scoped_arena_allocator.h"
-#include "nodes.h"
-
-namespace art HIDDEN {
-
-ExecutionSubgraph::ExecutionSubgraph(HGraph* graph, ScopedArenaAllocator* allocator)
- : graph_(graph),
- allocator_(allocator),
- allowed_successors_(graph_->GetBlocks().size(),
- ~(std::bitset<kMaxFilterableSuccessors> {}),
- allocator_->Adapter(kArenaAllocLSA)),
- unreachable_blocks_(
- allocator_, graph_->GetBlocks().size(), /*expandable=*/ false, kArenaAllocLSA),
- valid_(true),
- needs_prune_(false),
- finalized_(false) {
- if (valid_) {
- DCHECK(std::all_of(graph->GetBlocks().begin(), graph->GetBlocks().end(), [](HBasicBlock* it) {
- return it == nullptr || it->GetSuccessors().size() <= kMaxFilterableSuccessors;
- }));
- }
-}
-
-void ExecutionSubgraph::RemoveBlock(const HBasicBlock* to_remove) {
- if (!valid_) {
- return;
- }
- uint32_t id = to_remove->GetBlockId();
- if (unreachable_blocks_.IsBitSet(id)) {
- if (kIsDebugBuild) {
- // This isn't really needed but it's good to have this so it functions as
- // a DCHECK that we always call Prune after removing any block.
- needs_prune_ = true;
- }
- return;
- }
- unreachable_blocks_.SetBit(id);
- for (HBasicBlock* pred : to_remove->GetPredecessors()) {
- std::bitset<kMaxFilterableSuccessors> allowed_successors {};
- // ZipCount iterates over both the successors and the index of them at the same time.
- for (auto [succ, i] : ZipCount(MakeIterationRange(pred->GetSuccessors()))) {
- if (succ != to_remove) {
- allowed_successors.set(i);
- }
- }
- LimitBlockSuccessors(pred, allowed_successors);
- }
-}
-
-// Removes sink nodes.
-void ExecutionSubgraph::Prune() {
- if (UNLIKELY(!valid_)) {
- return;
- }
- needs_prune_ = false;
- // This is the record of the edges that were both (1) explored and (2) reached
- // the exit node.
- {
- // Allocator for temporary values.
- ScopedArenaAllocator temporaries(graph_->GetArenaStack());
- ScopedArenaVector<std::bitset<kMaxFilterableSuccessors>> results(
- graph_->GetBlocks().size(), temporaries.Adapter(kArenaAllocLSA));
- unreachable_blocks_.ClearAllBits();
- // Fills up the 'results' map with what we need to add to update
- // allowed_successors in order to prune sink nodes.
- bool start_reaches_end = false;
- // This is basically a DFS of the graph with some edges skipped.
- {
- const size_t num_blocks = graph_->GetBlocks().size();
- constexpr ssize_t kUnvisitedSuccIdx = -1;
- ArenaBitVector visiting(&temporaries, num_blocks, false, kArenaAllocLSA);
- // How many of the successors of each block we have already examined. This
- // has three states.
- // (1) kUnvisitedSuccIdx: we have not examined any edges,
- // (2) 0 <= val < # of successors: we have examined 'val' successors/are
- // currently examining successors_[val],
- // (3) kMaxFilterableSuccessors: We have examined all of the successors of
- // the block (the 'result' is final).
- ScopedArenaVector<ssize_t> last_succ_seen(
- num_blocks, kUnvisitedSuccIdx, temporaries.Adapter(kArenaAllocLSA));
- // A stack of which blocks we are visiting in this DFS traversal. Does not
- // include the current-block. Used with last_succ_seen to figure out which
- // bits to set if we find a path to the end/loop.
- ScopedArenaVector<uint32_t> current_path(temporaries.Adapter(kArenaAllocLSA));
- // Just ensure we have enough space. The allocator will be cleared shortly
- // anyway so this is fast.
- current_path.reserve(num_blocks);
- // Current block we are examining. Modified only by 'push_block' and 'pop_block'
- const HBasicBlock* cur_block = graph_->GetEntryBlock();
- // Used to note a recursion point: we will start iterating on 'blk' and save
- // where we are. We must 'continue' immediately after this.
- auto push_block = [&](const HBasicBlock* blk) {
- DCHECK(std::find(current_path.cbegin(), current_path.cend(), cur_block->GetBlockId()) ==
- current_path.end());
- if (kIsDebugBuild) {
- std::for_each(current_path.cbegin(), current_path.cend(), [&](auto id) {
- DCHECK_GT(last_succ_seen[id], kUnvisitedSuccIdx) << id;
- DCHECK_LT(last_succ_seen[id], static_cast<ssize_t>(kMaxFilterableSuccessors)) << id;
- });
- }
- current_path.push_back(cur_block->GetBlockId());
- visiting.SetBit(cur_block->GetBlockId());
- cur_block = blk;
- };
- // Used to note that we have fully explored a block and should return back
- // up. Sets cur_block appropriately. We must 'continue' immediately after
- // calling this.
- auto pop_block = [&]() {
- if (UNLIKELY(current_path.empty())) {
- // Should only happen if the entry block's successors are exhausted.
- DCHECK_GE(last_succ_seen[graph_->GetEntryBlock()->GetBlockId()],
- static_cast<ssize_t>(graph_->GetEntryBlock()->GetSuccessors().size()));
- cur_block = nullptr;
- } else {
- const HBasicBlock* last = graph_->GetBlocks()[current_path.back()];
- visiting.ClearBit(current_path.back());
- current_path.pop_back();
- cur_block = last;
- }
- };
- // Mark the current path as a path to the end. This is in contrast to paths
- // that end in (eg) removed blocks.
- auto propagate_true = [&]() {
- for (uint32_t id : current_path) {
- DCHECK_GT(last_succ_seen[id], kUnvisitedSuccIdx);
- DCHECK_LT(last_succ_seen[id], static_cast<ssize_t>(kMaxFilterableSuccessors));
- results[id].set(last_succ_seen[id]);
- }
- };
- ssize_t num_entry_succ = graph_->GetEntryBlock()->GetSuccessors().size();
- // As long as the entry-block has not explored all successors we still have
- // work to do.
- const uint32_t entry_block_id = graph_->GetEntryBlock()->GetBlockId();
- while (num_entry_succ > last_succ_seen[entry_block_id]) {
- DCHECK(cur_block != nullptr);
- uint32_t id = cur_block->GetBlockId();
- DCHECK((current_path.empty() && cur_block == graph_->GetEntryBlock()) ||
- current_path.front() == graph_->GetEntryBlock()->GetBlockId())
- << "current path size: " << current_path.size()
- << " cur_block id: " << cur_block->GetBlockId() << " entry id "
- << graph_->GetEntryBlock()->GetBlockId();
- if (visiting.IsBitSet(id)) {
- // TODO We should support infinite loops as well.
- start_reaches_end = false;
- break;
- }
- std::bitset<kMaxFilterableSuccessors>& result = results[id];
- if (cur_block == graph_->GetExitBlock()) {
- start_reaches_end = true;
- propagate_true();
- pop_block();
- continue;
- } else if (last_succ_seen[id] == kMaxFilterableSuccessors) {
- // Already fully explored.
- if (result.any()) {
- propagate_true();
- }
- pop_block();
- continue;
- }
- // NB This is a pointer; modifications through it update last_succ_seen.
- ssize_t* cur_succ = &last_succ_seen[id];
- std::bitset<kMaxFilterableSuccessors> succ_bitmap = GetAllowedSuccessors(cur_block);
- // Get next successor allowed.
- while (++(*cur_succ) < static_cast<ssize_t>(kMaxFilterableSuccessors) &&
- !succ_bitmap.test(*cur_succ)) {
- DCHECK_GE(*cur_succ, 0);
- }
- if (*cur_succ >= static_cast<ssize_t>(cur_block->GetSuccessors().size())) {
- // No more successors. Mark that we've checked everything. Later visits
- // to this node can use the existing data.
- DCHECK_LE(*cur_succ, static_cast<ssize_t>(kMaxFilterableSuccessors));
- *cur_succ = kMaxFilterableSuccessors;
- pop_block();
- continue;
- }
- const HBasicBlock* nxt = cur_block->GetSuccessors()[*cur_succ];
- DCHECK(nxt != nullptr) << "id: " << *cur_succ
- << " max: " << cur_block->GetSuccessors().size();
- if (visiting.IsBitSet(nxt->GetBlockId())) {
- // This is a loop. Mark it and continue on. Mark allowed-successor on
- // this block's results as well.
- result.set(*cur_succ);
- propagate_true();
- } else {
- // Not a loop yet. Recur.
- push_block(nxt);
- }
- }
- }
- // If we can't reach the end then there is no path through the graph without
- // hitting excluded blocks
- if (UNLIKELY(!start_reaches_end)) {
- valid_ = false;
- return;
- }
- // Mark blocks we didn't see in the ReachesEnd flood-fill
- for (const HBasicBlock* blk : graph_->GetBlocks()) {
- if (blk != nullptr &&
- results[blk->GetBlockId()].none() &&
- blk != graph_->GetExitBlock() &&
- blk != graph_->GetEntryBlock()) {
- // We never visited this block, must be unreachable.
- unreachable_blocks_.SetBit(blk->GetBlockId());
- }
- }
- // write the new data.
- memcpy(allowed_successors_.data(),
- results.data(),
- results.size() * sizeof(std::bitset<kMaxFilterableSuccessors>));
- }
- RecalculateExcludedCohort();
-}
-
-void ExecutionSubgraph::RemoveConcavity() {
- if (UNLIKELY(!valid_)) {
- return;
- }
- DCHECK(!needs_prune_);
- for (const HBasicBlock* blk : graph_->GetBlocks()) {
- if (blk == nullptr || unreachable_blocks_.IsBitSet(blk->GetBlockId())) {
- continue;
- }
- uint32_t blkid = blk->GetBlockId();
- if (std::any_of(unreachable_blocks_.Indexes().begin(),
- unreachable_blocks_.Indexes().end(),
- [&](uint32_t skipped) { return graph_->PathBetween(skipped, blkid); }) &&
- std::any_of(unreachable_blocks_.Indexes().begin(),
- unreachable_blocks_.Indexes().end(),
- [&](uint32_t skipped) { return graph_->PathBetween(blkid, skipped); })) {
- RemoveBlock(blk);
- }
- }
- Prune();
-}
-
-void ExecutionSubgraph::RecalculateExcludedCohort() {
- DCHECK(!needs_prune_);
- excluded_list_.emplace(allocator_->Adapter(kArenaAllocLSA));
- ScopedArenaVector<ExcludedCohort>& res = excluded_list_.value();
- // Make a copy of unreachable_blocks_.
- ArenaBitVector unreachable(allocator_, graph_->GetBlocks().size(), false, kArenaAllocLSA);
- unreachable.Copy(&unreachable_blocks_);
- // Split cohorts with union-find
- while (unreachable.IsAnyBitSet()) {
- res.emplace_back(allocator_, graph_);
- ExcludedCohort& cohort = res.back();
- // We don't allocate except for the queue beyond here so create another arena to save memory.
- ScopedArenaAllocator alloc(graph_->GetArenaStack());
- ScopedArenaQueue<const HBasicBlock*> worklist(alloc.Adapter(kArenaAllocLSA));
- // Select an arbitrary node
- const HBasicBlock* first = graph_->GetBlocks()[unreachable.GetHighestBitSet()];
- worklist.push(first);
- do {
- // Flood-fill both forwards and backwards.
- const HBasicBlock* cur = worklist.front();
- worklist.pop();
- if (!unreachable.IsBitSet(cur->GetBlockId())) {
- // Already visited or reachable somewhere else.
- continue;
- }
- unreachable.ClearBit(cur->GetBlockId());
- cohort.blocks_.SetBit(cur->GetBlockId());
- // Don't bother filtering here; it's done on the next go-around.
- for (const HBasicBlock* pred : cur->GetPredecessors()) {
- worklist.push(pred);
- }
- for (const HBasicBlock* succ : cur->GetSuccessors()) {
- worklist.push(succ);
- }
- } while (!worklist.empty());
- }
- // Figure out entry & exit nodes.
- for (ExcludedCohort& cohort : res) {
- DCHECK(cohort.blocks_.IsAnyBitSet());
- auto is_external = [&](const HBasicBlock* ext) -> bool {
- return !cohort.blocks_.IsBitSet(ext->GetBlockId());
- };
- for (const HBasicBlock* blk : cohort.Blocks()) {
- const auto& preds = blk->GetPredecessors();
- const auto& succs = blk->GetSuccessors();
- if (std::any_of(preds.cbegin(), preds.cend(), is_external)) {
- cohort.entry_blocks_.SetBit(blk->GetBlockId());
- }
- if (std::any_of(succs.cbegin(), succs.cend(), is_external)) {
- cohort.exit_blocks_.SetBit(blk->GetBlockId());
- }
- }
- }
-}
-
-std::ostream& operator<<(std::ostream& os, const ExecutionSubgraph::ExcludedCohort& ex) {
- ex.Dump(os);
- return os;
-}
-
-void ExecutionSubgraph::ExcludedCohort::Dump(std::ostream& os) const {
- auto dump = [&](BitVecBlockRange arr) {
- os << "[";
- bool first = true;
- for (const HBasicBlock* b : arr) {
- if (!first) {
- os << ", ";
- }
- first = false;
- os << b->GetBlockId();
- }
- os << "]";
- };
- auto dump_blocks = [&]() {
- os << "[";
- bool first = true;
- for (const HBasicBlock* b : Blocks()) {
- if (!entry_blocks_.IsBitSet(b->GetBlockId()) && !exit_blocks_.IsBitSet(b->GetBlockId())) {
- if (!first) {
- os << ", ";
- }
- first = false;
- os << b->GetBlockId();
- }
- }
- os << "]";
- };
-
- os << "{ entry: ";
- dump(EntryBlocks());
- os << ", interior: ";
- dump_blocks();
- os << ", exit: ";
- dump(ExitBlocks());
- os << "}";
-}
-
-} // namespace art
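
At its core, the Prune() pass deleted above is a reachability walk over the allowed edges: keep exactly those edges that lie on some path from the entry block to the exit block, treating a back-edge into the path currently being explored as a valid continuation. The deleted code drives this with an explicit stack (current_path / last_succ_seen); below is a shorter recursive sketch of the same idea, with illustrative names and none of the deleted code's arena machinery:

    #include <bitset>
    #include <cstdint>
    #include <vector>

    constexpr uint32_t kMaxSuccs = 8;

    struct Block { std::vector<uint32_t> succs; };

    // Returns true if 'id' reaches 'exit_id' along allowed edges. 'kept'
    // accumulates the surviving edges (the new allowed-successor bitsets);
    // 'state' is 0 = unseen, 1 = on the current path, 2 = fully explored.
    static bool ReachesExit(const std::vector<Block>& blocks,
                            const std::vector<std::bitset<kMaxSuccs>>& allowed,
                            std::vector<std::bitset<kMaxSuccs>>& kept,
                            std::vector<int>& state,
                            uint32_t id,
                            uint32_t exit_id) {
      if (id == exit_id) return true;
      if (state[id] == 1) return true;  // loop: counted as a continuation,
                                        // like the deleted propagate_true()
      if (state[id] == 2) return kept[id].any();  // memoized result
      state[id] = 1;
      bool reaches = false;
      for (uint32_t i = 0; i < blocks[id].succs.size() && i < kMaxSuccs; ++i) {
        if (allowed[id].test(i) &&
            ReachesExit(blocks, allowed, kept, state,
                        blocks[id].succs[i], exit_id)) {
          kept[id].set(i);  // this edge lies on a path to the exit
          reaches = true;
        }
      }
      state[id] = 2;
      return reaches;
    }

Blocks whose 'kept' bitset ends up empty (other than the entry and exit blocks themselves) are exactly the ones the deleted code marks in unreachable_blocks_.
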
diff --git a/compiler/optimizing/execution_subgraph.h b/compiler/optimizing/execution_subgraph.h
deleted file mode 100644
index 5ddf17de60..0000000000
--- a/compiler/optimizing/execution_subgraph.h
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_EXECUTION_SUBGRAPH_H_
-#define ART_COMPILER_OPTIMIZING_EXECUTION_SUBGRAPH_H_
-
-#include <algorithm>
-#include <sstream>
-
-#include "base/arena_allocator.h"
-#include "base/arena_bit_vector.h"
-#include "base/arena_containers.h"
-#include "base/array_ref.h"
-#include "base/bit_vector-inl.h"
-#include "base/globals.h"
-#include "base/iteration_range.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "base/scoped_arena_allocator.h"
-#include "base/scoped_arena_containers.h"
-#include "base/stl_util.h"
-#include "base/transform_iterator.h"
-#include "nodes.h"
-
-namespace art HIDDEN {
-
-// Helper for transforming blocks to block_ids.
-class BlockToBlockIdTransformer {
- public:
- BlockToBlockIdTransformer(BlockToBlockIdTransformer&&) = default;
- BlockToBlockIdTransformer(const BlockToBlockIdTransformer&) = default;
- BlockToBlockIdTransformer() {}
-
- inline uint32_t operator()(const HBasicBlock* b) const {
- return b->GetBlockId();
- }
-};
-
-// Helper for transforming block ids to blocks.
-class BlockIdToBlockTransformer {
- public:
- BlockIdToBlockTransformer(BlockIdToBlockTransformer&&) = default;
- BlockIdToBlockTransformer(const BlockIdToBlockTransformer&) = default;
- explicit BlockIdToBlockTransformer(const HGraph* graph) : graph_(graph) {}
-
- inline const HGraph* GetGraph() const {
- return graph_;
- }
-
- inline HBasicBlock* GetBlock(uint32_t id) const {
- DCHECK_LT(id, graph_->GetBlocks().size()) << graph_->PrettyMethod();
- HBasicBlock* blk = graph_->GetBlocks()[id];
- DCHECK(blk != nullptr);
- return blk;
- }
-
- inline HBasicBlock* operator()(uint32_t id) const {
- return GetBlock(id);
- }
-
- private:
- const HGraph* const graph_;
-};
-
-class BlockIdFilterThunk {
- public:
- explicit BlockIdFilterThunk(const BitVector& i) : inner_(i) {}
- BlockIdFilterThunk(BlockIdFilterThunk&& other) noexcept = default;
- BlockIdFilterThunk(const BlockIdFilterThunk&) = default;
-
- bool operator()(const HBasicBlock* b) const {
- return inner_.IsBitSet(b->GetBlockId());
- }
-
- private:
- const BitVector& inner_;
-};
-
-// A representation of a particular section of the graph. The graph is split
-// into an excluded and included area and is used to track escapes.
-//
-// This object is a view of the graph and is not updated as the graph is
-// changed.
-//
-// This is implemented by removing various escape points from the subgraph using
-// the 'RemoveBlock' function. Once all required blocks are removed one will
-// 'Finalize' the subgraph. This will extend the removed area to include:
-// (1) Any block which inevitably leads to (post-dominates) a removed block
-// (2) any block which is between 2 removed blocks
-//
-// This allows us to create a set of 'ExcludedCohorts' which are the
-// well-connected subsets of the graph made up of removed blocks. These cohorts
-// have a set of entry and exit blocks which act as the boundary of the cohort.
-// Since we removed blocks between 2 excluded blocks it is impossible for any
-// cohort-exit block to reach any cohort-entry block. This means we can use the
-// boundary between the cohort and the rest of the graph to insert
-// materialization blocks for partial LSE.
-//
-// TODO We really should expand this to take into account where the object
-// allocation takes place directly. Currently we always act as though it were
-// allocated in the entry block. This is a massively simplifying assumption but
-// means we can't partially remove objects that are repeatedly allocated in a
-// loop.
-class ExecutionSubgraph : public DeletableArenaObject<kArenaAllocLSA> {
- public:
- using BitVecBlockRange =
- IterationRange<TransformIterator<BitVector::IndexIterator, BlockIdToBlockTransformer>>;
- using FilteredBitVecBlockRange = IterationRange<
- FilterIterator<ArenaVector<HBasicBlock*>::const_iterator, BlockIdFilterThunk>>;
-
- // A set of connected blocks which have been removed from the
- // ExecutionSubgraph. See the comment above for an explanation.
- class ExcludedCohort : public ArenaObject<kArenaAllocLSA> {
- public:
- ExcludedCohort(ExcludedCohort&&) = default;
- ExcludedCohort(const ExcludedCohort&) = delete;
- explicit ExcludedCohort(ScopedArenaAllocator* allocator, HGraph* graph)
- : graph_(graph),
- entry_blocks_(allocator, graph_->GetBlocks().size(), false, kArenaAllocLSA),
- exit_blocks_(allocator, graph_->GetBlocks().size(), false, kArenaAllocLSA),
- blocks_(allocator, graph_->GetBlocks().size(), false, kArenaAllocLSA) {}
-
- ~ExcludedCohort() = default;
-
- // All blocks in the cohort.
- BitVecBlockRange Blocks() const {
- return BlockIterRange(blocks_);
- }
-
- // Blocks that have predecessors outside of the cohort. These blocks will
- // need to have PHIs/control-flow added to create the escaping value.
- BitVecBlockRange EntryBlocks() const {
- return BlockIterRange(entry_blocks_);
- }
-
- FilteredBitVecBlockRange EntryBlocksReversePostOrder() const {
- return Filter(MakeIterationRange(graph_->GetReversePostOrder()),
- BlockIdFilterThunk(entry_blocks_));
- }
-
- bool IsEntryBlock(const HBasicBlock* blk) const {
- return entry_blocks_.IsBitSet(blk->GetBlockId());
- }
-
- // Blocks that have successors outside of the cohort. The successors of
- // these blocks will need to have PHIs to restore state.
- BitVecBlockRange ExitBlocks() const {
- return BlockIterRange(exit_blocks_);
- }
-
- bool operator==(const ExcludedCohort& other) const {
- return blocks_.Equal(&other.blocks_);
- }
-
- bool ContainsBlock(const HBasicBlock* blk) const {
- return blocks_.IsBitSet(blk->GetBlockId());
- }
-
- // Returns true if there is a path from 'blk' to any block in this cohort.
- // NB blocks contained within the cohort are not considered to be succeeded
- // by the cohort (i.e. this function will return false).
- bool SucceedsBlock(const HBasicBlock* blk) const {
- if (ContainsBlock(blk)) {
- return false;
- }
- auto idxs = entry_blocks_.Indexes();
- return std::any_of(idxs.begin(), idxs.end(), [&](uint32_t entry) -> bool {
- return blk->GetGraph()->PathBetween(blk->GetBlockId(), entry);
- });
- }
-
- // Returns true if there is a path from any block in this cohort to 'blk'.
- // NB blocks contained within the cohort are not considered to be preceded
- // by the cohort (i.e. this function will return false).
- bool PrecedesBlock(const HBasicBlock* blk) const {
- if (ContainsBlock(blk)) {
- return false;
- }
- auto idxs = exit_blocks_.Indexes();
- return std::any_of(idxs.begin(), idxs.end(), [&](uint32_t exit) -> bool {
- return blk->GetGraph()->PathBetween(exit, blk->GetBlockId());
- });
- }
-
- void Dump(std::ostream& os) const;
-
- private:
- BitVecBlockRange BlockIterRange(const ArenaBitVector& bv) const {
- auto indexes = bv.Indexes();
- BitVecBlockRange res = MakeTransformRange(indexes, BlockIdToBlockTransformer(graph_));
- return res;
- }
-
- ExcludedCohort() = delete;
-
- HGraph* graph_;
- ArenaBitVector entry_blocks_;
- ArenaBitVector exit_blocks_;
- ArenaBitVector blocks_;
-
- friend class ExecutionSubgraph;
- friend class LoadStoreAnalysisTest;
- };
-
- // The number of successors we can track on a single block. Graphs which
- // contain a block with a branching factor greater than this will not be
- // analysed. This is used to both limit the memory usage of analysis to
- // reasonable levels and ensure that the analysis will complete in a
- // reasonable amount of time. It also simplifies the implementation somewhat
- // to have a constant branching factor.
- static constexpr uint32_t kMaxFilterableSuccessors = 8;
-
- // Instantiate a subgraph. The subgraph can be instantiated only if partial-escape
- // analysis is desired (eg not when being used for instruction scheduling) and
- // when the branching factor in the graph is not too high. These conditions
- // are determined once and passed down for performance reasons.
- ExecutionSubgraph(HGraph* graph, ScopedArenaAllocator* allocator);
-
- void Invalidate() {
- valid_ = false;
- }
-
- // A block is contained by the ExecutionSubgraph if it is reachable. This
- // means it has not been removed explicitly or via pruning/concavity removal.
- // The subgraph must be finalized before calling this function.
- // See RemoveConcavity and Prune for more information.
- bool ContainsBlock(const HBasicBlock* blk) const {
- DCHECK_IMPLIES(finalized_, !needs_prune_);
- if (!valid_) {
- return false;
- }
- return !unreachable_blocks_.IsBitSet(blk->GetBlockId());
- }
-
- // Mark the block as removed from the subgraph.
- void RemoveBlock(const HBasicBlock* to_remove);
-
- // Called when no more updates will be done to the subgraph. Calculate the
- // final subgraph
- void Finalize() {
- Prune();
- RemoveConcavity();
- finalized_ = true;
- }
-
- BitVecBlockRange UnreachableBlocks() const {
- auto idxs = unreachable_blocks_.Indexes();
- return MakeTransformRange(idxs, BlockIdToBlockTransformer(graph_));
- }
-
- // Returns true if all allowed execution paths from start eventually reach the
- // graph's exit block (or diverge).
- bool IsValid() const {
- return valid_;
- }
-
- ArrayRef<const ExcludedCohort> GetExcludedCohorts() const {
- DCHECK_IMPLIES(valid_, !needs_prune_);
- if (!valid_ || !unreachable_blocks_.IsAnyBitSet()) {
- return ArrayRef<const ExcludedCohort>();
- } else {
- return ArrayRef<const ExcludedCohort>(*excluded_list_);
- }
- }
-
- // Helper class to create reachable blocks iterator.
- class ContainsFunctor {
- public:
- bool operator()(HBasicBlock* blk) const {
- return subgraph_->ContainsBlock(blk);
- }
-
- private:
- explicit ContainsFunctor(const ExecutionSubgraph* subgraph) : subgraph_(subgraph) {}
- const ExecutionSubgraph* const subgraph_;
- friend class ExecutionSubgraph;
- };
- // Returns an iterator over reachable blocks (filtered as we go). This is primarily for testing.
- IterationRange<
- FilterIterator<typename ArenaVector<HBasicBlock*>::const_iterator, ContainsFunctor>>
- ReachableBlocks() const {
- return Filter(MakeIterationRange(graph_->GetBlocks()), ContainsFunctor(this));
- }
-
- static bool CanAnalyse(HGraph* graph) {
- // If there are any blocks with more than kMaxFilterableSuccessors we can't
- // analyse the graph. We avoid this case to prevent excessive memory and
- // time usage while allowing a simpler algorithm with a fixed-width
- // branching factor.
- return std::all_of(graph->GetBlocks().begin(), graph->GetBlocks().end(), [](HBasicBlock* blk) {
- return blk == nullptr || blk->GetSuccessors().size() <= kMaxFilterableSuccessors;
- });
- }
-
- private:
- std::bitset<kMaxFilterableSuccessors> GetAllowedSuccessors(const HBasicBlock* blk) const {
- DCHECK(valid_);
- return allowed_successors_[blk->GetBlockId()];
- }
-
- void LimitBlockSuccessors(const HBasicBlock* block,
- std::bitset<kMaxFilterableSuccessors> allowed) {
- needs_prune_ = true;
- allowed_successors_[block->GetBlockId()] &= allowed;
- }
-
- // Remove nodes which both precede and follow any exclusions. This ensures we don't need to deal
- // with only conditionally materializing objects depending on whether we already materialized them.
- // Ensure that for all blocks A, B, C: Unreachable(A) && Unreachable(C) && PathBetween(A, B) &&
- // PathBetween(B, C) implies Unreachable(B). This simplifies later transforms since it ensures
- // that no execution can leave and then re-enter any exclusion.
- void RemoveConcavity();
-
-  // Removes sink nodes: nodes from which there is no execution that avoids
-  // all removed nodes.
- void Prune();
-
- void RecalculateExcludedCohort();
-
- HGraph* graph_;
- ScopedArenaAllocator* allocator_;
- // The map from block_id -> allowed-successors.
- // This is the canonical representation of this subgraph. If a bit in the
- // bitset is not set then the corresponding outgoing edge of that block is not
- // considered traversable.
- ScopedArenaVector<std::bitset<kMaxFilterableSuccessors>> allowed_successors_;
- // Helper that holds which blocks we are able to reach. Only valid if
- // 'needs_prune_ == false'.
- ArenaBitVector unreachable_blocks_;
- // A list of the excluded-cohorts of this subgraph. This is only valid when
-  // 'needs_prune_ == false'.
- std::optional<ScopedArenaVector<ExcludedCohort>> excluded_list_;
- // Bool to hold if there is at least one known path from the start block to
- // the end in this graph. Used to short-circuit computation.
- bool valid_;
-  // True if the subgraph has been modified since the last prune and must be
-  // pruned again before it can be queried.
-  bool needs_prune_;
- // True if no more modification of the subgraph is permitted.
- bool finalized_;
-
- friend class ExecutionSubgraphTest;
- friend class LoadStoreAnalysisTest;
-
- DISALLOW_COPY_AND_ASSIGN(ExecutionSubgraph);
-};
-
-std::ostream& operator<<(std::ostream& os, const ExecutionSubgraph::ExcludedCohort& ex);
-
-} // namespace art
-
-#endif // ART_COMPILER_OPTIMIZING_EXECUTION_SUBGRAPH_H_
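
The header deleted above is easier to follow with its representation spelled out: the subgraph is canonically a per-block bitset of allowed outgoing edges, and removing a block amounts to masking off every edge that leads into it, with a later prune pass recomputing reachability. The following is a minimal standalone sketch under that reading; apart from kMaxFilterableSuccessors (whose value of 8 is assumed here for illustration), none of these names are the ART types.

    #include <bitset>
    #include <cstddef>
    #include <vector>

    constexpr std::size_t kMaxFilterableSuccessors = 8;  // assumed bound, for illustration
    using EdgeMask = std::bitset<kMaxFilterableSuccessors>;

    struct Block {
      std::vector<std::size_t> successors;  // at most kMaxFilterableSuccessors entries
    };

    struct SketchSubgraph {
      std::vector<Block> blocks;
      // allowed[b][i] is set iff the edge to blocks[b].successors[i] is traversable.
      std::vector<EdgeMask> allowed;

      explicit SketchSubgraph(std::vector<Block> b)
          : blocks(std::move(b)), allowed(blocks.size(), EdgeMask().set()) {}

      // Mirrors RemoveBlock above: clear the bit of every predecessor edge that
      // leads into 'target'. Reachability is recomputed by a separate prune.
      void RemoveBlock(std::size_t target) {
        for (std::size_t b = 0; b < blocks.size(); ++b) {
          for (std::size_t i = 0; i < blocks[b].successors.size(); ++i) {
            if (blocks[b].successors[i] == target) {
              allowed[b].reset(i);
            }
          }
        }
      }
    };
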
diff --git a/compiler/optimizing/execution_subgraph_test.cc b/compiler/optimizing/execution_subgraph_test.cc
deleted file mode 100644
index 921ef056ba..0000000000
--- a/compiler/optimizing/execution_subgraph_test.cc
+++ /dev/null
@@ -1,975 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "execution_subgraph_test.h"
-
-#include <array>
-#include <sstream>
-#include <string_view>
-#include <unordered_map>
-#include <unordered_set>
-
-#include "base/scoped_arena_allocator.h"
-#include "base/stl_util.h"
-#include "class_root.h"
-#include "dex/dex_file_types.h"
-#include "dex/method_reference.h"
-#include "entrypoints/quick/quick_entrypoints_enum.h"
-#include "execution_subgraph.h"
-#include "gtest/gtest.h"
-#include "handle.h"
-#include "handle_scope.h"
-#include "nodes.h"
-#include "optimizing/data_type.h"
-#include "optimizing_unit_test.h"
-#include "scoped_thread_state_change.h"
-
-namespace art HIDDEN {
-
-using BlockSet = std::unordered_set<const HBasicBlock*>;
-
-// Helper that checks validity directly.
-bool ExecutionSubgraphTestHelper::CalculateValidity(HGraph* graph, const ExecutionSubgraph* esg) {
- bool reached_end = false;
- std::queue<const HBasicBlock*> worklist;
- std::unordered_set<const HBasicBlock*> visited;
- worklist.push(graph->GetEntryBlock());
- while (!worklist.empty()) {
- const HBasicBlock* cur = worklist.front();
- worklist.pop();
- if (visited.find(cur) != visited.end()) {
- continue;
- } else {
- visited.insert(cur);
- }
- if (cur == graph->GetExitBlock()) {
- reached_end = true;
- continue;
- }
- bool has_succ = false;
- for (const HBasicBlock* succ : cur->GetSuccessors()) {
- DCHECK(succ != nullptr) << "Bad successors on block " << cur->GetBlockId();
- if (!esg->ContainsBlock(succ)) {
- continue;
- }
- has_succ = true;
- worklist.push(succ);
- }
- if (!has_succ) {
- // We aren't at the end and have nowhere to go so fail.
- return false;
- }
- }
- return reached_end;
-}
-
-class ExecutionSubgraphTest : public OptimizingUnitTest {
- public:
- ExecutionSubgraphTest() : graph_(CreateGraph()) {}
-
- AdjacencyListGraph SetupFromAdjacencyList(const std::string_view entry_name,
- const std::string_view exit_name,
- const std::vector<AdjacencyListGraph::Edge>& adj) {
- return AdjacencyListGraph(graph_, GetAllocator(), entry_name, exit_name, adj);
- }
-
- bool IsValidSubgraph(const ExecutionSubgraph* esg) {
- return ExecutionSubgraphTestHelper::CalculateValidity(graph_, esg);
- }
-
- bool IsValidSubgraph(const ExecutionSubgraph& esg) {
- return ExecutionSubgraphTestHelper::CalculateValidity(graph_, &esg);
- }
-
- HGraph* graph_;
-};
-
-// Some comparators used by these tests to avoid having to deal with various set types.
-template <typename BLKS, typename = std::enable_if_t<!std::is_same_v<BlockSet, BLKS>>>
-bool operator==(const BlockSet& bs, const BLKS& sas) {
- std::unordered_set<const HBasicBlock*> us(sas.begin(), sas.end());
- return bs == us;
-}
-template <typename BLKS, typename = std::enable_if_t<!std::is_same_v<BlockSet, BLKS>>>
-bool operator==(const BLKS& sas, const BlockSet& bs) {
- return bs == sas;
-}
-template <typename BLKS, typename = std::enable_if_t<!std::is_same_v<BlockSet, BLKS>>>
-bool operator!=(const BlockSet& bs, const BLKS& sas) {
- return !(bs == sas);
-}
-template <typename BLKS, typename = std::enable_if_t<!std::is_same_v<BlockSet, BLKS>>>
-bool operator!=(const BLKS& sas, const BlockSet& bs) {
- return !(bs == sas);
-}
-
-// +-------+ +-------+
-// | right | <-- | entry |
-// +-------+ +-------+
-// | |
-// | |
-// | v
-// | + - - - - - +
-// | ' removed '
-// | ' '
-// | ' +-------+ '
-// | ' | left | '
-// | ' +-------+ '
-// | ' '
-// | + - - - - - +
-// | |
-// | |
-// | v
-// | +-------+
-// +---------> | exit |
-// +-------+
-TEST_F(ExecutionSubgraphTest, Basic) {
- AdjacencyListGraph blks(SetupFromAdjacencyList(
- "entry",
- "exit",
- { { "entry", "left" }, { "entry", "right" }, { "left", "exit" }, { "right", "exit" } }));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.RemoveBlock(blks.Get("left"));
- esg.Finalize();
- ASSERT_TRUE(esg.IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 3u);
- ASSERT_TRUE(contents.find(blks.Get("left")) == contents.end());
-
- ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
- esg.RemoveBlock(blks.Get("right"));
- esg.Finalize();
- std::unordered_set<const HBasicBlock*> contents_2(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
- ASSERT_EQ(contents_2.size(), 0u);
-}
-
-// +-------+ +-------+
-// | right | <-- | entry |
-// +-------+ +-------+
-// | |
-// | |
-// | v
-// | + - - - - - - - - - - - - - - - - - - - -+
-// | ' indirectly_removed '
-// | ' '
-// | ' +-------+ +-----+ '
-// | ' | l1 | -------------------> | l1r | '
-// | ' +-------+ +-----+ '
-// | ' | | '
-// | ' | | '
-// | ' v | '
-// | ' +-------+ | '
-// | ' | l1l | | '
-// | ' +-------+ | '
-// | ' | | '
-// | ' | | '
-// | ' | | '
-// + - - - - - - - -+ | +- - - | | '
-// ' ' | +- v | '
-// ' +-----+ | +----------------+ | '
-// ' | l2r | <---------+-------------- | l2 (removed) | <-------------+ '
-// ' +-----+ | +----------------+ '
-// ' | ' | +- | '
-// ' | - - -+ | +- - - | - - - - - - - - - - - - - -+
-// ' | ' | ' | '
-// ' | ' | ' | '
-// ' | ' | ' v '
-// ' | ' | ' +-------+ '
-// ' | ' | ' | l2l | '
-// ' | ' | ' +-------+ '
-// ' | ' | ' | '
-// ' | ' | ' | '
-// ' | ' | ' | '
-// ' | - - -+ | +- - - | '
-// ' | ' | +- v '
-// ' | | +-------+ '
-// ' +---------------+-------------> | l3 | '
-// ' | +-------+ '
-// ' ' | +- '
-// + - - - - - - - -+ | +- - - - - - - - - +
-// | |
-// | |
-// | v
-// | +-------+
-// +-----------> | exit |
-// +-------+
-TEST_F(ExecutionSubgraphTest, Propagation) {
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- { { "entry", "l1" },
- { "l1", "l1l" },
- { "l1", "l1r" },
- { "l1l", "l2" },
- { "l1r", "l2" },
- { "l2", "l2l" },
- { "l2", "l2r" },
- { "l2l", "l3" },
- { "l2r", "l3" },
- { "l3", "exit" },
- { "entry", "right" },
- { "right", "exit" } }));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.RemoveBlock(blks.Get("l2"));
- esg.Finalize();
- ASSERT_TRUE(esg.IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- // ASSERT_EQ(contents.size(), 3u);
- // Not present, no path through.
- ASSERT_TRUE(contents.find(blks.Get("l1")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l2")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l3")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l1l")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l1r")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l2l")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l2r")) == contents.end());
-
- // present, path through.
- ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
-}
-
-// +------------------------------------+
-// | |
-// | +-------+ +-------+ |
-// | | right | <-- | entry | |
-// | +-------+ +-------+ |
-// | | | |
-// | | | |
-// | | v |
-// | | +-------+ +--------+
-// +----+---------> | l1 | --> | l1loop |
-// | +-------+ +--------+
-// | |
-// | |
-// | v
-// | +- - - - - -+
-// | ' removed '
-// | ' '
-// | ' +-------+ '
-// | ' | l2 | '
-// | ' +-------+ '
-// | ' '
-// | +- - - - - -+
-// | |
-// | |
-// | v
-// | +-------+
-// +---------> | exit |
-// +-------+
-TEST_F(ExecutionSubgraphTest, PropagationLoop) {
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- { { "entry", "l1" },
- { "l1", "l2" },
- { "l1", "l1loop" },
- { "l1loop", "l1" },
- { "l2", "exit" },
- { "entry", "right" },
- { "right", "exit" } }));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.RemoveBlock(blks.Get("l2"));
- esg.Finalize();
- ASSERT_TRUE(esg.IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 5u);
-
- // Not present, no path through.
- ASSERT_TRUE(contents.find(blks.Get("l2")) == contents.end());
-
- // present, path through.
- // Since the loop can diverge we should leave it in the execution subgraph.
- ASSERT_TRUE(contents.find(blks.Get("l1")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l1loop")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
-}
-
-// +--------------------------------+
-// | |
-// | +-------+ +-------+ |
-// | | right | <-- | entry | |
-// | +-------+ +-------+ |
-// | | | |
-// | | | |
-// | | v |
-// | | +-------+ +--------+
-// +----+---------> | l1 | --> | l1loop |
-// | +-------+ +--------+
-// | |
-// | |
-// | v
-// | +-------+
-// | | l2 |
-// | +-------+
-// | |
-// | |
-// | v
-// | +-------+
-// +---------> | exit |
-// +-------+
-TEST_F(ExecutionSubgraphTest, PropagationLoop2) {
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- { { "entry", "l1" },
- { "l1", "l2" },
- { "l1", "l1loop" },
- { "l1loop", "l1" },
- { "l2", "exit" },
- { "entry", "right" },
- { "right", "exit" } }));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.RemoveBlock(blks.Get("l1"));
- esg.Finalize();
- ASSERT_TRUE(esg.IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 3u);
-
- // Not present, no path through.
- ASSERT_TRUE(contents.find(blks.Get("l1")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l1loop")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l2")) == contents.end());
-
- // present, path through.
- ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
-}
-
-// +--------------------------------+
-// | |
-// | +-------+ +-------+ |
-// | | right | <-- | entry | |
-// | +-------+ +-------+ |
-// | | | |
-// | | | |
-// | | v |
-// | | +-------+ +--------+
-// +----+---------> | l1 | --> | l1loop |
-// | +-------+ +--------+
-// | |
-// | |
-// | v
-// | +-------+
-// | | l2 |
-// | +-------+
-// | |
-// | |
-// | v
-// | +-------+
-// +---------> | exit |
-// +-------+
-TEST_F(ExecutionSubgraphTest, PropagationLoop3) {
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- { { "entry", "l1" },
- { "l1", "l2" },
- { "l1", "l1loop" },
- { "l1loop", "l1" },
- { "l2", "exit" },
- { "entry", "right" },
- { "right", "exit" } }));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.RemoveBlock(blks.Get("l1loop"));
- esg.Finalize();
- ASSERT_TRUE(esg.IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 3u);
-
-  // Not present, no path through. If we got to l1loop then we must merge back
-  // with l1 and l2 so they're bad too.
- ASSERT_TRUE(contents.find(blks.Get("l1loop")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l1")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l2")) == contents.end());
-
- // present, path through.
- ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
-}
-
-// ┌───────┐ ┌──────────────┐
-// │ right │ ◀── │ entry │
-// └───────┘ └──────────────┘
-// │ │
-// │ │
-// ▼ ▼
-// ┌────┐ ┌───────┐ ┌──────────────┐
-// │ l2 │ ──▶ │ exit │ ┌─ │ l1 │ ◀┐
-// └────┘ └───────┘ │ └──────────────┘ │
-// ▲ │ │ │
-// └───────────────────┘ │ │
-// ▼ │
-// ┌──────────────┐ │ ┌──────────────┐
-// ┌─ │ l1loop │ │ │ l1loop_right │ ◀┐
-// │ └──────────────┘ │ └──────────────┘ │
-// │ │ │ │ │
-// │ │ │ │ │
-// │ ▼ │ │ │
-// │ ┌−−−−−−−−−−−−−−−−−−┐ │ │ │
-// │ ╎ removed ╎ │ │ │
-// │ ╎ ╎ │ │ │
-// │ ╎ ┌──────────────┐ ╎ │ │ │
-// │ ╎ │ l1loop_left │ ╎ │ │ │
-// │ ╎ └──────────────┘ ╎ │ │ │
-// │ ╎ ╎ │ │ │
-// │ └−−−−−−−−−−−−−−−−−−┘ │ │ │
-// │ │ │ │ │
-// │ │ │ │ │
-// │ ▼ │ │ │
-// │ ┌──────────────┐ │ │ │
-// │ │ l1loop_merge │ ─┘ │ │
-// │ └──────────────┘ │ │
-// │ ▲ │ │
-// │ └──────────────────────┘ │
-// │ │
-// │ │
-// └─────────────────────────────────────────────┘
-
-TEST_F(ExecutionSubgraphTest, PropagationLoop4) {
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "l1"},
- {"l1", "l2"},
- {"l1", "l1loop"},
- {"l1loop", "l1loop_left"},
- {"l1loop", "l1loop_right"},
- {"l1loop_left", "l1loop_merge"},
- {"l1loop_right", "l1loop_merge"},
- {"l1loop_merge", "l1"},
- {"l2", "exit"},
- {"entry", "right"},
- {"right", "exit"}}));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.RemoveBlock(blks.Get("l1loop_left"));
- esg.Finalize();
- ASSERT_TRUE(esg.IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 3u);
-
-  // Not present, no path through. If we got to l1loop then we must merge back
-  // with l1 and l2 so they're bad too.
- ASSERT_TRUE(contents.find(blks.Get("l1loop")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l1")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l1loop_left")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l1loop_right")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l1loop_merge")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l2")) == contents.end());
-
- // present, path through.
- ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
-}
-
-// +------------------------------------------------------+
-// | |
-// | +--------------+ +-------------+ |
-// | | right | <-- | entry | |
-// | +--------------+ +-------------+ |
-// | | | |
-// | | | |
-// | v v |
-// | +--------------+ +--------------------+ +----+
-// +> | exit | +> | l1 | --> | l2 |
-// +--------------+ | +--------------------+ +----+
-// | | ^
-// +---------------+ | |
-// | v |
-// +--------------+ +-------------+ |
-// | l1loop_right | <-- | l1loop | |
-// +--------------+ +-------------+ |
-// | |
-// | |
-// v |
-// + - - - - - - - - + |
-// ' removed ' |
-// ' ' |
-// ' +-------------+ ' |
-// ' | l1loop_left | ' -+
-// ' +-------------+ '
-// ' '
-// + - - - - - - - - +
-TEST_F(ExecutionSubgraphTest, PropagationLoop5) {
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "l1"},
- {"l1", "l2"},
- {"l1", "l1loop"},
- {"l1loop", "l1loop_left"},
- {"l1loop", "l1loop_right"},
- {"l1loop_left", "l1"},
- {"l1loop_right", "l1"},
- {"l2", "exit"},
- {"entry", "right"},
- {"right", "exit"}}));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.RemoveBlock(blks.Get("l1loop_left"));
- esg.Finalize();
- ASSERT_TRUE(esg.IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 3u);
-
-  // Not present, no path through. If we got to l1loop then we must merge back
-  // with l1 and l2 so they're bad too.
- ASSERT_TRUE(contents.find(blks.Get("l1loop")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l1")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l1loop_left")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l1loop_right")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("l2")) == contents.end());
-
- // present, path through.
- ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
-}
-
-TEST_F(ExecutionSubgraphTest, Invalid) {
- AdjacencyListGraph blks(SetupFromAdjacencyList(
- "entry",
- "exit",
- { { "entry", "left" }, { "entry", "right" }, { "left", "exit" }, { "right", "exit" } }));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.RemoveBlock(blks.Get("left"));
- esg.RemoveBlock(blks.Get("right"));
- esg.Finalize();
-
- ASSERT_FALSE(esg.IsValid());
- ASSERT_FALSE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 0u);
-}
-// Sibling branches are disconnected.
-TEST_F(ExecutionSubgraphTest, Exclusions) {
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- { { "entry", "a" },
- { "entry", "b" },
- { "entry", "c" },
- { "a", "exit" },
- { "b", "exit" },
- { "c", "exit" } }));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.RemoveBlock(blks.Get("a"));
- esg.RemoveBlock(blks.Get("c"));
- esg.Finalize();
- ASSERT_TRUE(esg.IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 3u);
- // Not present, no path through.
- ASSERT_TRUE(contents.find(blks.Get("a")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("c")) == contents.end());
-
- // present, path through.
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("b")) != contents.end());
-
- ArrayRef<const ExecutionSubgraph::ExcludedCohort> exclusions(esg.GetExcludedCohorts());
- ASSERT_EQ(exclusions.size(), 2u);
- std::unordered_set<const HBasicBlock*> exclude_a({ blks.Get("a") });
- std::unordered_set<const HBasicBlock*> exclude_c({ blks.Get("c") });
- ASSERT_TRUE(std::find_if(exclusions.cbegin(),
- exclusions.cend(),
- [&](const ExecutionSubgraph::ExcludedCohort& it) {
- return it.Blocks() == exclude_a;
- }) != exclusions.cend());
- ASSERT_TRUE(std::find_if(exclusions.cbegin(),
- exclusions.cend(),
- [&](const ExecutionSubgraph::ExcludedCohort& it) {
- return it.Blocks() == exclude_c;
- }) != exclusions.cend());
-}
-
-// Sibling branches are disconnected.
-// +- - - - - - - - - - - - - - - - - - - - - - +
-// ' remove_c '
-// ' '
-// ' +-----------+ '
-// ' | c_begin_2 | -------------------------+ '
-// ' +-----------+ | '
-// ' | '
-// +- - - - - - - - - - - - - - - - - - | '
-// ^ ' | '
-// | ' | '
-// | ' | '
-// + - - - - - -+ ' | '
-// ' remove_a ' ' | '
-// ' ' ' | '
-// ' +--------+ ' +-----------+ +---+' | '
-// ' | **a** | ' <-- | entry | --> | b |' | '
-// ' +--------+ ' +-----------+ +---+' | '
-// ' ' ' | '
-// + - - - - - -+ ' | '
-// | | | ' | '
-// | | | ' | '
-// | v | ' | '
-// | +- - - - - - - -+ | ' | '
-// | ' ' | ' | '
-// | ' +-----------+ ' | ' | '
-// | ' | c_begin_1 | ' | ' | '
-// | ' +-----------+ ' | ' | '
-// | ' | ' | ' | '
-// | ' | ' | ' | '
-// | ' | ' | ' | '
-// + - - - - - - - - -+ | + - - - | - - - - - - - + | ' | '
-// ' ' | + v ' | + | '
-// ' +---------+ | +-----------+ | | '
-// ' | c_end_2 | <-------+--------------- | **c_mid** | <-----------------+------+ '
-// ' +---------+ | +-----------+ | '
-// ' ' | + | ' | + '
-// + - - - - - - - - -+ | + - - - | - - - - - - - + | + - - - +
-// | | ' | ' |
-// | | ' | ' |
-// | | ' v ' |
-// | | ' +-----------+ ' |
-// | | ' | c_end_1 | ' |
-// | | ' +-----------+ ' |
-// | | ' ' |
-// | | +- - - - - - - -+ |
-// | | | |
-// | | | |
-// | | v v
-// | | +---------------------------------+
-// | +------------> | exit |
-// | +---------------------------------+
-// | ^
-// +------------------------------------+
-TEST_F(ExecutionSubgraphTest, ExclusionExtended) {
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- { { "entry", "a" },
- { "entry", "b" },
- { "entry", "c_begin_1" },
- { "entry", "c_begin_2" },
- { "c_begin_1", "c_mid" },
- { "c_begin_2", "c_mid" },
- { "c_mid", "c_end_1" },
- { "c_mid", "c_end_2" },
- { "a", "exit" },
- { "b", "exit" },
- { "c_end_1", "exit" },
- { "c_end_2", "exit" } }));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.RemoveBlock(blks.Get("a"));
- esg.RemoveBlock(blks.Get("c_mid"));
- esg.Finalize();
- ASSERT_TRUE(esg.IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 3u);
- // Not present, no path through.
- ASSERT_TRUE(contents.find(blks.Get("a")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("c_begin_1")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("c_begin_2")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("c_mid")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("c_end_1")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("c_end_2")) == contents.end());
-
- // present, path through.
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("b")) != contents.end());
-
- ArrayRef<const ExecutionSubgraph::ExcludedCohort> exclusions(esg.GetExcludedCohorts());
- ASSERT_EQ(exclusions.size(), 2u);
- BlockSet exclude_a({ blks.Get("a") });
- BlockSet exclude_c({ blks.Get("c_begin_1"),
- blks.Get("c_begin_2"),
- blks.Get("c_mid"),
- blks.Get("c_end_1"),
- blks.Get("c_end_2") });
- ASSERT_TRUE(std::find_if(exclusions.cbegin(),
- exclusions.cend(),
- [&](const ExecutionSubgraph::ExcludedCohort& it) {
- return it.Blocks() == exclude_a;
- }) != exclusions.cend());
- ASSERT_TRUE(
- std::find_if(
- exclusions.cbegin(), exclusions.cend(), [&](const ExecutionSubgraph::ExcludedCohort& it) {
- return it.Blocks() == exclude_c &&
- BlockSet({ blks.Get("c_begin_1"), blks.Get("c_begin_2") }) == it.EntryBlocks() &&
- BlockSet({ blks.Get("c_end_1"), blks.Get("c_end_2") }) == it.ExitBlocks();
- }) != exclusions.cend());
-}
-
-// ┌───────┐ ┌────────────┐
-// ┌─ │ right │ ◀── │ entry │
-// │ └───────┘ └────────────┘
-// │ │
-// │ │
-// │ ▼
-// │ ┌────────────┐
-// │ │ esc_top │
-// │ └────────────┘
-// │ │
-// │ │
-// │ ▼
-// │ ┌────────────┐
-// └──────────────▶ │ middle │ ─┐
-// └────────────┘ │
-// │ │
-// │ │
-// ▼ │
-// ┌────────────┐ │
-// │ esc_bottom │ │
-// └────────────┘ │
-// │ │
-// │ │
-// ▼ │
-// ┌────────────┐ │
-// │ exit │ ◀┘
-// └────────────┘
-TEST_F(ExecutionSubgraphTest, InAndOutEscape) {
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- { { "entry", "esc_top" },
- { "entry", "right" },
- { "esc_top", "middle" },
- { "right", "middle" },
- { "middle", "exit" },
- { "middle", "esc_bottom" },
- { "esc_bottom", "exit" } }));
-
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.RemoveBlock(blks.Get("esc_top"));
- esg.RemoveBlock(blks.Get("esc_bottom"));
- esg.Finalize();
-
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
- ASSERT_EQ(contents.size(), 0u);
- ASSERT_FALSE(esg.IsValid());
- ASSERT_FALSE(IsValidSubgraph(esg));
-
- ASSERT_EQ(contents.size(), 0u);
-}
-
-// Test with max number of successors and no removals.
-TEST_F(ExecutionSubgraphTest, BigNodes) {
- std::vector<std::string> mid_blocks;
- for (auto i : Range(ExecutionSubgraph::kMaxFilterableSuccessors)) {
- std::ostringstream oss;
- oss << "blk" << i;
-    mid_blocks.push_back(oss.str());
- }
- ASSERT_EQ(mid_blocks.size(), ExecutionSubgraph::kMaxFilterableSuccessors);
- std::vector<AdjacencyListGraph::Edge> edges;
- for (const auto& mid : mid_blocks) {
- edges.emplace_back("entry", mid);
- edges.emplace_back(mid, "exit");
- }
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry", "exit", edges));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.Finalize();
- ASSERT_TRUE(esg.IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- for (const auto& mid : mid_blocks) {
- EXPECT_TRUE(contents.find(blks.Get(mid)) != contents.end()) << mid;
- }
- // + 2 for entry and exit nodes.
- ASSERT_EQ(contents.size(), ExecutionSubgraph::kMaxFilterableSuccessors + 2);
-}
-
-// Test with max number of successors and some removals.
-TEST_F(ExecutionSubgraphTest, BigNodesMissing) {
- std::vector<std::string> mid_blocks;
- for (auto i : Range(ExecutionSubgraph::kMaxFilterableSuccessors)) {
- std::ostringstream oss;
- oss << "blk" << i;
- mid_blocks.push_back(oss.str());
- }
- std::vector<AdjacencyListGraph::Edge> edges;
- for (const auto& mid : mid_blocks) {
- edges.emplace_back("entry", mid);
- edges.emplace_back(mid, "exit");
- }
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry", "exit", edges));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.RemoveBlock(blks.Get("blk2"));
- esg.RemoveBlock(blks.Get("blk4"));
- esg.Finalize();
- ASSERT_TRUE(esg.IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), ExecutionSubgraph::kMaxFilterableSuccessors + 2 - 2);
-
- // Not present, no path through.
- ASSERT_TRUE(contents.find(blks.Get("blk2")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("blk4")) == contents.end());
-}
-
-// Test with max number of successors and all successors removed.
-TEST_F(ExecutionSubgraphTest, BigNodesNoPath) {
- std::vector<std::string> mid_blocks;
- for (auto i : Range(ExecutionSubgraph::kMaxFilterableSuccessors)) {
- std::ostringstream oss;
- oss << "blk" << i;
- mid_blocks.push_back(oss.str());
- }
- std::vector<AdjacencyListGraph::Edge> edges;
- for (const auto& mid : mid_blocks) {
- edges.emplace_back("entry", mid);
- edges.emplace_back(mid, "exit");
- }
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry", "exit", edges));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- for (const auto& mid : mid_blocks) {
- esg.RemoveBlock(blks.Get(mid));
- }
- esg.Finalize();
- ASSERT_FALSE(esg.IsValid());
- ASSERT_FALSE(IsValidSubgraph(esg));
-}
-
-// Test with max number of successors.
-TEST_F(ExecutionSubgraphTest, CanAnalyseBig) {
-  // Make an absurdly huge and well-connected graph. This should be a pretty worst-case scenario.
- constexpr size_t kNumBlocks = ExecutionSubgraph::kMaxFilterableSuccessors + 1000;
- std::vector<std::string> mid_blocks;
- for (auto i : Range(kNumBlocks)) {
- std::ostringstream oss;
- oss << "blk" << i;
- mid_blocks.push_back(oss.str());
- }
- std::vector<AdjacencyListGraph::Edge> edges;
- for (auto cur : Range(kNumBlocks)) {
- for (auto nxt :
- Range(cur + 1,
- std::min(cur + ExecutionSubgraph::kMaxFilterableSuccessors + 1, kNumBlocks))) {
- edges.emplace_back(mid_blocks[cur], mid_blocks[nxt]);
- }
- }
- AdjacencyListGraph blks(SetupFromAdjacencyList(mid_blocks.front(), mid_blocks.back(), edges));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
-
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- esg.Finalize();
- ASSERT_TRUE(esg.IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), kNumBlocks);
-}
-
-// Test with many successors.
-TEST_F(ExecutionSubgraphTest, CanAnalyseBig2) {
-  // Make an absurdly huge and well-connected graph. This should be a pretty worst-case scenario.
- constexpr size_t kNumBlocks = ExecutionSubgraph::kMaxFilterableSuccessors + 1000;
- constexpr size_t kTestMaxSuccessors = ExecutionSubgraph::kMaxFilterableSuccessors - 1;
- std::vector<std::string> mid_blocks;
- for (auto i : Range(kNumBlocks)) {
- std::ostringstream oss;
- oss << "blk" << i;
- mid_blocks.push_back(oss.str());
- }
- std::vector<AdjacencyListGraph::Edge> edges;
- for (auto cur : Range(kNumBlocks)) {
- for (auto nxt : Range(cur + 1, std::min(cur + 1 + kTestMaxSuccessors, kNumBlocks))) {
- edges.emplace_back(mid_blocks[cur], mid_blocks[nxt]);
- }
- }
- edges.emplace_back(mid_blocks.front(), mid_blocks.back());
- AdjacencyListGraph blks(SetupFromAdjacencyList(mid_blocks.front(), mid_blocks.back(), edges));
- ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
- ExecutionSubgraph esg(graph_, GetScopedAllocator());
- constexpr size_t kToRemoveIdx = kNumBlocks / 2;
- HBasicBlock* remove_implicit = blks.Get(mid_blocks[kToRemoveIdx]);
- for (HBasicBlock* pred : remove_implicit->GetPredecessors()) {
- esg.RemoveBlock(pred);
- }
- esg.Finalize();
- EXPECT_TRUE(esg.IsValid());
- EXPECT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
- esg.ReachableBlocks().end());
-
-  // Only entry and exit remain; removing the middle block's predecessors prunes everything else.
- EXPECT_EQ(contents.size(), 2u);
- EXPECT_TRUE(contents.find(remove_implicit) == contents.end());
- EXPECT_TRUE(contents.find(blks.Get(mid_blocks.front())) != contents.end());
- EXPECT_TRUE(contents.find(blks.Get(mid_blocks.back())) != contents.end());
-}
-
-// Test with too many successors.
-TEST_F(ExecutionSubgraphTest, CanNotAnalyseBig) {
- std::vector<std::string> mid_blocks;
- for (auto i : Range(ExecutionSubgraph::kMaxFilterableSuccessors + 4)) {
- std::ostringstream oss;
- oss << "blk" << i;
- mid_blocks.push_back(oss.str());
- }
- std::vector<AdjacencyListGraph::Edge> edges;
- for (const auto& mid : mid_blocks) {
- edges.emplace_back("entry", mid);
- edges.emplace_back(mid, "exit");
- }
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry", "exit", edges));
- ASSERT_FALSE(ExecutionSubgraph::CanAnalyse(graph_));
-}
-} // namespace art
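
For readers tracing the deleted tests, the CalculateValidity oracle at the top of the file is the whole contract in miniature: BFS from the entry, follow only blocks the subgraph still contains, and require that the exit is reached and that no visited block dead-ends first. A condensed, self-contained rendering of that loop follows; plain adjacency lists stand in for HGraph, so this is an illustration of the check, not the ART API.

    #include <cstddef>
    #include <queue>
    #include <unordered_set>
    #include <vector>

    bool IsValidSketch(const std::vector<std::vector<std::size_t>>& successors,
                       std::size_t entry,
                       std::size_t exit,
                       const std::unordered_set<std::size_t>& removed) {
      std::queue<std::size_t> worklist;
      std::unordered_set<std::size_t> visited;
      worklist.push(entry);
      bool reached_exit = false;
      while (!worklist.empty()) {
        std::size_t cur = worklist.front();
        worklist.pop();
        if (!visited.insert(cur).second) {
          continue;  // already processed
        }
        if (cur == exit) {
          reached_exit = true;
          continue;
        }
        bool has_succ = false;
        for (std::size_t succ : successors[cur]) {
          if (removed.count(succ) != 0) {
            continue;  // block not contained in the subgraph
          }
          has_succ = true;
          worklist.push(succ);
        }
        if (!has_succ) {
          return false;  // stuck before reaching the exit
        }
      }
      return reached_exit;
    }
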
diff --git a/compiler/optimizing/execution_subgraph_test.h b/compiler/optimizing/execution_subgraph_test.h
deleted file mode 100644
index cee105a045..0000000000
--- a/compiler/optimizing/execution_subgraph_test.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_EXECUTION_SUBGRAPH_TEST_H_
-#define ART_COMPILER_OPTIMIZING_EXECUTION_SUBGRAPH_TEST_H_
-
-#include "android-base/macros.h"
-
-#include "base/macros.h"
-
-namespace art HIDDEN {
-
-class HGraph;
-class ExecutionSubgraph;
-
-class ExecutionSubgraphTestHelper {
- public:
- static bool CalculateValidity(HGraph* graph, const ExecutionSubgraph* subgraph);
-
- private:
- ExecutionSubgraphTestHelper() = delete;
-
- DISALLOW_COPY_AND_ASSIGN(ExecutionSubgraphTestHelper);
-};
-} // namespace art
-
-#endif // ART_COMPILER_OPTIMIZING_EXECUTION_SUBGRAPH_TEST_H_
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index e5aa5d30df..b7f7a0f550 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -545,13 +545,6 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("invoke_type") << "InvokePolymorphic";
}
- void VisitPredicatedInstanceFieldGet(HPredicatedInstanceFieldGet* iget) override {
- StartAttributeStream("field_name") <<
- iget->GetFieldInfo().GetDexFile().PrettyField(iget->GetFieldInfo().GetFieldIndex(),
- /* with type */ false);
- StartAttributeStream("field_type") << iget->GetFieldType();
- }
-
void VisitInstanceFieldGet(HInstanceFieldGet* iget) override {
StartAttributeStream("field_name") <<
iget->GetFieldInfo().GetDexFile().PrettyField(iget->GetFieldInfo().GetFieldIndex(),
@@ -564,8 +557,6 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
iset->GetFieldInfo().GetDexFile().PrettyField(iset->GetFieldInfo().GetFieldIndex(),
/* with type */ false);
StartAttributeStream("field_type") << iset->GetFieldType();
- StartAttributeStream("predicated")
- << std::boolalpha << iset->GetIsPredicatedSet() << std::noboolalpha;
StartAttributeStream("write_barrier_kind") << iset->GetWriteBarrierKind();
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 3328f3babe..94b201e876 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -115,7 +115,6 @@ class InstructionSimplifierVisitor final : public HGraphDelegateVisitor {
void VisitInvoke(HInvoke* invoke) override;
void VisitDeoptimize(HDeoptimize* deoptimize) override;
void VisitVecMul(HVecMul* instruction) override;
- void VisitPredicatedInstanceFieldGet(HPredicatedInstanceFieldGet* instruction) override;
void SimplifyBoxUnbox(HInvoke* instruction, ArtField* field, DataType::Type type);
void SimplifySystemArrayCopy(HInvoke* invoke);
void SimplifyStringEquals(HInvoke* invoke);
@@ -950,67 +949,6 @@ static HInstruction* AllowInMinMax(IfCondition cmp,
return nullptr;
}
-// TODO This should really be done by LSE itself since there is significantly
-// more information available there.
-void InstructionSimplifierVisitor::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* pred_get) {
- HInstruction* target = pred_get->GetTarget();
- HInstruction* default_val = pred_get->GetDefaultValue();
- if (target->IsNullConstant()) {
- pred_get->ReplaceWith(default_val);
- pred_get->GetBlock()->RemoveInstruction(pred_get);
- RecordSimplification();
- return;
- } else if (!target->CanBeNull()) {
- HInstruction* replace_with = new (GetGraph()->GetAllocator())
- HInstanceFieldGet(pred_get->GetTarget(),
- pred_get->GetFieldInfo().GetField(),
- pred_get->GetFieldType(),
- pred_get->GetFieldOffset(),
- pred_get->IsVolatile(),
- pred_get->GetFieldInfo().GetFieldIndex(),
- pred_get->GetFieldInfo().GetDeclaringClassDefIndex(),
- pred_get->GetFieldInfo().GetDexFile(),
- pred_get->GetDexPc());
- if (pred_get->GetType() == DataType::Type::kReference) {
- replace_with->SetReferenceTypeInfoIfValid(pred_get->GetReferenceTypeInfo());
- }
- pred_get->GetBlock()->InsertInstructionBefore(replace_with, pred_get);
- pred_get->ReplaceWith(replace_with);
- pred_get->GetBlock()->RemoveInstruction(pred_get);
- RecordSimplification();
- return;
- }
- if (!target->IsPhi() || !default_val->IsPhi() || default_val->GetBlock() != target->GetBlock()) {
-    // The iget has already been reduced as far as possible: without matching
-    // phis in the same block we cannot pair target inputs with default values.
- return;
- }
- DCHECK_EQ(default_val->InputCount(), target->InputCount());
-  // Both phis are in the same block. If every possibly-null target input is
-  // associated with the same default value we can remove the phi from default_val.
- HInstruction* single_value = nullptr;
- auto inputs = target->GetInputs();
- for (auto [input, idx] : ZipCount(MakeIterationRange(inputs))) {
- if (input->CanBeNull()) {
- if (single_value == nullptr) {
- single_value = default_val->InputAt(idx);
- } else if (single_value != default_val->InputAt(idx) &&
- !single_value->Equals(default_val->InputAt(idx))) {
- // Multiple values are associated with potential nulls, can't combine.
- return;
- }
- }
- }
- DCHECK(single_value != nullptr) << "All target values are non-null but the phi as a whole still"
- << " can be null? This should not be possible." << std::endl
- << pred_get->DumpWithArgs();
- if (single_value->StrictlyDominates(pred_get)) {
- // Combine all the maybe null values into one.
- pred_get->ReplaceInput(single_value, 0);
- RecordSimplification();
- }
-}
-
void InstructionSimplifierVisitor::VisitSelect(HSelect* select) {
HInstruction* replace_with = nullptr;
HInstruction* condition = select->GetCondition();
@@ -1234,9 +1172,6 @@ static inline bool TryReplaceFieldOrArrayGetType(HInstruction* maybe_get, DataTy
if (maybe_get->IsInstanceFieldGet()) {
maybe_get->AsInstanceFieldGet()->SetType(new_type);
return true;
- } else if (maybe_get->IsPredicatedInstanceFieldGet()) {
- maybe_get->AsPredicatedInstanceFieldGet()->SetType(new_type);
- return true;
} else if (maybe_get->IsStaticFieldGet()) {
maybe_get->AsStaticFieldGet()->SetType(new_type);
return true;
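
The VisitPredicatedInstanceFieldGet visitor deleted above bundles three rewrites: a null-constant target collapses to the default value, a never-null target becomes a plain HInstanceFieldGet, and matching phis can merge their default values. The least obvious is the last one, so here is a hedged sketch of just that decision over plain data rather than HInstruction; every name in it is illustrative.

    #include <optional>
    #include <vector>

    struct PhiInput {
      bool target_can_be_null;  // nullability of target->InputAt(i)
      int default_value;        // stand-in for default_val->InputAt(i)
    };

    // Returns the unique default value guarding all possibly-null target inputs,
    // or nullopt when several distinct values are involved and no merge is legal.
    std::optional<int> SingleDefaultValue(const std::vector<PhiInput>& inputs) {
      std::optional<int> single;
      for (const PhiInput& in : inputs) {
        if (!in.target_can_be_null) {
          continue;  // a non-null target never consults its default value
        }
        if (!single.has_value()) {
          single = in.default_value;
        } else if (*single != in.default_value) {
          return std::nullopt;  // multiple values associated with potential nulls
        }
      }
      return single;
    }
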
diff --git a/compiler/optimizing/instruction_simplifier_test.cc b/compiler/optimizing/instruction_simplifier_test.cc
index 966f5b91cf..9f47995cf5 100644
--- a/compiler/optimizing/instruction_simplifier_test.cc
+++ b/compiler/optimizing/instruction_simplifier_test.cc
@@ -134,260 +134,6 @@ class InstanceOfInstructionSimplifierTestGroup
};
// // ENTRY
-// switch (param) {
-// case 1:
-// obj1 = param2; break;
-// case 2:
-// obj1 = param3; break;
-// default:
-// obj2 = new Obj();
-// }
-// val_phi = PHI[3,4,10]
-// target_phi = PHI[param2, param3, obj2]
-// return PredFieldGet[val_phi, target_phi] => PredFieldGet[val_phi, target_phi]
-TEST_F(InstructionSimplifierTest, SimplifyPredicatedFieldGetNoMerge) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "case1"},
- {"entry", "case2"},
- {"entry", "case3"},
- {"case1", "breturn"},
- {"case2", "breturn"},
- {"case3", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(case1);
- GET_BLOCK(case2);
- GET_BLOCK(case3);
- GET_BLOCK(breturn);
-#undef GET_BLOCK
-
- HInstruction* bool_value = MakeParam(DataType::Type::kInt32);
- HInstruction* obj1_param = MakeParam(DataType::Type::kReference);
- HInstruction* obj2_param = MakeParam(DataType::Type::kReference);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c4 = graph_->GetIntConstant(4);
- HInstruction* c10 = graph_->GetIntConstant(10);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(switch_inst);
- ManuallyBuildEnvFor(cls, {});
-
- HInstruction* goto_c1 = new (GetAllocator()) HGoto();
- case1->AddInstruction(goto_c1);
-
- HInstruction* goto_c2 = new (GetAllocator()) HGoto();
- case2->AddInstruction(goto_c2);
-
- HInstruction* obj3 = MakeNewInstance(cls);
- HInstruction* goto_c3 = new (GetAllocator()) HGoto();
- case3->AddInstruction(obj3);
- case3->AddInstruction(goto_c3);
-
- HPhi* val_phi = MakePhi({c3, c4, c10});
- HPhi* obj_phi = MakePhi({obj1_param, obj2_param, obj3});
- HPredicatedInstanceFieldGet* read_end =
- new (GetAllocator()) HPredicatedInstanceFieldGet(obj_phi,
- nullptr,
- val_phi,
- val_phi->GetType(),
- MemberOffset(10),
- false,
- 42,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
- breturn->AddPhi(val_phi);
- breturn->AddPhi(obj_phi);
- breturn->AddInstruction(read_end);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformSimplification(blks);
-
- EXPECT_INS_RETAINED(read_end);
-
- EXPECT_INS_EQ(read_end->GetTarget(), obj_phi);
- EXPECT_INS_EQ(read_end->GetDefaultValue(), val_phi);
-}
-
-// // ENTRY
-// switch (param) {
-// case 1:
-// obj1 = param2; break;
-// case 2:
-// obj1 = param3; break;
-// default:
-// obj2 = new Obj();
-// }
-// val_phi = PHI[3,3,10]
-// target_phi = PHI[param2, param3, obj2]
-// return PredFieldGet[val_phi, target_phi] => PredFieldGet[3, target_phi]
-TEST_F(InstructionSimplifierTest, SimplifyPredicatedFieldGetMerge) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "case1"},
- {"entry", "case2"},
- {"entry", "case3"},
- {"case1", "breturn"},
- {"case2", "breturn"},
- {"case3", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(case1);
- GET_BLOCK(case2);
- GET_BLOCK(case3);
- GET_BLOCK(breturn);
-#undef GET_BLOCK
-
- HInstruction* bool_value = MakeParam(DataType::Type::kInt32);
- HInstruction* obj1_param = MakeParam(DataType::Type::kReference);
- HInstruction* obj2_param = MakeParam(DataType::Type::kReference);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c10 = graph_->GetIntConstant(10);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(switch_inst);
- ManuallyBuildEnvFor(cls, {});
-
- HInstruction* goto_c1 = new (GetAllocator()) HGoto();
- case1->AddInstruction(goto_c1);
-
- HInstruction* goto_c2 = new (GetAllocator()) HGoto();
- case2->AddInstruction(goto_c2);
-
- HInstruction* obj3 = MakeNewInstance(cls);
- HInstruction* goto_c3 = new (GetAllocator()) HGoto();
- case3->AddInstruction(obj3);
- case3->AddInstruction(goto_c3);
-
- HPhi* val_phi = MakePhi({c3, c3, c10});
- HPhi* obj_phi = MakePhi({obj1_param, obj2_param, obj3});
- HPredicatedInstanceFieldGet* read_end =
- new (GetAllocator()) HPredicatedInstanceFieldGet(obj_phi,
- nullptr,
- val_phi,
- val_phi->GetType(),
- MemberOffset(10),
- false,
- 42,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
- breturn->AddPhi(val_phi);
- breturn->AddPhi(obj_phi);
- breturn->AddInstruction(read_end);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformSimplification(blks);
-
- EXPECT_FALSE(obj3->CanBeNull());
- EXPECT_INS_RETAINED(read_end);
-
- EXPECT_INS_EQ(read_end->GetTarget(), obj_phi);
- EXPECT_INS_EQ(read_end->GetDefaultValue(), c3);
-}
-
-// // ENTRY
-// if (param) {
-// obj1 = new Obj();
-// } else {
-// obj2 = new Obj();
-// }
-// val_phi = PHI[3,10]
-// target_phi = PHI[obj1, obj2]
-// return PredFieldGet[val_phi, target_phi] => FieldGet[target_phi]
-TEST_F(InstructionSimplifierTest, SimplifyPredicatedFieldGetNoNull) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(left);
- GET_BLOCK(right);
- GET_BLOCK(breturn);
-#undef GET_BLOCK
-
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c10 = graph_->GetIntConstant(10);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
-
- HInstruction* obj1 = MakeNewInstance(cls);
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(obj1);
- left->AddInstruction(goto_left);
-
- HInstruction* obj2 = MakeNewInstance(cls);
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(obj2);
- right->AddInstruction(goto_right);
-
- HPhi* val_phi = MakePhi({c3, c10});
- HPhi* obj_phi = MakePhi({obj1, obj2});
- obj_phi->SetCanBeNull(false);
- HInstruction* read_end = new (GetAllocator()) HPredicatedInstanceFieldGet(obj_phi,
- nullptr,
- val_phi,
- val_phi->GetType(),
- MemberOffset(10),
- false,
- 42,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
- breturn->AddPhi(val_phi);
- breturn->AddPhi(obj_phi);
- breturn->AddInstruction(read_end);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformSimplification(blks);
-
- EXPECT_FALSE(obj1->CanBeNull());
- EXPECT_FALSE(obj2->CanBeNull());
- EXPECT_INS_REMOVED(read_end);
-
- HInstanceFieldGet* ifget = FindSingleInstruction<HInstanceFieldGet>(graph_, breturn);
- ASSERT_NE(ifget, nullptr);
- EXPECT_INS_EQ(ifget->InputAt(0), obj_phi);
-}
-
-// // ENTRY
// obj = new Obj();
// // Make sure this graph isn't broken
// if (obj instanceof <other>) {
diff --git a/compiler/optimizing/load_store_analysis.cc b/compiler/optimizing/load_store_analysis.cc
index 75000c8b91..474c3bd92f 100644
--- a/compiler/optimizing/load_store_analysis.cc
+++ b/compiler/optimizing/load_store_analysis.cc
@@ -90,43 +90,6 @@ static bool CanBinaryOpsAlias(const HBinaryOperation* idx1,
return CanIntegerRangesOverlap(l1, h1, l2, h2);
}
-// Make sure we mark any writes/potential writes to heap-locations within partially
-// escaped values as escaping.
-void ReferenceInfo::PrunePartialEscapeWrites() {
- DCHECK(subgraph_ != nullptr);
- if (!subgraph_->IsValid()) {
- // All paths escape.
- return;
- }
- HGraph* graph = reference_->GetBlock()->GetGraph();
- ArenaBitVector additional_exclusions(
- allocator_, graph->GetBlocks().size(), false, kArenaAllocLSA);
- for (const HUseListNode<HInstruction*>& use : reference_->GetUses()) {
- const HInstruction* user = use.GetUser();
- if (!additional_exclusions.IsBitSet(user->GetBlock()->GetBlockId()) &&
- subgraph_->ContainsBlock(user->GetBlock()) &&
- (user->IsUnresolvedInstanceFieldSet() || user->IsUnresolvedStaticFieldSet() ||
- user->IsInstanceFieldSet() || user->IsStaticFieldSet() || user->IsArraySet()) &&
- (reference_ == user->InputAt(0)) &&
- std::any_of(subgraph_->UnreachableBlocks().begin(),
- subgraph_->UnreachableBlocks().end(),
- [&](const HBasicBlock* excluded) -> bool {
- return reference_->GetBlock()->GetGraph()->PathBetween(excluded,
- user->GetBlock());
- })) {
-      // This object had memory written to it somewhere; if it escaped along
-      // some paths prior to the current block, this write also counts as an
-      // escape.
- additional_exclusions.SetBit(user->GetBlock()->GetBlockId());
- }
- }
- if (UNLIKELY(additional_exclusions.IsAnyBitSet())) {
- for (uint32_t exc : additional_exclusions.Indexes()) {
- subgraph_->RemoveBlock(graph->GetBlocks()[exc]);
- }
- }
-}
-
bool HeapLocationCollector::InstructionEligibleForLSERemoval(HInstruction* inst) const {
if (inst->IsNewInstance()) {
return !inst->AsNewInstance()->NeedsChecks();
@@ -148,37 +111,6 @@ bool HeapLocationCollector::InstructionEligibleForLSERemoval(HInstruction* inst)
}
}
-void ReferenceInfo::CollectPartialEscapes(HGraph* graph) {
- ScopedArenaAllocator saa(graph->GetArenaStack());
- ArenaBitVector seen_instructions(&saa, graph->GetCurrentInstructionId(), false, kArenaAllocLSA);
- // Get regular escapes.
- ScopedArenaVector<HInstruction*> additional_escape_vectors(saa.Adapter(kArenaAllocLSA));
- LambdaEscapeVisitor scan_instructions([&](HInstruction* escape) -> bool {
- HandleEscape(escape);
- // LSE can't track heap-locations through Phi and Select instructions so we
- // need to assume all escapes from these are escapes for the base reference.
- if ((escape->IsPhi() || escape->IsSelect()) && !seen_instructions.IsBitSet(escape->GetId())) {
- seen_instructions.SetBit(escape->GetId());
- additional_escape_vectors.push_back(escape);
- }
- return true;
- });
- additional_escape_vectors.push_back(reference_);
- while (!additional_escape_vectors.empty()) {
- HInstruction* ref = additional_escape_vectors.back();
- additional_escape_vectors.pop_back();
- DCHECK(ref == reference_ || ref->IsPhi() || ref->IsSelect()) << *ref;
- VisitEscapes(ref, scan_instructions);
- }
-
- // Mark irreducible loop headers as escaping since they cannot be tracked through.
- for (HBasicBlock* blk : graph->GetActiveBlocks()) {
- if (blk->IsLoopHeader() && blk->GetLoopInformation()->IsIrreducible()) {
- HandleEscape(blk);
- }
- }
-}
-
void HeapLocationCollector::DumpReferenceStats(OptimizingCompilerStats* stats) {
if (stats == nullptr) {
return;
@@ -196,14 +128,6 @@ void HeapLocationCollector::DumpReferenceStats(OptimizingCompilerStats* stats) {
MaybeRecordStat(stats, MethodCompilationStat::kFullLSEPossible);
}
}
- // TODO This is an estimate of the number of allocations we will be able
- // to (partially) remove. As additional work is done this can be refined.
- if (ri->IsPartialSingleton() && instruction->IsNewInstance() &&
- ri->GetNoEscapeSubgraph()->ContainsBlock(instruction->GetBlock()) &&
- !ri->GetNoEscapeSubgraph()->GetExcludedCohorts().empty() &&
- InstructionEligibleForLSERemoval(instruction)) {
- MaybeRecordStat(stats, MethodCompilationStat::kPartialLSEPossible);
- }
}
}
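
Two pieces of the partial-escape machinery disappear from load_store_analysis.cc above. CollectPartialEscapes walks the uses of the reference (flowing through Phi and Select, and treating irreducible loop headers as escapes) and excludes every block where it escapes; PrunePartialEscapeWrites then additionally excludes a write's block whenever some already-excluded block can reach it, since the object may have escaped before the write runs. The core of that second test, sketched over a precomputed reachability matrix standing in for HGraph::PathBetween (illustrative only):

    #include <cstddef>
    #include <vector>

    // path_between[a][b] is true iff some execution path leads from block a to
    // block b; in the real code this query is HGraph::PathBetween.
    bool WriteCountsAsEscape(std::size_t write_block,
                             const std::vector<std::size_t>& excluded_blocks,
                             const std::vector<std::vector<bool>>& path_between) {
      for (std::size_t excluded : excluded_blocks) {
        if (path_between[excluded][write_block]) {
          return true;  // object may already have escaped when the write executes
        }
      }
      return false;
    }
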
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index ee425454a0..4a630ddf8f 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -25,65 +25,26 @@
#include "base/scoped_arena_containers.h"
#include "base/stl_util.h"
#include "escape.h"
-#include "execution_subgraph.h"
#include "nodes.h"
#include "optimizing/optimizing_compiler_stats.h"
namespace art HIDDEN {
-enum class LoadStoreAnalysisType {
- kBasic,
- kNoPredicatedInstructions,
- kFull,
-};
-
// A ReferenceInfo contains additional info about a reference such as
// whether it's a singleton, returned, etc.
class ReferenceInfo : public DeletableArenaObject<kArenaAllocLSA> {
public:
- ReferenceInfo(HInstruction* reference,
- ScopedArenaAllocator* allocator,
- size_t pos,
- LoadStoreAnalysisType elimination_type)
+ ReferenceInfo(HInstruction* reference, size_t pos)
: reference_(reference),
position_(pos),
is_singleton_(true),
is_singleton_and_not_returned_(true),
- is_singleton_and_not_deopt_visible_(true),
- allocator_(allocator),
- subgraph_(nullptr) {
- // TODO We can do this in one pass.
-    // TODO NewArray is possible but will need to get a handle on how to deal
-    // with the dynamic loads; for now just ignore it.
- bool can_be_partial = elimination_type != LoadStoreAnalysisType::kBasic &&
- (/* reference_->IsNewArray() || */ reference_->IsNewInstance());
- if (can_be_partial) {
- subgraph_.reset(
- new (allocator) ExecutionSubgraph(reference->GetBlock()->GetGraph(), allocator));
- CollectPartialEscapes(reference_->GetBlock()->GetGraph());
- }
+ is_singleton_and_not_deopt_visible_(true) {
CalculateEscape(reference_,
nullptr,
&is_singleton_,
&is_singleton_and_not_returned_,
&is_singleton_and_not_deopt_visible_);
- if (can_be_partial) {
- if (elimination_type == LoadStoreAnalysisType::kNoPredicatedInstructions) {
- // This is to mark writes to partially escaped values as also part of the escaped subset.
- // TODO We can avoid this if we have a 'ConditionalWrite' instruction. Will require testing
- // to see if the additional branches are worth it.
- PrunePartialEscapeWrites();
- }
- DCHECK(subgraph_ != nullptr);
- subgraph_->Finalize();
- } else {
- DCHECK(subgraph_ == nullptr);
- }
- }
-
- const ExecutionSubgraph* GetNoEscapeSubgraph() const {
- DCHECK(IsPartialSingleton());
- return subgraph_.get();
}
HInstruction* GetReference() const {
@@ -101,16 +62,6 @@ class ReferenceInfo : public DeletableArenaObject<kArenaAllocLSA> {
return is_singleton_;
}
-  // This is a singleton and there are paths that don't escape the method.
- bool IsPartialSingleton() const {
- auto ref = GetReference();
-    // TODO NewArray is possible but will need to get a handle on how to deal
-    // with the dynamic loads; for now just ignore it.
- return (/* ref->IsNewArray() || */ ref->IsNewInstance()) &&
- subgraph_ != nullptr &&
- subgraph_->IsValid();
- }
-
// Returns true if reference_ is a singleton and not returned to the caller or
// used as an environment local of an HDeoptimize instruction.
// The allocation and stores into reference_ may be eliminated for such cases.
@@ -126,19 +77,6 @@ class ReferenceInfo : public DeletableArenaObject<kArenaAllocLSA> {
}
private:
- void CollectPartialEscapes(HGraph* graph);
- void HandleEscape(HBasicBlock* escape) {
- DCHECK(subgraph_ != nullptr);
- subgraph_->RemoveBlock(escape);
- }
- void HandleEscape(HInstruction* escape) {
- HandleEscape(escape->GetBlock());
- }
-
- // Make sure we mark any writes/potential writes to heap-locations within partially
- // escaped values as escaping.
- void PrunePartialEscapeWrites();
-
HInstruction* const reference_;
const size_t position_; // position in HeapLocationCollector's ref_info_array_.
@@ -149,10 +87,6 @@ class ReferenceInfo : public DeletableArenaObject<kArenaAllocLSA> {
// Is singleton and not used as an environment local of HDeoptimize.
bool is_singleton_and_not_deopt_visible_;
- ScopedArenaAllocator* allocator_;
-
- std::unique_ptr<ExecutionSubgraph> subgraph_;
-
DISALLOW_COPY_AND_ASSIGN(ReferenceInfo);
};
@@ -249,16 +183,13 @@ class HeapLocationCollector : public HGraphVisitor {
// aliasing matrix of 8 heap locations.
static constexpr uint32_t kInitialAliasingMatrixBitVectorSize = 32;
- HeapLocationCollector(HGraph* graph,
- ScopedArenaAllocator* allocator,
- LoadStoreAnalysisType lse_type)
+ HeapLocationCollector(HGraph* graph, ScopedArenaAllocator* allocator)
: HGraphVisitor(graph),
allocator_(allocator),
ref_info_array_(allocator->Adapter(kArenaAllocLSA)),
heap_locations_(allocator->Adapter(kArenaAllocLSA)),
aliasing_matrix_(allocator, kInitialAliasingMatrixBitVectorSize, true, kArenaAllocLSA),
- has_heap_stores_(false),
- lse_type_(lse_type) {
+ has_heap_stores_(false) {
aliasing_matrix_.ClearAllBits();
}
@@ -272,12 +203,6 @@ class HeapLocationCollector : public HGraphVisitor {
ref_info_array_.clear();
}
- size_t CountPartialSingletons() const {
- return std::count_if(ref_info_array_.begin(),
- ref_info_array_.end(),
- [](ReferenceInfo* ri) { return ri->IsPartialSingleton(); });
- }
-
size_t GetNumberOfHeapLocations() const {
return heap_locations_.size();
}
@@ -507,7 +432,7 @@ class HeapLocationCollector : public HGraphVisitor {
ReferenceInfo* ref_info = FindReferenceInfoOf(instruction);
if (ref_info == nullptr) {
size_t pos = ref_info_array_.size();
- ref_info = new (allocator_) ReferenceInfo(instruction, allocator_, pos, lse_type_);
+ ref_info = new (allocator_) ReferenceInfo(instruction, pos);
ref_info_array_.push_back(ref_info);
}
return ref_info;
@@ -566,10 +491,6 @@ class HeapLocationCollector : public HGraphVisitor {
is_vec_op);
}
- void VisitPredicatedInstanceFieldGet(HPredicatedInstanceFieldGet* instruction) override {
- VisitFieldAccess(instruction->GetTarget(), instruction->GetFieldInfo());
- CreateReferenceInfoForReferenceType(instruction);
- }
void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override {
VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
CreateReferenceInfoForReferenceType(instruction);
@@ -645,25 +566,16 @@ class HeapLocationCollector : public HGraphVisitor {
ArenaBitVector aliasing_matrix_; // aliasing info between each pair of locations.
bool has_heap_stores_; // If there are no heap stores, LSE acts as GVN with better
// alias analysis and won't be as effective.
- LoadStoreAnalysisType lse_type_;
DISALLOW_COPY_AND_ASSIGN(HeapLocationCollector);
};
class LoadStoreAnalysis {
public:
- // lse_type controls whether we should keep track of escapes at a per-block level for
- // partial LSE.
explicit LoadStoreAnalysis(HGraph* graph,
OptimizingCompilerStats* stats,
- ScopedArenaAllocator* local_allocator,
- LoadStoreAnalysisType lse_type)
- : graph_(graph),
- stats_(stats),
- heap_location_collector_(
- graph,
- local_allocator,
- ExecutionSubgraph::CanAnalyse(graph_) ? lse_type : LoadStoreAnalysisType::kBasic) {}
+ ScopedArenaAllocator* local_allocator)
+ : graph_(graph), stats_(stats), heap_location_collector_(graph, local_allocator) {}
const HeapLocationCollector& GetHeapLocationCollector() const {
return heap_location_collector_;
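With the lse_type parameter gone, every call site reduces to the three-argument form. A self-contained usage sketch mirroring the updated test call sites below (the wrapper function itself is hypothetical):

// Hypothetical helper showing the simplified LoadStoreAnalysis API.
static size_t CountHeapLocations(HGraph* graph) {
  ScopedArenaAllocator allocator(graph->GetArenaStack());
  LoadStoreAnalysis lsa(graph, /*stats=*/ nullptr, &allocator);
  lsa.Run();
  return lsa.GetHeapLocationCollector().GetNumberOfHeapLocations();
}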
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index 8c6812f184..947bf04923 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -27,8 +27,6 @@
#include "dex/dex_file_types.h"
#include "dex/method_reference.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
-#include "execution_subgraph.h"
-#include "execution_subgraph_test.h"
#include "gtest/gtest.h"
#include "handle.h"
#include "handle_scope.h"
@@ -52,13 +50,6 @@ class LoadStoreAnalysisTest : public CommonCompilerTest, public OptimizingUnitTe
return AdjacencyListGraph(graph_, GetAllocator(), entry_name, exit_name, adj);
}
- bool IsValidSubgraph(const ExecutionSubgraph* esg) {
- return ExecutionSubgraphTestHelper::CalculateValidity(graph_, esg);
- }
-
- bool IsValidSubgraph(const ExecutionSubgraph& esg) {
- return ExecutionSubgraphTestHelper::CalculateValidity(graph_, &esg);
- }
void CheckReachability(const AdjacencyListGraph& adj,
const std::vector<AdjacencyListGraph::Edge>& reach);
};
@@ -102,7 +93,7 @@ TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) {
// Test HeapLocationCollector initialization.
// Should be no heap locations, no operations on the heap.
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- HeapLocationCollector heap_location_collector(graph_, &allocator, LoadStoreAnalysisType::kFull);
+ HeapLocationCollector heap_location_collector(graph_, &allocator);
ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 0U);
ASSERT_FALSE(heap_location_collector.HasHeapStores());
@@ -201,7 +192,7 @@ TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) {
// Test HeapLocationCollector initialization.
// Should be no heap locations, no operations on the heap.
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- HeapLocationCollector heap_location_collector(graph_, &allocator, LoadStoreAnalysisType::kFull);
+ HeapLocationCollector heap_location_collector(graph_, &allocator);
ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 0U);
ASSERT_FALSE(heap_location_collector.HasHeapStores());
@@ -283,7 +274,7 @@ TEST_F(LoadStoreAnalysisTest, ArrayIndexAliasingTest) {
body->AddInstruction(new (GetAllocator()) HReturnVoid());
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kBasic);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
@@ -451,7 +442,7 @@ TEST_F(LoadStoreAnalysisTest, ArrayAliasingTest) {
entry->AddInstruction(vstore_i_add6_vlen2);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kBasic);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
@@ -611,7 +602,7 @@ TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) {
entry->AddInstruction(arr_set_8);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kBasic);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
@@ -702,7 +693,7 @@ TEST_F(LoadStoreAnalysisTest, TestHuntOriginalRef) {
entry->AddInstruction(array_get4);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- HeapLocationCollector heap_location_collector(graph_, &allocator, LoadStoreAnalysisType::kFull);
+ HeapLocationCollector heap_location_collector(graph_, &allocator);
heap_location_collector.VisitBasicBlock(entry);
// Test that the HeapLocationCollector should be able to tell
@@ -817,756 +808,6 @@ TEST_F(LoadStoreAnalysisTest, ReachabilityTest3) {
});
}
-static bool AreExclusionsIndependent(HGraph* graph, const ExecutionSubgraph* esg) {
- auto excluded = esg->GetExcludedCohorts();
- if (excluded.size() < 2) {
- return true;
- }
- for (auto first = excluded.begin(); first != excluded.end(); ++first) {
- for (auto second = excluded.begin(); second != excluded.end(); ++second) {
- if (first == second) {
- continue;
- }
- for (const HBasicBlock* entry : first->EntryBlocks()) {
- for (const HBasicBlock* exit : second->ExitBlocks()) {
- if (graph->PathBetween(exit, entry)) {
- return false;
- }
- }
- }
- }
- }
- return true;
-}
-
-// // ENTRY
-// obj = new Obj();
-// if (parameter_value) {
-// // LEFT
-// call_func(obj);
-// } else {
-// // RIGHT
-// obj.field = 1;
-// }
-// // EXIT
-// obj.field;
-TEST_F(LoadStoreAnalysisTest, PartialEscape) {
- CreateGraph();
- AdjacencyListGraph blks(SetupFromAdjacencyList(
- "entry",
- "exit",
- { { "entry", "left" }, { "entry", "right" }, { "left", "exit" }, { "right", "exit" } }));
- HBasicBlock* entry = blks.Get("entry");
- HBasicBlock* left = blks.Get("left");
- HBasicBlock* right = blks.Get("right");
- HBasicBlock* exit = blks.Get("exit");
-
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
- HInstruction* c0 = graph_->GetIntConstant(0);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(if_inst);
-
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone,
- !graph_->IsDebuggable());
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->SetRawInputAt(0, new_inst);
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
-
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c0,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* read_final = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- exit->AddInstruction(read_final);
-
- ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
- lsa.Run();
-
- const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
- ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
- ASSERT_TRUE(info->IsPartialSingleton());
- const ExecutionSubgraph* esg = info->GetNoEscapeSubgraph();
-
- ASSERT_TRUE(esg->IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- ASSERT_TRUE(AreExclusionsIndependent(graph_, esg));
- std::unordered_set<const HBasicBlock*> contents(esg->ReachableBlocks().begin(),
- esg->ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 3u);
- ASSERT_TRUE(contents.find(blks.Get("left")) == contents.end());
-
- ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
-}
-
-// // ENTRY
-// obj = new Obj();
-// if (parameter_value) {
-// // LEFT
-// call_func(obj);
-// } else {
-// // RIGHT
-// obj.field = 1;
-// }
-// // EXIT
-// obj.field2;
-TEST_F(LoadStoreAnalysisTest, PartialEscape2) {
- CreateGraph();
- AdjacencyListGraph blks(SetupFromAdjacencyList(
- "entry",
- "exit",
- { { "entry", "left" }, { "entry", "right" }, { "left", "exit" }, { "right", "exit" } }));
- HBasicBlock* entry = blks.Get("entry");
- HBasicBlock* left = blks.Get("left");
- HBasicBlock* right = blks.Get("right");
- HBasicBlock* exit = blks.Get("exit");
-
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
- HInstruction* c0 = graph_->GetIntConstant(0);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(if_inst);
-
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone,
- !graph_->IsDebuggable());
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->SetRawInputAt(0, new_inst);
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
-
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c0,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* read_final = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(16),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- exit->AddInstruction(read_final);
-
- ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
- lsa.Run();
-
- const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
- ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
- ASSERT_TRUE(info->IsPartialSingleton());
- const ExecutionSubgraph* esg = info->GetNoEscapeSubgraph();
-
- ASSERT_TRUE(esg->IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- ASSERT_TRUE(AreExclusionsIndependent(graph_, esg));
- std::unordered_set<const HBasicBlock*> contents(esg->ReachableBlocks().begin(),
- esg->ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 3u);
- ASSERT_TRUE(contents.find(blks.Get("left")) == contents.end());
-
- ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
-}
-
-// // ENTRY
-// obj = new Obj();
-// obj.field = 10;
-// if (parameter_value) {
-// // LEFT
-// call_func(obj);
-// } else {
-// // RIGHT
-// obj.field = 20;
-// }
-// // EXIT
-// obj.field;
-TEST_F(LoadStoreAnalysisTest, PartialEscape3) {
- CreateGraph();
- AdjacencyListGraph blks(SetupFromAdjacencyList(
- "entry",
- "exit",
- { { "entry", "left" }, { "entry", "right" }, { "left", "exit" }, { "right", "exit" } }));
- HBasicBlock* entry = blks.Get("entry");
- HBasicBlock* left = blks.Get("left");
- HBasicBlock* right = blks.Get("right");
- HBasicBlock* exit = blks.Get("exit");
-
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
- HInstruction* c10 = graph_->GetIntConstant(10);
- HInstruction* c20 = graph_->GetIntConstant(20);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
-
- HInstruction* write_entry = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c10,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_entry);
- entry->AddInstruction(if_inst);
-
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone,
- !graph_->IsDebuggable());
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->SetRawInputAt(0, new_inst);
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
-
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c20,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* read_final = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- exit->AddInstruction(read_final);
-
- ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
- lsa.Run();
-
- const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
- ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
- ASSERT_TRUE(info->IsPartialSingleton());
- const ExecutionSubgraph* esg = info->GetNoEscapeSubgraph();
-
- ASSERT_TRUE(esg->IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- ASSERT_TRUE(AreExclusionsIndependent(graph_, esg));
- std::unordered_set<const HBasicBlock*> contents(esg->ReachableBlocks().begin(),
- esg->ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 3u);
- ASSERT_TRUE(contents.find(blks.Get("left")) == contents.end());
-
- ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
-}
-
-// For simplicity, Partial LSE considers check-casts to escape. This means we don't
-// need to worry about inserting throws.
-// // ENTRY
-// obj = new Obj();
-// obj.field = 10;
-// if (parameter_value) {
-// // LEFT
-// (Foo)obj;
-// } else {
-// // RIGHT
-// obj.field = 20;
-// }
-// // EXIT
-// obj.field;
-TEST_F(LoadStoreAnalysisTest, PartialEscape4) {
- CreateGraph();
- AdjacencyListGraph blks(SetupFromAdjacencyList(
- "entry",
- "exit",
- { { "entry", "left" }, { "entry", "right" }, { "left", "exit" }, { "right", "exit" } }));
- HBasicBlock* entry = blks.Get("entry");
- HBasicBlock* left = blks.Get("left");
- HBasicBlock* right = blks.Get("right");
- HBasicBlock* exit = blks.Get("exit");
-
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
- HInstruction* c10 = graph_->GetIntConstant(10);
- HInstruction* c20 = graph_->GetIntConstant(20);
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
-
- HInstruction* write_entry = MakeIFieldSet(new_inst, c10, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_entry);
- entry->AddInstruction(if_inst);
-
- ScopedNullHandle<mirror::Class> null_klass_;
- HInstruction* cls2 = MakeClassLoad();
- HInstruction* check_cast = new (GetAllocator()) HCheckCast(
- new_inst, cls2, TypeCheckKind::kExactCheck, null_klass_, 0, GetAllocator(), nullptr, nullptr);
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(cls2);
- left->AddInstruction(check_cast);
- left->AddInstruction(goto_left);
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c20, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* read_final = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- exit->AddInstruction(read_final);
-
- ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
- lsa.Run();
-
- const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
- ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
- ASSERT_TRUE(info->IsPartialSingleton());
- const ExecutionSubgraph* esg = info->GetNoEscapeSubgraph();
-
- ASSERT_TRUE(esg->IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- ASSERT_TRUE(AreExclusionsIndependent(graph_, esg));
- std::unordered_set<const HBasicBlock*> contents(esg->ReachableBlocks().begin(),
- esg->ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 3u);
- ASSERT_TRUE(contents.find(blks.Get("left")) == contents.end());
-
- ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
-}
-
-// For simplicity, Partial LSE considers instance-ofs with bitvectors to escape.
-// // ENTRY
-// obj = new Obj();
-// obj.field = 10;
-// if (parameter_value) {
-// // LEFT
-// obj instanceof /*bitvector*/ Foo;
-// } else {
-// // RIGHT
-// obj.field = 20;
-// }
-// // EXIT
-// obj.field;
-TEST_F(LoadStoreAnalysisTest, PartialEscape5) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList(
- "entry",
- "exit",
- { { "entry", "left" }, { "entry", "right" }, { "left", "exit" }, { "right", "exit" } }));
- HBasicBlock* entry = blks.Get("entry");
- HBasicBlock* left = blks.Get("left");
- HBasicBlock* right = blks.Get("right");
- HBasicBlock* exit = blks.Get("exit");
-
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
- HInstruction* c10 = graph_->GetIntConstant(10);
- HInstruction* c20 = graph_->GetIntConstant(20);
- HIntConstant* bs1 = graph_->GetIntConstant(0xffff);
- HIntConstant* bs2 = graph_->GetIntConstant(0x00ff);
- HInstruction* cls = MakeClassLoad();
- HInstruction* null_const = graph_->GetNullConstant();
- HInstruction* new_inst = MakeNewInstance(cls);
-
- HInstruction* write_entry = MakeIFieldSet(new_inst, c10, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_entry);
- entry->AddInstruction(if_inst);
-
- ScopedNullHandle<mirror::Class> null_klass_;
- HInstruction* instanceof = new (GetAllocator()) HInstanceOf(new_inst,
- null_const,
- TypeCheckKind::kBitstringCheck,
- null_klass_,
- 0,
- GetAllocator(),
- bs1,
- bs2);
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(instanceof);
- left->AddInstruction(goto_left);
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c20, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* read_final = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- exit->AddInstruction(read_final);
-
- ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
- lsa.Run();
-
- const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
- ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
- ASSERT_TRUE(info->IsPartialSingleton());
- const ExecutionSubgraph* esg = info->GetNoEscapeSubgraph();
-
- ASSERT_TRUE(esg->IsValid());
- ASSERT_TRUE(IsValidSubgraph(esg));
- ASSERT_TRUE(AreExclusionsIndependent(graph_, esg));
- std::unordered_set<const HBasicBlock*> contents(esg->ReachableBlocks().begin(),
- esg->ReachableBlocks().end());
-
- ASSERT_EQ(contents.size(), 3u);
- ASSERT_TRUE(contents.find(blks.Get("left")) == contents.end());
-
- ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
-}
-
-// Before we had predicated-set, we needed to be able to remove the store as
-// well. This test makes sure that still works.
-// // ENTRY
-// obj = new Obj();
-// if (parameter_value) {
-// // LEFT
-// call_func(obj);
-// } else {
-// // RIGHT
-// obj.f1 = 0;
-// }
-// // EXIT
-// // call_func prevents the elimination of this store.
-// obj.f2 = 0;
-TEST_F(LoadStoreAnalysisTest, TotalEscapeAdjacentNoPredicated) {
- CreateGraph();
- AdjacencyListGraph blks(SetupFromAdjacencyList(
- "entry",
- "exit",
- {{"entry", "left"}, {"entry", "right"}, {"left", "exit"}, {"right", "exit"}}));
- HBasicBlock* entry = blks.Get("entry");
- HBasicBlock* left = blks.Get("left");
- HBasicBlock* right = blks.Get("right");
- HBasicBlock* exit = blks.Get("exit");
-
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
- HInstruction* c0 = graph_->GetIntConstant(0);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(if_inst);
-
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- {nullptr, 0},
- nullptr,
- {},
- InvokeType::kStatic,
- {nullptr, 0},
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone,
- !graph_->IsDebuggable());
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->SetRawInputAt(0, new_inst);
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
-
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c0,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* write_final = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c0,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(16),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- exit->AddInstruction(write_final);
-
- ScopedArenaAllocator allocator(graph_->GetArenaStack());
- graph_->ClearDominanceInformation();
- graph_->BuildDominatorTree();
- LoadStoreAnalysis lsa(
- graph_, nullptr, &allocator, LoadStoreAnalysisType::kNoPredicatedInstructions);
- lsa.Run();
-
- const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
- ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
- ASSERT_FALSE(info->IsPartialSingleton());
-}
-
-// With predicated-set we can (partially) remove the store as well.
-// // ENTRY
-// obj = new Obj();
-// if (parameter_value) {
-// // LEFT
-// call_func(obj);
-// } else {
-// // RIGHT
-// obj.f1 = 0;
-// }
-// // EXIT
-// // call_func prevents the elimination of this store.
-// obj.f2 = 0;
-TEST_F(LoadStoreAnalysisTest, TotalEscapeAdjacent) {
- CreateGraph();
- AdjacencyListGraph blks(SetupFromAdjacencyList(
- "entry",
- "exit",
- { { "entry", "left" }, { "entry", "right" }, { "left", "exit" }, { "right", "exit" } }));
- HBasicBlock* entry = blks.Get("entry");
- HBasicBlock* left = blks.Get("left");
- HBasicBlock* right = blks.Get("right");
- HBasicBlock* exit = blks.Get("exit");
-
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
- HInstruction* c0 = graph_->GetIntConstant(0);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(if_inst);
-
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone,
- !graph_->IsDebuggable());
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->SetRawInputAt(0, new_inst);
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
-
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c0,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* write_final = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c0,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(16),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- exit->AddInstruction(write_final);
-
- ScopedArenaAllocator allocator(graph_->GetArenaStack());
- graph_->ClearDominanceInformation();
- graph_->BuildDominatorTree();
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
- lsa.Run();
-
- const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
- ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
- ASSERT_TRUE(info->IsPartialSingleton());
- const ExecutionSubgraph* esg = info->GetNoEscapeSubgraph();
-
- EXPECT_TRUE(esg->IsValid()) << esg->GetExcludedCohorts();
- EXPECT_TRUE(IsValidSubgraph(esg));
- std::unordered_set<const HBasicBlock*> contents(esg->ReachableBlocks().begin(),
- esg->ReachableBlocks().end());
-
- EXPECT_EQ(contents.size(), 3u);
- EXPECT_TRUE(contents.find(blks.Get("left")) == contents.end());
- EXPECT_FALSE(contents.find(blks.Get("right")) == contents.end());
- EXPECT_FALSE(contents.find(blks.Get("entry")) == contents.end());
- EXPECT_FALSE(contents.find(blks.Get("exit")) == contents.end());
-}
-
// // ENTRY
// obj = new Obj();
// if (parameter_value) {
@@ -1670,12 +911,12 @@ TEST_F(LoadStoreAnalysisTest, TotalEscape) {
exit->AddInstruction(read_final);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
- ASSERT_FALSE(info->IsPartialSingleton());
+ ASSERT_FALSE(info->IsSingleton());
}
// // ENTRY
@@ -1725,12 +966,12 @@ TEST_F(LoadStoreAnalysisTest, TotalEscape2) {
exit->AddInstruction(return_final);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
- ASSERT_FALSE(info->IsPartialSingleton());
+ ASSERT_TRUE(info->IsSingletonAndNonRemovable());
}
// // ENTRY
@@ -1900,12 +1141,12 @@ TEST_F(LoadStoreAnalysisTest, DoubleDiamondEscape) {
exit->AddInstruction(read_final);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
- ASSERT_FALSE(info->IsPartialSingleton());
+ ASSERT_FALSE(info->IsSingleton());
}
// // ENTRY
@@ -2065,11 +1306,11 @@ TEST_F(LoadStoreAnalysisTest, PartialPhiPropagation1) {
graph_->BuildDominatorTree();
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
- ASSERT_FALSE(info->IsPartialSingleton());
+ ASSERT_FALSE(info->IsSingleton());
}
} // namespace art
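The rewritten assertions above check the strength of the singleton property rather than partial-escape structure. As a quick reference, a sketch restating the three predicates used by these tests (wording inferred from the flags in load_store_analysis.h):

// Sketch: relative strength of the predicates asserted in these tests.
//   IsSingleton()                : the only name for the object in the method.
//   IsSingletonAndNonRemovable() : singleton, but returned to the caller or
//                                  visible to an HDeoptimize environment
//                                  (TotalEscape2 returns the object).
//   IsSingletonAndRemovable()    : singleton and safe to eliminate fully.
bool SingletonButMustBeKept(ReferenceInfo* info) {
  return info->IsSingleton() && !info->IsSingletonAndRemovable();
}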
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 36c3770c42..2e5ee84d76 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -33,13 +33,11 @@
#include "base/scoped_arena_containers.h"
#include "base/transform_iterator.h"
#include "escape.h"
-#include "execution_subgraph.h"
#include "handle.h"
#include "load_store_analysis.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "nodes.h"
-#include "optimizing/execution_subgraph.h"
#include "optimizing_compiler_stats.h"
#include "reference_type_propagation.h"
#include "side_effects_analysis.h"
@@ -94,9 +92,7 @@
* to maintain the validity of all heap locations during the optimization
* phase, we only record substitutes at this phase and the real elimination
* is delayed till the end of LSE. Loads that require a loop Phi placeholder
- * replacement are recorded for processing later. We also keep track of the
- * heap-value at the start load so that later partial-LSE can predicate the
- * load.
+ * replacement are recorded for processing later.
* - If the instruction is a store, it updates the heap value for the heap
* location with the stored value and records the store itself so that we can
* mark it for keeping if the value becomes observable. Heap values are
@@ -240,79 +236,6 @@
* The time complexity of this phase is
* O(instructions + instruction_uses).
*
- * 5. Partial LSE
- *
- * Move allocations closer to their escapes and remove/predicate loads and
- * stores as required.
- *
- * Partial singletons are objects which only escape from the function or have
- * multiple names along certain execution paths. In cases where we recognize
- * these partial singletons we can move the allocation and initialization
- * closer to the actual escape(s). We can then perform a simplified version of
- * LSE step 2 to determine the unescaped value of any reads performed after the
- * object may have escaped. These are used to replace these reads with
- * 'predicated-read' instructions where the value is only read if the object
- * has actually escaped. We use the existence of the object itself as the
- * marker of whether escape has occurred.
- *
- * There are several steps in this sub-pass
- *
- * 5.1 Group references
- *
- * Since all heap-locations for a single reference escape at the same time, we
- * need to group the heap-locations by reference and process them at the same
- * time.
- *
- * O(heap_locations).
- *
- * FIXME: The time complexity above assumes we can bucket the heap-locations in
- * O(1), which is not true since we just perform a linear scan of the heap-ref
- * list. Since there are generally only a small number of heap-references which
- * are partial-singletons, this is fine and has lower real overhead than a hash map.
- *
- * 5.2 Generate materializations
- *
- * Once we have the references we add new 'materialization blocks' on the edges
- * where escape becomes inevitable. This information is calculated by the
- * execution-subgraphs created during load-store-analysis. We create new
- * materializations in these blocks and initialize them with the value of
- * each heap-location, ignoring side effects (since the object hasn't escaped
- * yet). Worst case, this is the same time-complexity as step 3 since we may
- * need to materialize phis.
- *
- * O(heap_locations^2 * materialization_edges)
- *
- * 5.3 Propagate materializations
- *
- * Since we use the materialization as the marker for escape, we need to
- * propagate it throughout the graph. Since the subgraph analysis considers any
- * lifetime that escapes a loop (and hence would require a loop-phi) to be
- * escaping at the loop-header, we do not need to create any loop-phis to do
- * this.
- *
- * O(edges)
- *
- * NB: Currently the subgraph analysis considers all objects to have their
- * lifetimes start at the entry block. This simplifies that analysis enormously
- * but means that we cannot distinguish between an escape in a loop where the
- * lifetime does not escape the loop (in which case this pass could optimize)
- * and one where it does escape the loop (in which case the whole loop is
- * escaping). This is a shortcoming that would be good to fix at some point.
- *
- * 5.4 Propagate partial values
- *
- * We need to replace loads and stores to the partial reference with predicated
- * ones that have default non-escaping values. Again this is the same as step 3.
- *
- * O(heap_locations^2 * edges)
- *
- * 5.5 Final fixup
- *
- * Now all we need to do is replace and remove uses of the old reference with the
- * appropriate materialization.
- *
- * O(instructions + uses)
- *
* FIXME: The time complexities described above assume that the
* HeapLocationCollector finds a heap location for an instruction in O(1)
* time but it is currently O(heap_locations); this can be fixed by adding
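The deleted step-5 description rests on two ideas that a concrete sketch may make clearer: the materialized object doubles as the escape marker, and reads after a possible escape become predicated. In rough pseudo-C++ with hypothetical names (not ART API):

// Pseudo-code for the removed design: 'materialized' is null on paths
// where the object never escaped and non-null once an escape edge has
// been crossed, so the object's existence marks that escape happened.
int PredicatedRead(Object* materialized, int known_unescaped_value) {
  // Read through the reference only if escape actually occurred;
  // otherwise the value tracked by LSE is still authoritative.
  return materialized != nullptr ? materialized->field
                                 : known_unescaped_value;
}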
@@ -324,7 +247,6 @@ namespace art HIDDEN {
#define LSE_VLOG \
if (::art::LoadStoreElimination::kVerboseLoggingMode && VLOG_IS_ON(compiler)) LOG(INFO)
-class PartialLoadStoreEliminationHelper;
class HeapRefHolder;
// Use HGraphDelegateVisitor for which all VisitInvokeXXX() delegate to VisitInvoke().
@@ -332,7 +254,6 @@ class LSEVisitor final : private HGraphDelegateVisitor {
public:
LSEVisitor(HGraph* graph,
const HeapLocationCollector& heap_location_collector,
- bool perform_partial_lse,
OptimizingCompilerStats* stats);
void Run();
@@ -615,27 +536,7 @@ class LSEVisitor final : private HGraphDelegateVisitor {
return PhiPlaceholderIndex(phi_placeholder.GetPhiPlaceholder());
}
- bool IsEscapingObject(ReferenceInfo* info, HBasicBlock* block, size_t index) {
- return !info->IsSingletonAndRemovable() &&
- !(info->IsPartialSingleton() && IsPartialNoEscape(block, index));
- }
-
- bool IsPartialNoEscape(HBasicBlock* blk, size_t idx) {
- auto* ri = heap_location_collector_.GetHeapLocation(idx)->GetReferenceInfo();
- if (!ri->IsPartialSingleton()) {
- return false;
- }
- ArrayRef<const ExecutionSubgraph::ExcludedCohort> cohorts =
- ri->GetNoEscapeSubgraph()->GetExcludedCohorts();
- return std::none_of(cohorts.cbegin(),
- cohorts.cend(),
- [&](const ExecutionSubgraph::ExcludedCohort& ex) -> bool {
- // Make sure we haven't escaped yet and never will.
- return ex.PrecedesBlock(blk) ||
- ex.ContainsBlock(blk) ||
- ex.SucceedsBlock(blk);
- });
- }
+ bool IsEscapingObject(ReferenceInfo* info) { return !info->IsSingletonAndRemovable(); }
PhiPlaceholder GetPhiPlaceholderAt(size_t off) const {
DCHECK_LT(off, num_phi_placeholders_);
@@ -652,9 +553,7 @@ class LSEVisitor final : private HGraphDelegateVisitor {
}
Value Replacement(Value value) const {
- DCHECK(value.NeedsPhi() ||
- (current_phase_ == Phase::kPartialElimination && value.IsMergedUnknown()))
- << value << " phase: " << current_phase_;
+ DCHECK(value.NeedsPhi()) << value << " phase: " << current_phase_;
Value replacement = phi_placeholder_replacements_[PhiPlaceholderIndex(value)];
DCHECK(replacement.IsUnknown() || replacement.IsInstruction());
DCHECK(replacement.IsUnknown() ||
@@ -663,35 +562,6 @@ class LSEVisitor final : private HGraphDelegateVisitor {
}
Value ReplacementOrValue(Value value) const {
- if (current_phase_ == Phase::kPartialElimination) {
- // In this phase we are materializing the default values which are used
- // only if the partial singleton did not escape, so we can replace
- // a partial unknown with the prior value.
- if (value.IsPartialUnknown()) {
- value = value.GetPriorValue().ToValue();
- }
- if ((value.IsMergedUnknown() || value.NeedsPhi()) &&
- phi_placeholder_replacements_[PhiPlaceholderIndex(value)].IsValid()) {
- value = phi_placeholder_replacements_[PhiPlaceholderIndex(value)];
- DCHECK(!value.IsMergedUnknown());
- DCHECK(!value.NeedsPhi());
- } else if (value.IsMergedUnknown()) {
- return Value::ForLoopPhiPlaceholder(value.GetPhiPlaceholder());
- }
- if (value.IsInstruction() && value.GetInstruction()->IsInstanceFieldGet()) {
- DCHECK_LT(static_cast<size_t>(value.GetInstruction()->GetId()),
- substitute_instructions_for_loads_.size());
- HInstruction* substitute =
- substitute_instructions_for_loads_[value.GetInstruction()->GetId()];
- if (substitute != nullptr) {
- DCHECK(substitute->IsPredicatedInstanceFieldGet());
- return Value::ForInstruction(substitute);
- }
- }
- DCHECK_IMPLIES(value.IsInstruction(),
- FindSubstitute(value.GetInstruction()) == value.GetInstruction());
- return value;
- }
if (value.NeedsPhi() && phi_placeholder_replacements_[PhiPlaceholderIndex(value)].IsValid()) {
return Replacement(value);
} else {
@@ -752,8 +622,8 @@ class LSEVisitor final : private HGraphDelegateVisitor {
HInstruction* FindSubstitute(HInstruction* instruction) const {
size_t id = static_cast<size_t>(instruction->GetId());
if (id >= substitute_instructions_for_loads_.size()) {
- // New Phi (may not be in the graph yet), default value or PredicatedInstanceFieldGet.
- DCHECK_IMPLIES(IsLoad(instruction), instruction->IsPredicatedInstanceFieldGet());
+ // New Phi (may not be in the graph yet), or default value.
+ DCHECK(!IsLoad(instruction));
return instruction;
}
HInstruction* substitute = substitute_instructions_for_loads_[id];
@@ -789,7 +659,6 @@ class LSEVisitor final : private HGraphDelegateVisitor {
static bool IsLoad(HInstruction* instruction) {
// Unresolved load is not treated as a load.
return instruction->IsInstanceFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsStaticFieldGet() ||
instruction->IsVecLoad() ||
instruction->IsArrayGet();
@@ -818,12 +687,7 @@ class LSEVisitor final : private HGraphDelegateVisitor {
if (value.IsPureUnknown() || value.IsPartialUnknown()) {
return;
}
- if (value.IsMergedUnknown()) {
- kept_merged_unknowns_.SetBit(PhiPlaceholderIndex(value));
- phi_placeholders_to_search_for_kept_stores_.SetBit(PhiPlaceholderIndex(value));
- return;
- }
- if (value.NeedsPhi()) {
+ if (value.IsMergedUnknown() || value.NeedsPhi()) {
phi_placeholders_to_search_for_kept_stores_.SetBit(PhiPlaceholderIndex(value));
} else {
HInstruction* instruction = value.GetInstruction();
@@ -843,9 +707,7 @@ class LSEVisitor final : private HGraphDelegateVisitor {
// We use this function when reading a location with unknown value and
// therefore we cannot know what exact store wrote that unknown value.
// But we can have a phi placeholder here marking multiple stores to keep.
- DCHECK(
- !heap_values[i].stored_by.IsInstruction() ||
- heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo()->IsPartialSingleton());
+ DCHECK(!heap_values[i].stored_by.IsInstruction());
KeepStores(heap_values[i].stored_by);
heap_values[i].stored_by = Value::PureUnknown();
} else if (heap_location_collector_.MayAlias(i, loc_index)) {
@@ -925,7 +787,6 @@ class LSEVisitor final : private HGraphDelegateVisitor {
enum class Phase {
kLoadElimination,
kStoreElimination,
- kPartialElimination,
};
bool MayAliasOnBackEdge(HBasicBlock* loop_header, size_t idx1, size_t idx2) const;
@@ -958,21 +819,6 @@ class LSEVisitor final : private HGraphDelegateVisitor {
void FindOldValueForPhiPlaceholder(PhiPlaceholder phi_placeholder, DataType::Type type);
void FindStoresWritingOldValues();
void FinishFullLSE();
- void PrepareForPartialPhiComputation();
- // Create materialization block and materialization object for the given predecessor of entry.
- HInstruction* SetupPartialMaterialization(PartialLoadStoreEliminationHelper& helper,
- HeapRefHolder&& holder,
- size_t pred_idx,
- HBasicBlock* blk);
- // Returns the value that would be read by the 'read' instruction on
- // 'orig_new_inst' if 'orig_new_inst' has not escaped.
- HInstruction* GetPartialValueAt(HNewInstance* orig_new_inst, HInstruction* read);
- void MovePartialEscapes();
-
- void VisitPredicatedInstanceFieldGet(HPredicatedInstanceFieldGet* instruction) override {
- LOG(FATAL) << "Visited instruction " << instruction->DumpWithoutArgs()
- << " but LSE should be the only source of predicated-ifield-gets!";
- }
void HandleAcquireLoad(HInstruction* instruction) {
DCHECK((instruction->IsInstanceFieldGet() && instruction->AsInstanceFieldGet()->IsVolatile()) ||
@@ -1109,7 +955,7 @@ class LSEVisitor final : private HGraphDelegateVisitor {
// Finalizable objects always escape.
const bool finalizable_object =
reference->IsNewInstance() && reference->AsNewInstance()->IsFinalizable();
- if (!finalizable_object && !IsEscapingObject(info, block, i)) {
+ if (!finalizable_object && !IsEscapingObject(info)) {
// Check whether the reference for a store is used by an environment local of
// the HDeoptimize. If not, the singleton is not observed after deoptimization.
const HUseList<HEnvironment*>& env_uses = reference->GetEnvUses();
@@ -1133,7 +979,7 @@ class LSEVisitor final : private HGraphDelegateVisitor {
ScopedArenaVector<ValueRecord>& heap_values = heap_values_for_[block->GetBlockId()];
for (size_t i = 0u, size = heap_values.size(); i != size; ++i) {
ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
- if (must_keep_stores || IsEscapingObject(ref_info, block, i)) {
+ if (must_keep_stores || IsEscapingObject(ref_info)) {
KeepStores(heap_values[i].stored_by);
heap_values[i].stored_by = Value::PureUnknown();
}
@@ -1216,30 +1062,9 @@ class LSEVisitor final : private HGraphDelegateVisitor {
heap_values_for_[instruction->GetBlock()->GetBlockId()];
for (size_t i = 0u, size = heap_values.size(); i != size; ++i) {
ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
- HBasicBlock* blk = instruction->GetBlock();
// We don't need to do anything if the reference has not escaped at this point.
- // This is true if either we (1) never escape or (2) sometimes escape but
- // there is no possible execution where we have done so at this time. NB:
- // We count being in the excluded cohort as escaping. Technically, this is
- // a bit over-conservative (since we can have multiple non-escaping calls
- // before a single escaping one) but this simplifies everything greatly.
- auto partial_singleton_did_not_escape = [](ReferenceInfo* ref_info, HBasicBlock* blk) {
- DCHECK(ref_info->IsPartialSingleton());
- if (!ref_info->GetNoEscapeSubgraph()->ContainsBlock(blk)) {
- return false;
- }
- ArrayRef<const ExecutionSubgraph::ExcludedCohort> cohorts =
- ref_info->GetNoEscapeSubgraph()->GetExcludedCohorts();
- return std::none_of(cohorts.begin(),
- cohorts.end(),
- [&](const ExecutionSubgraph::ExcludedCohort& cohort) {
- return cohort.PrecedesBlock(blk);
- });
- };
- if (!can_throw_inside_a_try &&
- (ref_info->IsSingleton() ||
- // partial and we aren't currently escaping and we haven't escaped yet.
- (ref_info->IsPartialSingleton() && partial_singleton_did_not_escape(ref_info, blk)))) {
+ // This is true if we never escape.
+ if (!can_throw_inside_a_try && ref_info->IsSingleton()) {
// Singleton references cannot be seen by the callee.
} else {
if (can_throw || side_effects.DoesAnyRead() || side_effects.DoesAnyWrite()) {
@@ -1315,7 +1140,7 @@ class LSEVisitor final : private HGraphDelegateVisitor {
heap_values[i].value = Value::ForInstruction(new_instance->GetLoadClass());
heap_values[i].stored_by = Value::PureUnknown();
}
- } else if (inside_a_try || IsEscapingObject(info, block, i)) {
+ } else if (inside_a_try || IsEscapingObject(info)) {
// Since NewInstance can throw, we presume all previous stores could be visible.
KeepStores(heap_values[i].stored_by);
heap_values[i].stored_by = Value::PureUnknown();
@@ -1350,7 +1175,7 @@ class LSEVisitor final : private HGraphDelegateVisitor {
// Array elements are set to default heap values.
heap_values[i].value = Value::Default();
heap_values[i].stored_by = Value::PureUnknown();
- } else if (inside_a_try || IsEscapingObject(info, block, i)) {
+ } else if (inside_a_try || IsEscapingObject(info)) {
// Since NewArray can throw, we presume all previous stores could be visible.
KeepStores(heap_values[i].stored_by);
heap_values[i].stored_by = Value::PureUnknown();
@@ -1363,14 +1188,6 @@ class LSEVisitor final : private HGraphDelegateVisitor {
DCHECK(!instruction->CanThrow());
}
- bool ShouldPerformPartialLSE() const {
- return perform_partial_lse_ &&
- !GetGraph()->IsCompilingOsr() &&
- !GetGraph()->HasIrreducibleLoops();
- }
-
- bool perform_partial_lse_;
-
const HeapLocationCollector& heap_location_collector_;
// Use local allocator for allocating memory.
@@ -1427,10 +1244,6 @@ class LSEVisitor final : private HGraphDelegateVisitor {
// The invalid heap value is used to mark Phi placeholders that cannot be replaced.
ScopedArenaVector<Value> phi_placeholder_replacements_;
- // Merged-unknowns that must have their predecessor values kept to ensure
- // partially escaped values are written
- ArenaBitVector kept_merged_unknowns_;
-
ScopedArenaVector<HInstruction*> singleton_new_instances_;
// The field infos for each heap location (if relevant).
@@ -1438,7 +1251,6 @@ class LSEVisitor final : private HGraphDelegateVisitor {
Phase current_phase_;
- friend class PartialLoadStoreEliminationHelper;
friend struct ScopedRestoreHeapValues;
friend std::ostream& operator<<(std::ostream& os, const Value& v);
@@ -1459,8 +1271,6 @@ std::ostream& operator<<(std::ostream& oss, const LSEVisitor::Phase& phase) {
return oss << "kLoadElimination";
case LSEVisitor::Phase::kStoreElimination:
return oss << "kStoreElimination";
- case LSEVisitor::Phase::kPartialElimination:
- return oss << "kPartialElimination";
}
}
@@ -1584,10 +1394,8 @@ std::ostream& operator<<(std::ostream& os, const LSEVisitor::Value& v) {
LSEVisitor::LSEVisitor(HGraph* graph,
const HeapLocationCollector& heap_location_collector,
- bool perform_partial_lse,
OptimizingCompilerStats* stats)
: HGraphDelegateVisitor(graph, stats),
- perform_partial_lse_(perform_partial_lse),
heap_location_collector_(heap_location_collector),
allocator_(graph->GetArenaStack()),
num_phi_placeholders_(GetGraph()->GetBlocks().size() *
@@ -1617,10 +1425,6 @@ LSEVisitor::LSEVisitor(HGraph* graph,
phi_placeholder_replacements_(num_phi_placeholders_,
Value::Invalid(),
allocator_.Adapter(kArenaAllocLSE)),
- kept_merged_unknowns_(&allocator_,
- /*start_bits=*/num_phi_placeholders_,
- /*expandable=*/false,
- kArenaAllocLSE),
singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)),
field_infos_(heap_location_collector_.GetNumberOfHeapLocations(),
allocator_.Adapter(kArenaAllocLSE)),
@@ -1860,8 +1664,7 @@ void LSEVisitor::MaterializeNonLoopPhis(PhiPlaceholder phi_placeholder, DataType
Value pred_value = ReplacementOrValue(heap_values_for_[predecessor->GetBlockId()][idx].value);
DCHECK(!pred_value.IsPureUnknown()) << pred_value << " block " << current_block->GetBlockId()
<< " pred: " << predecessor->GetBlockId();
- if (pred_value.NeedsNonLoopPhi() ||
- (current_phase_ == Phase::kPartialElimination && pred_value.IsMergedUnknown())) {
+ if (pred_value.NeedsNonLoopPhi()) {
// We need to process the Phi placeholder first.
work_queue.push_back(pred_value.GetPhiPlaceholder());
} else if (pred_value.IsDefault()) {
@@ -1892,12 +1695,6 @@ void LSEVisitor::VisitGetLocation(HInstruction* instruction, size_t idx) {
RecordFieldInfo(&instruction->GetFieldInfo(), idx);
}
DCHECK(record.value.IsUnknown() || record.value.Equals(ReplacementOrValue(record.value)));
- // If we are unknown, we either come from somewhere untracked or we can reconstruct the partial
- // value.
- DCHECK(!record.value.IsPureUnknown() ||
- heap_location_collector_.GetHeapLocation(idx)->GetReferenceInfo() == nullptr ||
- !heap_location_collector_.GetHeapLocation(idx)->GetReferenceInfo()->IsPartialSingleton())
- << "In " << GetGraph()->PrettyMethod() << ": " << record.value << " for " << *instruction;
intermediate_values_.insert({instruction, record.value});
loads_and_stores_.push_back({ instruction, idx });
if ((record.value.IsDefault() || record.value.NeedsNonLoopPhi()) &&
@@ -2306,9 +2103,7 @@ bool LSEVisitor::MaterializeLoopPhis(ArrayRef<const size_t> phi_placeholder_inde
for (HBasicBlock* predecessor : block->GetPredecessors()) {
Value value = ReplacementOrValue(heap_values_for_[predecessor->GetBlockId()][idx].value);
if (value.NeedsNonLoopPhi()) {
- DCHECK(current_phase_ == Phase::kLoadElimination ||
- current_phase_ == Phase::kPartialElimination)
- << current_phase_;
+ DCHECK(current_phase_ == Phase::kLoadElimination) << current_phase_;
MaterializeNonLoopPhis(value.GetPhiPlaceholder(), type);
value = Replacement(value);
}
@@ -2769,22 +2564,9 @@ void LSEVisitor::SearchPhiPlaceholdersForKeptStores() {
work_queue.push_back(index);
}
const ArenaVector<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
- std::optional<ArenaBitVector> not_kept_stores;
- if (stats_) {
- not_kept_stores.emplace(GetGraph()->GetAllocator(),
- kept_stores_.GetBitSizeOf(),
- false,
- ArenaAllocKind::kArenaAllocLSE);
- }
while (!work_queue.empty()) {
uint32_t cur_phi_idx = work_queue.back();
PhiPlaceholder phi_placeholder = GetPhiPlaceholderAt(cur_phi_idx);
- // Only writes to partial-escapes need to be specifically kept.
- bool is_partial_kept_merged_unknown =
- kept_merged_unknowns_.IsBitSet(cur_phi_idx) &&
- heap_location_collector_.GetHeapLocation(phi_placeholder.GetHeapLocation())
- ->GetReferenceInfo()
- ->IsPartialSingleton();
work_queue.pop_back();
size_t idx = phi_placeholder.GetHeapLocation();
HBasicBlock* block = blocks[phi_placeholder.GetBlockId()];
@@ -2804,11 +2586,6 @@ void LSEVisitor::SearchPhiPlaceholdersForKeptStores() {
if (!stored_by.IsUnknown() && (i == idx || MayAliasOnBackEdge(block, idx, i))) {
if (stored_by.NeedsPhi()) {
size_t phi_placeholder_index = PhiPlaceholderIndex(stored_by);
- if (is_partial_kept_merged_unknown) {
- // Propagate merged-unknown keep since otherwise this might look
- // like a partial escape we can remove.
- kept_merged_unknowns_.SetBit(phi_placeholder_index);
- }
if (!phi_placeholders_to_search_for_kept_stores_.IsBitSet(phi_placeholder_index)) {
phi_placeholders_to_search_for_kept_stores_.SetBit(phi_placeholder_index);
work_queue.push_back(phi_placeholder_index);
@@ -2819,24 +2596,12 @@ void LSEVisitor::SearchPhiPlaceholdersForKeptStores() {
DCHECK(ri != nullptr) << "No heap value for " << stored_by.GetInstruction()->DebugName()
<< " id: " << stored_by.GetInstruction()->GetId() << " block: "
<< stored_by.GetInstruction()->GetBlock()->GetBlockId();
- if (!is_partial_kept_merged_unknown && IsPartialNoEscape(predecessor, idx)) {
- if (not_kept_stores) {
- not_kept_stores->SetBit(stored_by.GetInstruction()->GetId());
- }
- } else {
- kept_stores_.SetBit(stored_by.GetInstruction()->GetId());
- }
+ kept_stores_.SetBit(stored_by.GetInstruction()->GetId());
}
}
}
}
}
- if (not_kept_stores) {
- // a - b := (a & ~b)
- not_kept_stores->Subtract(&kept_stores_);
- auto num_removed = not_kept_stores->NumSetBits();
- MaybeRecordStat(stats_, MethodCompilationStat::kPartialStoreRemoved, num_removed);
- }
}
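The statistics bookkeeping removed above relies on the bit-set identity stated in its comment, a - b := (a & ~b). A standalone restatement in plain C++ (std::bitset stands in for the ART BitVector):

#include <bitset>

// Set difference over store ids: stores provisionally treated as
// droppable partial-escape writes, minus those later marked kept.
static std::bitset<64> StoresActuallyRemoved(std::bitset<64> not_kept,
                                             std::bitset<64> kept) {
  return not_kept & ~kept;  // a - b := (a & ~b)
}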
void LSEVisitor::UpdateValueRecordForStoreElimination(/*inout*/ValueRecord* value_record) {
@@ -3026,932 +2791,8 @@ void LSEVisitor::Run() {
// 4. Replace loads and remove unnecessary stores and singleton allocations.
FinishFullLSE();
-
- // 5. Move partial escapes down and fixup with PHIs.
- current_phase_ = Phase::kPartialElimination;
- MovePartialEscapes();
-}
-
-// Clear unknown loop-phi results. Here we'll be able to use partial-unknowns, so we need to
-// retry all of them with more information about where they come from.
-void LSEVisitor::PrepareForPartialPhiComputation() {
- std::replace_if(
- phi_placeholder_replacements_.begin(),
- phi_placeholder_replacements_.end(),
- [](const Value& val) { return !val.IsDefault() && !val.IsInstruction(); },
- Value::Invalid());
-}
-
-class PartialLoadStoreEliminationHelper {
- public:
- PartialLoadStoreEliminationHelper(LSEVisitor* lse, ScopedArenaAllocator* alloc)
- : lse_(lse),
- alloc_(alloc),
- new_ref_phis_(alloc_->Adapter(kArenaAllocLSE)),
- heap_refs_(alloc_->Adapter(kArenaAllocLSE)),
- max_preds_per_block_((*std::max_element(GetGraph()->GetActiveBlocks().begin(),
- GetGraph()->GetActiveBlocks().end(),
- [](HBasicBlock* a, HBasicBlock* b) {
- return a->GetNumberOfPredecessors() <
- b->GetNumberOfPredecessors();
- }))
- ->GetNumberOfPredecessors()),
- materialization_blocks_(GetGraph()->GetBlocks().size() * max_preds_per_block_,
- nullptr,
- alloc_->Adapter(kArenaAllocLSE)),
- first_materialization_block_id_(GetGraph()->GetBlocks().size()) {
- size_t num_partial_singletons = lse_->heap_location_collector_.CountPartialSingletons();
- heap_refs_.reserve(num_partial_singletons);
- new_ref_phis_.reserve(num_partial_singletons * GetGraph()->GetBlocks().size());
- CollectInterestingHeapRefs();
- }
-
- ~PartialLoadStoreEliminationHelper() {
- if (heap_refs_.empty()) {
- return;
- }
- ReferenceTypePropagation rtp_fixup(GetGraph(),
- Handle<mirror::DexCache>(),
- /* is_first_run= */ false);
- rtp_fixup.Visit(ArrayRef<HInstruction* const>(new_ref_phis_));
- GetGraph()->ClearReachabilityInformation();
- GetGraph()->RecomputeDominatorTree();
- GetGraph()->ComputeReachabilityInformation();
- }
-
- class IdxToHeapLoc {
- public:
- explicit IdxToHeapLoc(const HeapLocationCollector* hlc) : collector_(hlc) {}
- HeapLocation* operator()(size_t idx) const {
- return collector_->GetHeapLocation(idx);
- }
-
- private:
- const HeapLocationCollector* collector_;
- };
-
-
- class HeapReferenceData {
- public:
- using LocIterator = IterationRange<TransformIterator<BitVector::IndexIterator, IdxToHeapLoc>>;
- HeapReferenceData(PartialLoadStoreEliminationHelper* helper,
- HNewInstance* new_inst,
- const ExecutionSubgraph* subgraph,
- ScopedArenaAllocator* alloc)
- : new_instance_(new_inst),
- helper_(helper),
- heap_locs_(alloc,
- helper->lse_->heap_location_collector_.GetNumberOfHeapLocations(),
- /* expandable= */ false,
- kArenaAllocLSE),
- materializations_(
-            // We generally won't need to create too many materialization blocks, and we
-            // can expand this as needed, so just start off with 2x.
- 2 * helper->lse_->GetGraph()->GetBlocks().size(),
- nullptr,
- alloc->Adapter(kArenaAllocLSE)),
- collector_(helper->lse_->heap_location_collector_),
- subgraph_(subgraph) {}
-
- LocIterator IterateLocations() {
- auto idxs = heap_locs_.Indexes();
- return MakeTransformRange(idxs, IdxToHeapLoc(&collector_));
- }
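
IterateLocations above composes the bit vector's index iterator with IdxToHeapLoc, so callers see HeapLocation* values rather than raw indexes. A sketch of that index-to-object projection in plain standard C++ (the real code uses ART's MakeTransformRange/TransformIterator helpers; Collector below is a hypothetical stand-in for the HeapLocationCollector):

#include <cstddef>
#include <vector>

struct HeapLocation { /* offsets, types, ... */ };

struct Collector {
  std::vector<HeapLocation> locations;
  HeapLocation* Get(size_t idx) { return &locations[idx]; }
};

// Visit only the locations whose bit is set, applying the same
// index-to-location projection that IdxToHeapLoc performs.
template <typename Fn>
void ForEachSetLocation(const std::vector<bool>& bits, Collector& c, Fn fn) {
  for (size_t i = 0; i < bits.size(); ++i) {
    if (bits[i]) {
      fn(c.Get(i));
    }
  }
}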
-
- void AddHeapLocation(size_t idx) {
- heap_locs_.SetBit(idx);
- }
-
- const ExecutionSubgraph* GetNoEscapeSubgraph() const {
- return subgraph_;
- }
-
- bool IsPostEscape(HBasicBlock* blk) {
- return std::any_of(
- subgraph_->GetExcludedCohorts().cbegin(),
- subgraph_->GetExcludedCohorts().cend(),
- [&](const ExecutionSubgraph::ExcludedCohort& ec) { return ec.PrecedesBlock(blk); });
- }
-
- bool InEscapeCohort(HBasicBlock* blk) {
- return std::any_of(
- subgraph_->GetExcludedCohorts().cbegin(),
- subgraph_->GetExcludedCohorts().cend(),
- [&](const ExecutionSubgraph::ExcludedCohort& ec) { return ec.ContainsBlock(blk); });
- }
-
- bool BeforeAllEscapes(HBasicBlock* b) {
- return std::none_of(subgraph_->GetExcludedCohorts().cbegin(),
- subgraph_->GetExcludedCohorts().cend(),
- [&](const ExecutionSubgraph::ExcludedCohort& ec) {
- return ec.PrecedesBlock(b) || ec.ContainsBlock(b);
- });
- }
-
- HNewInstance* OriginalNewInstance() const {
- return new_instance_;
- }
-
-    // Collect and replace all uses. We need to perform this twice since we
-    // will generate PHIs and additional uses as we create the default values
-    // for pred-gets. These values might be other references that are also
-    // being partially eliminated. By running just the replacement part again,
-    // we avoid having to keep another whole in-progress partial map around.
-    // Since we will have already handled all the other uses in the first
-    // pass, the second one will be quite fast.
- void FixupUses(bool first_pass) {
- ScopedArenaAllocator saa(GetGraph()->GetArenaStack());
- // Replace uses with materialized values.
- ScopedArenaVector<InstructionUse<HInstruction>> to_replace(saa.Adapter(kArenaAllocLSE));
- ScopedArenaVector<HInstruction*> to_remove(saa.Adapter(kArenaAllocLSE));
-    // Do we need to add a constructor-fence?
- ScopedArenaVector<InstructionUse<HConstructorFence>> constructor_fences(
- saa.Adapter(kArenaAllocLSE));
- ScopedArenaVector<InstructionUse<HInstruction>> to_predicate(saa.Adapter(kArenaAllocLSE));
-
- CollectReplacements(to_replace, to_remove, constructor_fences, to_predicate);
-
- if (!first_pass) {
- // If another partial creates new references they can only be in Phis or pred-get defaults
- // so they must be in the to_replace group.
- DCHECK(to_predicate.empty());
- DCHECK(constructor_fences.empty());
- DCHECK(to_remove.empty());
- }
-
- ReplaceInput(to_replace);
- RemoveAndReplaceInputs(to_remove);
- CreateConstructorFences(constructor_fences);
- PredicateInstructions(to_predicate);
-
- CHECK(OriginalNewInstance()->GetUses().empty())
- << OriginalNewInstance()->GetUses() << ", " << OriginalNewInstance()->GetEnvUses();
- }
-
- void AddMaterialization(HBasicBlock* blk, HInstruction* ins) {
- if (blk->GetBlockId() >= materializations_.size()) {
- // Make sure the materialization array is large enough, try to avoid
- // re-sizing too many times by giving extra space.
- materializations_.resize(blk->GetBlockId() * 2, nullptr);
- }
- DCHECK(materializations_[blk->GetBlockId()] == nullptr)
- << "Already have a materialization in block " << blk->GetBlockId() << ": "
- << *materializations_[blk->GetBlockId()] << " when trying to set materialization to "
- << *ins;
- materializations_[blk->GetBlockId()] = ins;
- LSE_VLOG << "In block " << blk->GetBlockId() << " materialization is " << *ins;
- helper_->NotifyNewMaterialization(ins);
- }
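
Note the growth policy above: when a block id lands past the end of the table, the vector is resized to twice that id rather than to id + 1, so a run of ascending ids costs O(log n) resizes instead of one per call. A sketch of the policy over a plain std::vector (the + 1 below is a guard for id == 0 that the real code does not need, since its table starts out non-empty):

#include <cassert>
#include <cstddef>
#include <vector>

// Record `value` at slot `id`, doubling the array as needed so repeated
// insertions at increasing ids stay cheap on average.
template <typename T>
void SetWithDoubling(std::vector<T*>& slots, size_t id, T* value) {
  if (id >= slots.size()) {
    slots.resize(id * 2 + 1, nullptr);
  }
  assert(slots[id] == nullptr && "each slot is set at most once");
  slots[id] = value;
}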
-
- bool HasMaterialization(HBasicBlock* blk) const {
- return blk->GetBlockId() < materializations_.size() &&
- materializations_[blk->GetBlockId()] != nullptr;
- }
-
- HInstruction* GetMaterialization(HBasicBlock* blk) const {
- if (materializations_.size() <= blk->GetBlockId() ||
- materializations_[blk->GetBlockId()] == nullptr) {
-        // This must be a materialization block added after the partial LSE of
-        // the current reference finished. Since every edge can have at most
-        // one materialization block added to it, we can just check the
-        // block's predecessor.
- DCHECK(helper_->IsMaterializationBlock(blk));
- blk = helper_->FindDominatingNonMaterializationBlock(blk);
- DCHECK(!helper_->IsMaterializationBlock(blk));
- }
- DCHECK_GT(materializations_.size(), blk->GetBlockId());
- DCHECK(materializations_[blk->GetBlockId()] != nullptr);
- return materializations_[blk->GetBlockId()];
- }
-
- void GenerateMaterializationValueFromPredecessors(HBasicBlock* blk) {
- DCHECK(std::none_of(GetNoEscapeSubgraph()->GetExcludedCohorts().begin(),
- GetNoEscapeSubgraph()->GetExcludedCohorts().end(),
- [&](const ExecutionSubgraph::ExcludedCohort& cohort) {
- return cohort.IsEntryBlock(blk);
- }));
- DCHECK(!HasMaterialization(blk));
- if (blk->IsExitBlock()) {
- return;
- } else if (blk->IsLoopHeader()) {
- // See comment in execution_subgraph.h. Currently we act as though every
- // allocation for partial elimination takes place in the entry block.
- // This simplifies the analysis by making it so any escape cohort
- // expands to contain any loops it is a part of. This is something that
- // we should rectify at some point. In either case however we can still
- // special case the loop-header since (1) currently the loop can't have
- // any merges between different cohort entries since the pre-header will
- // be the earliest place entry can happen and (2) even if the analysis
- // is improved to consider lifetime of the object WRT loops any values
- // which would require loop-phis would have to make the whole loop
- // escape anyway.
- // This all means we can always use value from the pre-header when the
- // block is the loop-header and we didn't already create a
- // materialization block. (NB when we do improve the analysis we will
- // need to modify the materialization creation code to deal with this
- // correctly.)
- HInstruction* pre_header_val =
- GetMaterialization(blk->GetLoopInformation()->GetPreHeader());
- AddMaterialization(blk, pre_header_val);
- return;
- }
- ScopedArenaAllocator saa(GetGraph()->GetArenaStack());
- ScopedArenaVector<HInstruction*> pred_vals(saa.Adapter(kArenaAllocLSE));
- pred_vals.reserve(blk->GetNumberOfPredecessors());
- for (HBasicBlock* pred : blk->GetPredecessors()) {
- DCHECK(HasMaterialization(pred));
- pred_vals.push_back(GetMaterialization(pred));
- }
- GenerateMaterializationValueFromPredecessorsDirect(blk, pred_vals);
- }
-
- void GenerateMaterializationValueFromPredecessorsForEntry(
- HBasicBlock* entry, const ScopedArenaVector<HInstruction*>& pred_vals) {
- DCHECK(std::any_of(GetNoEscapeSubgraph()->GetExcludedCohorts().begin(),
- GetNoEscapeSubgraph()->GetExcludedCohorts().end(),
- [&](const ExecutionSubgraph::ExcludedCohort& cohort) {
- return cohort.IsEntryBlock(entry);
- }));
- GenerateMaterializationValueFromPredecessorsDirect(entry, pred_vals);
- }
-
- private:
- template <typename InstructionType>
- struct InstructionUse {
- InstructionType* instruction_;
- size_t index_;
- };
-
- void ReplaceInput(const ScopedArenaVector<InstructionUse<HInstruction>>& to_replace) {
- for (auto& [ins, idx] : to_replace) {
- HInstruction* merged_inst = GetMaterialization(ins->GetBlock());
- if (ins->IsPhi() && merged_inst->IsPhi() && ins->GetBlock() == merged_inst->GetBlock()) {
-        // For Phis, we just pass through the appropriate inputs.
- ins->ReplaceInput(merged_inst->InputAt(idx), idx);
- } else {
- ins->ReplaceInput(merged_inst, idx);
- }
- }
- }
-
- void RemoveAndReplaceInputs(const ScopedArenaVector<HInstruction*>& to_remove) {
- for (HInstruction* ins : to_remove) {
- if (ins->GetBlock() == nullptr) {
- // Already dealt with.
- continue;
- }
- DCHECK(BeforeAllEscapes(ins->GetBlock())) << *ins;
- if (ins->IsInstanceFieldGet() || ins->IsInstanceFieldSet()) {
- bool instruction_has_users =
- ins->IsInstanceFieldGet() && (!ins->GetUses().empty() || !ins->GetEnvUses().empty());
- if (instruction_has_users) {
- // Make sure any remaining users of read are replaced.
- HInstruction* replacement =
- helper_->lse_->GetPartialValueAt(OriginalNewInstance(), ins);
- // NB ReplaceInput will remove a use from the list so this is
- // guaranteed to finish eventually.
- while (!ins->GetUses().empty()) {
- const HUseListNode<HInstruction*>& use = ins->GetUses().front();
- use.GetUser()->ReplaceInput(replacement, use.GetIndex());
- }
- while (!ins->GetEnvUses().empty()) {
- const HUseListNode<HEnvironment*>& use = ins->GetEnvUses().front();
- use.GetUser()->ReplaceInput(replacement, use.GetIndex());
- }
- } else {
- DCHECK(ins->GetUses().empty())
- << "Instruction has users!\n"
- << ins->DumpWithArgs() << "\nUsers are " << ins->GetUses();
- DCHECK(ins->GetEnvUses().empty())
- << "Instruction has users!\n"
- << ins->DumpWithArgs() << "\nUsers are " << ins->GetEnvUses();
- }
- ins->GetBlock()->RemoveInstruction(ins);
- } else {
-        // Can only be obj == other, obj != other, obj == obj (!?), or obj != obj (!?).
-        // Since PHIs are escapes as far as LSE is concerned and we are before
-        // any escapes, these are the only 4 options.
- DCHECK(ins->IsEqual() || ins->IsNotEqual()) << *ins;
- HInstruction* replacement;
- if (UNLIKELY(ins->InputAt(0) == ins->InputAt(1))) {
- replacement = ins->IsEqual() ? GetGraph()->GetIntConstant(1)
- : GetGraph()->GetIntConstant(0);
- } else {
- replacement = ins->IsEqual() ? GetGraph()->GetIntConstant(0)
- : GetGraph()->GetIntConstant(1);
- }
- ins->ReplaceWith(replacement);
- ins->GetBlock()->RemoveInstruction(ins);
- }
- }
- }
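
The else branch above folds comparisons that occur strictly before any escape: no other value can alias a not-yet-escaped allocation, so the result depends only on the operator and on whether both operands are the object itself. The same decision table as a standalone sketch:

// Fold cmp(obj, other) for an allocation that has not escaped yet.
// is_equal_op: true for ==, false for !=. same_operand: other is obj itself.
int FoldPreEscapeComparison(bool is_equal_op, bool same_operand) {
  if (same_operand) {
    return is_equal_op ? 1 : 0;  // obj == obj is true; obj != obj is false
  }
  return is_equal_op ? 0 : 1;    // nothing else can alias obj before escape
}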
-
- void CreateConstructorFences(
- const ScopedArenaVector<InstructionUse<HConstructorFence>>& constructor_fences) {
- if (!constructor_fences.empty()) {
- uint32_t pc = constructor_fences.front().instruction_->GetDexPc();
- for (auto& [cf, idx] : constructor_fences) {
- if (cf->GetInputs().size() == 1) {
- cf->GetBlock()->RemoveInstruction(cf);
- } else {
- cf->RemoveInputAt(idx);
- }
- }
- for (const ExecutionSubgraph::ExcludedCohort& ec :
- GetNoEscapeSubgraph()->GetExcludedCohorts()) {
- for (HBasicBlock* blk : ec.EntryBlocks()) {
- for (HBasicBlock* materializer :
- Filter(MakeIterationRange(blk->GetPredecessors()),
- [&](HBasicBlock* blk) { return helper_->IsMaterializationBlock(blk); })) {
- HInstruction* new_cf = new (GetGraph()->GetAllocator()) HConstructorFence(
- GetMaterialization(materializer), pc, GetGraph()->GetAllocator());
- materializer->InsertInstructionBefore(new_cf, materializer->GetLastInstruction());
- }
- }
- }
- }
- }
-
- void PredicateInstructions(
- const ScopedArenaVector<InstructionUse<HInstruction>>& to_predicate) {
- for (auto& [ins, idx] : to_predicate) {
- if (UNLIKELY(ins->GetBlock() == nullptr)) {
-        // Already handled due to obj == obj.
- continue;
- } else if (ins->IsInstanceFieldGet()) {
- // IFieldGet[obj] => PredicatedIFieldGet[PartialValue, obj]
- HInstruction* new_fget = new (GetGraph()->GetAllocator()) HPredicatedInstanceFieldGet(
- ins->AsInstanceFieldGet(),
- GetMaterialization(ins->GetBlock()),
- helper_->lse_->GetPartialValueAt(OriginalNewInstance(), ins));
- MaybeRecordStat(helper_->lse_->stats_, MethodCompilationStat::kPredicatedLoadAdded);
- ins->GetBlock()->InsertInstructionBefore(new_fget, ins);
- if (ins->GetType() == DataType::Type::kReference) {
- // Reference info is the same
- new_fget->SetReferenceTypeInfoIfValid(ins->GetReferenceTypeInfo());
- }
- // In this phase, substitute instructions are used only for the predicated get
- // default values which are used only if the partial singleton did not escape,
- // so the out value of the `new_fget` for the relevant cases is the same as
- // the default value.
- // TODO: Use the default value for materializing default values used by
- // other predicated loads to avoid some unnecessary Phis. (This shall
- // complicate the search for replacement in `ReplacementOrValue()`.)
- DCHECK(helper_->lse_->substitute_instructions_for_loads_[ins->GetId()] == nullptr);
- helper_->lse_->substitute_instructions_for_loads_[ins->GetId()] = new_fget;
- ins->ReplaceWith(new_fget);
- ins->ReplaceEnvUsesDominatedBy(ins, new_fget);
- CHECK(ins->GetEnvUses().empty() && ins->GetUses().empty())
- << "Instruction: " << *ins << " uses: " << ins->GetUses()
- << ", env: " << ins->GetEnvUses();
- ins->GetBlock()->RemoveInstruction(ins);
- } else if (ins->IsInstanceFieldSet()) {
- // Any predicated sets shouldn't require movement.
- ins->AsInstanceFieldSet()->SetIsPredicatedSet();
- MaybeRecordStat(helper_->lse_->stats_, MethodCompilationStat::kPredicatedStoreAdded);
- HInstruction* merged_inst = GetMaterialization(ins->GetBlock());
- ins->ReplaceInput(merged_inst, idx);
- } else {
-          // Comparisons need to be split into two.
- DCHECK(ins->IsEqual() || ins->IsNotEqual()) << "bad instruction " << *ins;
- bool this_is_first = idx == 0;
- if (ins->InputAt(0) == ins->InputAt(1)) {
- // This is a obj == obj or obj != obj.
- // No idea why anyone would do this but whatever.
- ins->ReplaceWith(GetGraph()->GetIntConstant(ins->IsEqual() ? 1 : 0));
- ins->GetBlock()->RemoveInstruction(ins);
- continue;
- } else {
- HInstruction* is_escaped = new (GetGraph()->GetAllocator())
- HNotEqual(GetMaterialization(ins->GetBlock()), GetGraph()->GetNullConstant());
- HInstruction* combine_inst =
- ins->IsEqual() ? static_cast<HInstruction*>(new (GetGraph()->GetAllocator()) HAnd(
- DataType::Type::kBool, is_escaped, ins))
- : static_cast<HInstruction*>(new (GetGraph()->GetAllocator()) HOr(
- DataType::Type::kBool, is_escaped, ins));
- ins->ReplaceInput(GetMaterialization(ins->GetBlock()), this_is_first ? 0 : 1);
- ins->GetBlock()->InsertInstructionBefore(is_escaped, ins);
- ins->GetBlock()->InsertInstructionAfter(combine_inst, ins);
- ins->ReplaceWith(combine_inst);
- combine_inst->ReplaceInput(ins, 1);
- }
- }
- }
- }
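
The field-get rewrite above pairs the materialized object with a tracked default value: conceptually, the predicated get reads the real field only on paths where the object actually materialized (that is, escaped) and otherwise yields the default. A behavioural sketch of that contract, with hypothetical types:

struct Obj { int field; };

// `target` is the materialization value: null on paths where the object
// never escaped. Only on those paths does the tracked default stand in
// for the eliminated field; otherwise the real field is read.
int PredicatedGet(const Obj* target, int default_value) {
  return target != nullptr ? target->field : default_value;
}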
-
-  // Figure out all the instructions we need to
-  // fix up/replace/remove/duplicate. Since this requires an iteration of an
-  // intrusive linked list, we want to do it only once and collect all the
-  // data here.
- void CollectReplacements(
- ScopedArenaVector<InstructionUse<HInstruction>>& to_replace,
- ScopedArenaVector<HInstruction*>& to_remove,
- ScopedArenaVector<InstructionUse<HConstructorFence>>& constructor_fences,
- ScopedArenaVector<InstructionUse<HInstruction>>& to_predicate) {
- size_t size = new_instance_->GetUses().SizeSlow();
- to_replace.reserve(size);
- to_remove.reserve(size);
- constructor_fences.reserve(size);
- to_predicate.reserve(size);
- for (auto& use : new_instance_->GetUses()) {
- HBasicBlock* blk =
- helper_->FindDominatingNonMaterializationBlock(use.GetUser()->GetBlock());
- if (InEscapeCohort(blk)) {
- LSE_VLOG << "Replacing " << *new_instance_ << " use in " << *use.GetUser() << " with "
- << *GetMaterialization(blk);
- to_replace.push_back({use.GetUser(), use.GetIndex()});
- } else if (IsPostEscape(blk)) {
- LSE_VLOG << "User " << *use.GetUser() << " after escapes!";
-        // The fields + cmp are normal uses. A Phi can only be here if it was
-        // generated by full LSE, so whatever store+load created the phi
-        // is the escape.
- if (use.GetUser()->IsPhi()) {
- to_replace.push_back({use.GetUser(), use.GetIndex()});
- } else {
- DCHECK(use.GetUser()->IsFieldAccess() ||
- use.GetUser()->IsEqual() ||
- use.GetUser()->IsNotEqual())
- << *use.GetUser() << "@" << use.GetIndex();
- to_predicate.push_back({use.GetUser(), use.GetIndex()});
- }
- } else if (use.GetUser()->IsConstructorFence()) {
- LSE_VLOG << "User " << *use.GetUser() << " being moved to materialization!";
- constructor_fences.push_back({use.GetUser()->AsConstructorFence(), use.GetIndex()});
- } else {
- LSE_VLOG << "User " << *use.GetUser() << " not contained in cohort!";
- to_remove.push_back(use.GetUser());
- }
- }
- DCHECK_EQ(
- to_replace.size() + to_remove.size() + constructor_fences.size() + to_predicate.size(),
- size);
- }
-
- void GenerateMaterializationValueFromPredecessorsDirect(
- HBasicBlock* blk, const ScopedArenaVector<HInstruction*>& pred_vals) {
- DCHECK(!pred_vals.empty());
- bool all_equal = std::all_of(pred_vals.begin() + 1, pred_vals.end(), [&](HInstruction* val) {
- return val == pred_vals.front();
- });
- if (LIKELY(all_equal)) {
- AddMaterialization(blk, pred_vals.front());
- } else {
- // Make a PHI for the predecessors.
- HPhi* phi = new (GetGraph()->GetAllocator()) HPhi(
- GetGraph()->GetAllocator(), kNoRegNumber, pred_vals.size(), DataType::Type::kReference);
- for (const auto& [ins, off] : ZipCount(MakeIterationRange(pred_vals))) {
- phi->SetRawInputAt(off, ins);
- }
- blk->AddPhi(phi);
- AddMaterialization(blk, phi);
- }
- }
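
GenerateMaterializationValueFromPredecessorsDirect spends a PHI only when the predecessors actually disagree; when every incoming value is the same instruction, that instruction is reused directly. The merge rule in isolation, over plain pointers:

#include <algorithm>
#include <vector>

struct Node {};  // stand-in for HInstruction

// Returns the single shared value, or nullptr when a PHI is required.
Node* TryCollapsePredecessorValues(const std::vector<Node*>& pred_vals) {
  if (pred_vals.empty()) {
    return nullptr;
  }
  bool all_equal = std::all_of(pred_vals.begin() + 1, pred_vals.end(),
                               [&](Node* v) { return v == pred_vals.front(); });
  return all_equal ? pred_vals.front() : nullptr;  // nullptr => build a PHI
}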
-
- HGraph* GetGraph() const {
- return helper_->GetGraph();
- }
-
- HNewInstance* new_instance_;
- PartialLoadStoreEliminationHelper* helper_;
- ArenaBitVector heap_locs_;
- ScopedArenaVector<HInstruction*> materializations_;
- const HeapLocationCollector& collector_;
- const ExecutionSubgraph* subgraph_;
- };
-
- ArrayRef<HeapReferenceData> GetHeapRefs() {
- return ArrayRef<HeapReferenceData>(heap_refs_);
- }
-
- bool IsMaterializationBlock(HBasicBlock* blk) const {
- return blk->GetBlockId() >= first_materialization_block_id_;
- }
-
- HBasicBlock* GetOrCreateMaterializationBlock(HBasicBlock* entry, size_t pred_num) {
- size_t idx = GetMaterializationBlockIndex(entry, pred_num);
- HBasicBlock* blk = materialization_blocks_[idx];
- if (blk == nullptr) {
- blk = new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph());
- GetGraph()->AddBlock(blk);
- LSE_VLOG << "creating materialization block " << blk->GetBlockId() << " on edge "
- << entry->GetPredecessors()[pred_num]->GetBlockId() << "->" << entry->GetBlockId();
- blk->AddInstruction(new (GetGraph()->GetAllocator()) HGoto());
- materialization_blocks_[idx] = blk;
- }
- return blk;
- }
-
- HBasicBlock* GetMaterializationBlock(HBasicBlock* entry, size_t pred_num) {
- HBasicBlock* out = materialization_blocks_[GetMaterializationBlockIndex(entry, pred_num)];
- DCHECK(out != nullptr) << "No materialization block for edge " << entry->GetBlockId() << "->"
- << entry->GetPredecessors()[pred_num]->GetBlockId();
- return out;
- }
-
- IterationRange<ArenaVector<HBasicBlock*>::const_iterator> IterateMaterializationBlocks() {
- return MakeIterationRange(GetGraph()->GetBlocks().begin() + first_materialization_block_id_,
- GetGraph()->GetBlocks().end());
- }
-
- void FixupPartialObjectUsers() {
- for (PartialLoadStoreEliminationHelper::HeapReferenceData& ref_data : GetHeapRefs()) {
- // Use the materialized instances to replace original instance
- ref_data.FixupUses(/*first_pass=*/true);
- CHECK(ref_data.OriginalNewInstance()->GetUses().empty())
- << ref_data.OriginalNewInstance()->GetUses() << ", "
- << ref_data.OriginalNewInstance()->GetEnvUses();
- }
- // This can cause new uses to be created due to the creation of phis/pred-get defaults
- for (PartialLoadStoreEliminationHelper::HeapReferenceData& ref_data : GetHeapRefs()) {
- // Only need to handle new phis/pred-get defaults. DCHECK that's all we find.
- ref_data.FixupUses(/*first_pass=*/false);
- CHECK(ref_data.OriginalNewInstance()->GetUses().empty())
- << ref_data.OriginalNewInstance()->GetUses() << ", "
- << ref_data.OriginalNewInstance()->GetEnvUses();
- }
- }
-
-  // Finds the first block that either is, or dominates, the given block and
-  // is not a materialization block.
- HBasicBlock* FindDominatingNonMaterializationBlock(HBasicBlock* blk) {
- if (LIKELY(!IsMaterializationBlock(blk))) {
-      // Not a materialization block, so the block itself.
- return blk;
- } else if (blk->GetNumberOfPredecessors() != 0) {
- // We're far enough along that the materialization blocks have been
- // inserted into the graph so no need to go searching.
- return blk->GetSinglePredecessor();
- }
- // Search through the materialization blocks to find where it will be
- // inserted.
- for (auto [mat, idx] : ZipCount(MakeIterationRange(materialization_blocks_))) {
- if (mat == blk) {
- size_t cur_pred_idx = idx % max_preds_per_block_;
- HBasicBlock* entry = GetGraph()->GetBlocks()[idx / max_preds_per_block_];
- return entry->GetPredecessors()[cur_pred_idx];
- }
- }
- LOG(FATAL) << "Unable to find materialization block position for " << blk->GetBlockId() << "!";
- return nullptr;
- }
-
- void InsertMaterializationBlocks() {
- for (auto [mat, idx] : ZipCount(MakeIterationRange(materialization_blocks_))) {
- if (mat == nullptr) {
- continue;
- }
- size_t cur_pred_idx = idx % max_preds_per_block_;
- HBasicBlock* entry = GetGraph()->GetBlocks()[idx / max_preds_per_block_];
- HBasicBlock* pred = entry->GetPredecessors()[cur_pred_idx];
- mat->InsertBetween(pred, entry);
- LSE_VLOG << "Adding materialization block " << mat->GetBlockId() << " on edge "
- << pred->GetBlockId() << "->" << entry->GetBlockId();
- }
- }
-
- // Replace any env-uses remaining of the partial singletons with the
- // appropriate phis and remove the instructions.
- void RemoveReplacedInstructions() {
- for (HeapReferenceData& ref_data : GetHeapRefs()) {
- CHECK(ref_data.OriginalNewInstance()->GetUses().empty())
- << ref_data.OriginalNewInstance()->GetUses() << ", "
- << ref_data.OriginalNewInstance()->GetEnvUses()
- << " inst is: " << ref_data.OriginalNewInstance();
- const auto& env_uses = ref_data.OriginalNewInstance()->GetEnvUses();
- while (!env_uses.empty()) {
- const HUseListNode<HEnvironment*>& use = env_uses.front();
- HInstruction* merged_inst =
- ref_data.GetMaterialization(use.GetUser()->GetHolder()->GetBlock());
- LSE_VLOG << "Replacing env use of " << *use.GetUser()->GetHolder() << "@" << use.GetIndex()
- << " with " << *merged_inst;
- use.GetUser()->ReplaceInput(merged_inst, use.GetIndex());
- }
- ref_data.OriginalNewInstance()->GetBlock()->RemoveInstruction(ref_data.OriginalNewInstance());
- }
- }
-
- // We need to make sure any allocations dominate their environment uses.
- // Technically we could probably remove the env-uses and be fine but this is easy.
- void ReorderMaterializationsForEnvDominance() {
- for (HBasicBlock* blk : IterateMaterializationBlocks()) {
- ScopedArenaAllocator alloc(alloc_->GetArenaStack());
- ArenaBitVector still_unsorted(
- &alloc, GetGraph()->GetCurrentInstructionId(), false, kArenaAllocLSE);
-      // This is guaranteed to be very short: we abandon LSE if there are
-      // >= kMaxNumberOfHeapLocations (32) heap locations, so that is the
-      // absolute maximum size this list can be. A selection sort is therefore
-      // fine and avoids the complicated recursive check that would be needed
-      // to ensure transitivity for std::sort.
- ScopedArenaVector<HNewInstance*> materializations(alloc.Adapter(kArenaAllocLSE));
- materializations.reserve(GetHeapRefs().size());
- for (HInstruction* ins :
- MakeSTLInstructionIteratorRange(HInstructionIterator(blk->GetInstructions()))) {
- if (ins->IsNewInstance()) {
- materializations.push_back(ins->AsNewInstance());
- still_unsorted.SetBit(ins->GetId());
- }
- }
- using Iter = ScopedArenaVector<HNewInstance*>::iterator;
- Iter unsorted_start = materializations.begin();
- Iter unsorted_end = materializations.end();
-      // Selection sort. Required since the only check we can easily perform
-      // is an is-before-all-unsorted check.
- while (unsorted_start != unsorted_end) {
- bool found_instruction = false;
- for (Iter candidate = unsorted_start; candidate != unsorted_end; ++candidate) {
- HNewInstance* ni = *candidate;
- if (std::none_of(ni->GetAllEnvironments().cbegin(),
- ni->GetAllEnvironments().cend(),
- [&](const HEnvironment* env) {
- return std::any_of(
- env->GetEnvInputs().cbegin(),
- env->GetEnvInputs().cend(),
- [&](const HInstruction* env_element) {
- return env_element != nullptr &&
- still_unsorted.IsBitSet(env_element->GetId());
- });
- })) {
- still_unsorted.ClearBit(ni->GetId());
- std::swap(*unsorted_start, *candidate);
- ++unsorted_start;
- found_instruction = true;
- break;
- }
- }
- CHECK(found_instruction) << "Unable to select next materialization instruction."
- << " Environments have a dependency loop!";
- }
-      // Reverse so that, as we prepend them, we end up with the correct order.
- auto reverse_iter = MakeIterationRange(materializations.rbegin(), materializations.rend());
- for (HNewInstance* ins : reverse_iter) {
- if (blk->GetFirstInstruction() != ins) {
-          // Don't do checks, since those verify the move is safe WRT
-          // ins->CanBeMoved, which for NewInstance is false.
- ins->MoveBefore(blk->GetFirstInstruction(), /*do_checks=*/false);
- }
- }
- }
- }
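
The sort above is driven by a readiness test rather than a comparator: an allocation may be placed only once none of its environment inputs are still unsorted, which amounts to a selection sort over a depends-on-nothing-unsorted predicate (std::sort would need a strict weak ordering that this relation cannot guarantee). The same scheme in miniature, with dependencies given as index lists:

#include <cassert>
#include <cstddef>
#include <vector>

struct Item {
  std::vector<size_t> deps;  // indexes of items this one must follow
};

// Orders item indexes so every item comes after all of its dependencies.
// O(n^2), which is fine for the tiny lists involved here.
std::vector<size_t> ReadinessSort(const std::vector<Item>& items) {
  std::vector<bool> placed(items.size(), false);
  std::vector<size_t> order;
  while (order.size() < items.size()) {
    bool found = false;
    for (size_t i = 0; i < items.size(); ++i) {
      if (placed[i]) continue;
      bool ready = true;
      for (size_t d : items[i].deps) {
        if (!placed[d]) { ready = false; break; }
      }
      if (ready) {
        placed[i] = true;
        order.push_back(i);
        found = true;
        break;
      }
    }
    assert(found && "dependency cycle between items");
  }
  return order;
}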
-
- private:
- void CollectInterestingHeapRefs() {
- // Get all the partials we need to move around.
- for (size_t i = 0; i < lse_->heap_location_collector_.GetNumberOfHeapLocations(); ++i) {
- ReferenceInfo* ri = lse_->heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
- if (ri->IsPartialSingleton() &&
- ri->GetReference()->GetBlock() != nullptr &&
- ri->GetNoEscapeSubgraph()->ContainsBlock(ri->GetReference()->GetBlock())) {
- RecordHeapRefField(ri->GetReference()->AsNewInstance(), i);
- }
- }
- }
-
- void RecordHeapRefField(HNewInstance* ni, size_t loc) {
- DCHECK(ni != nullptr);
- // This is likely to be very short so just do a linear search.
- auto it = std::find_if(heap_refs_.begin(), heap_refs_.end(), [&](HeapReferenceData& data) {
- return data.OriginalNewInstance() == ni;
- });
- HeapReferenceData& cur_ref =
- (it == heap_refs_.end())
- ? heap_refs_.emplace_back(this,
- ni,
- lse_->heap_location_collector_.GetHeapLocation(loc)
- ->GetReferenceInfo()
- ->GetNoEscapeSubgraph(),
- alloc_)
- : *it;
- cur_ref.AddHeapLocation(loc);
- }
-
-
- void NotifyNewMaterialization(HInstruction* ins) {
- if (ins->IsPhi()) {
- new_ref_phis_.push_back(ins->AsPhi());
- }
- }
-
- size_t GetMaterializationBlockIndex(HBasicBlock* blk, size_t pred_num) const {
- DCHECK_LT(blk->GetBlockId(), first_materialization_block_id_)
- << "block is a materialization block!";
- DCHECK_LT(pred_num, max_preds_per_block_);
- return blk->GetBlockId() * max_preds_per_block_ + pred_num;
- }
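
Materialization blocks are keyed by graph edge, and GetMaterializationBlockIndex flattens the (block, incoming edge) pair into a single vector slot in row-major order. A sketch of the addressing in both directions; the reverse mapping is what FindDominatingNonMaterializationBlock's fallback search relies on:

#include <cassert>
#include <cstddef>

// Row-major flattening of the (block id, predecessor number) key space.
// At most one materialization block can sit on any edge, so this table
// covers every possible materialization block without hashing.
size_t EdgeSlot(size_t block_id, size_t pred_num,
                size_t num_blocks, size_t max_preds_per_block) {
  assert(block_id < num_blocks);
  assert(pred_num < max_preds_per_block);
  return block_id * max_preds_per_block + pred_num;
}

// Recovers the edge from a slot index.
void SlotToEdge(size_t slot, size_t max_preds_per_block,
                size_t* block_id, size_t* pred_num) {
  *block_id = slot / max_preds_per_block;
  *pred_num = slot % max_preds_per_block;
}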
-
- HGraph* GetGraph() const {
- return lse_->GetGraph();
- }
-
- LSEVisitor* lse_;
- ScopedArenaAllocator* alloc_;
- ScopedArenaVector<HInstruction*> new_ref_phis_;
- ScopedArenaVector<HeapReferenceData> heap_refs_;
- size_t max_preds_per_block_;
- // An array of (# of non-materialization blocks) * max_preds_per_block
-  // arranged in block-id major order. Since we can have at most one
-  // materialization block on each edge, this is the maximum possible number
-  // of materialization blocks.
- ScopedArenaVector<HBasicBlock*> materialization_blocks_;
- size_t first_materialization_block_id_;
-
- friend void LSEVisitor::MovePartialEscapes();
-};
-
-// Work around C++ type-checking annoyances with not being able to forward-declare inner types.
-class HeapRefHolder
- : public std::reference_wrapper<PartialLoadStoreEliminationHelper::HeapReferenceData> {};
-
-HInstruction* LSEVisitor::SetupPartialMaterialization(PartialLoadStoreEliminationHelper& helper,
- HeapRefHolder&& holder,
- size_t pred_idx,
- HBasicBlock* entry) {
- PartialLoadStoreEliminationHelper::HeapReferenceData& ref_data = holder.get();
- HBasicBlock* old_pred = entry->GetPredecessors()[pred_idx];
- HInstruction* new_inst = ref_data.OriginalNewInstance();
- if (UNLIKELY(!new_inst->GetBlock()->Dominates(entry))) {
- LSE_VLOG << "Initial materialization in non-dominating block " << entry->GetBlockId()
- << " is null!";
- return GetGraph()->GetNullConstant();
- }
- HBasicBlock* bb = helper.GetOrCreateMaterializationBlock(entry, pred_idx);
- CHECK(bb != nullptr) << "entry " << entry->GetBlockId() << " -> " << old_pred->GetBlockId();
- HNewInstance* repl_create = new_inst->Clone(GetGraph()->GetAllocator())->AsNewInstance();
- repl_create->SetPartialMaterialization();
- bb->InsertInstructionBefore(repl_create, bb->GetLastInstruction());
- repl_create->CopyEnvironmentFrom(new_inst->GetEnvironment());
- MaybeRecordStat(stats_, MethodCompilationStat::kPartialAllocationMoved);
- LSE_VLOG << "In blk " << bb->GetBlockId() << " initial materialization is " << *repl_create;
- ref_data.AddMaterialization(bb, repl_create);
- const FieldInfo* info = nullptr;
- for (const HeapLocation* loc : ref_data.IterateLocations()) {
- size_t loc_off = heap_location_collector_.GetHeapLocationIndex(loc);
- info = field_infos_[loc_off];
- DCHECK(loc->GetIndex() == nullptr);
- Value value = ReplacementOrValue(heap_values_for_[old_pred->GetBlockId()][loc_off].value);
- if (value.NeedsLoopPhi() || value.IsMergedUnknown()) {
- Value repl = phi_placeholder_replacements_[PhiPlaceholderIndex(value.GetPhiPlaceholder())];
- DCHECK(repl.IsDefault() || repl.IsInvalid() || repl.IsInstruction())
- << repl << " from " << value << " pred is " << old_pred->GetBlockId();
- if (!repl.IsInvalid()) {
- value = repl;
- } else {
- FullyMaterializePhi(value.GetPhiPlaceholder(), info->GetFieldType());
- value = phi_placeholder_replacements_[PhiPlaceholderIndex(value.GetPhiPlaceholder())];
- }
- } else if (value.NeedsNonLoopPhi()) {
- Value repl = phi_placeholder_replacements_[PhiPlaceholderIndex(value.GetPhiPlaceholder())];
- DCHECK(repl.IsDefault() || repl.IsInvalid() || repl.IsInstruction())
- << repl << " from " << value << " pred is " << old_pred->GetBlockId();
- if (!repl.IsInvalid()) {
- value = repl;
- } else {
- MaterializeNonLoopPhis(value.GetPhiPlaceholder(), info->GetFieldType());
- value = phi_placeholder_replacements_[PhiPlaceholderIndex(value.GetPhiPlaceholder())];
- }
- }
- DCHECK(value.IsDefault() || value.IsInstruction())
- << GetGraph()->PrettyMethod() << ": " << value;
-
- if (!value.IsDefault() &&
- // shadow$_klass_ doesn't need to be manually initialized.
- MemberOffset(loc->GetOffset()) != mirror::Object::ClassOffset()) {
- CHECK(info != nullptr);
- HInstruction* set_value =
- new (GetGraph()->GetAllocator()) HInstanceFieldSet(repl_create,
- value.GetInstruction(),
- field_infos_[loc_off]->GetField(),
- loc->GetType(),
- MemberOffset(loc->GetOffset()),
- false,
- field_infos_[loc_off]->GetFieldIndex(),
- loc->GetDeclaringClassDefIndex(),
- field_infos_[loc_off]->GetDexFile(),
- 0u);
- bb->InsertInstructionAfter(set_value, repl_create);
- LSE_VLOG << "Adding " << *set_value << " for materialization setup!";
- }
- }
- return repl_create;
}
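
Stripped of the PHI-materialization plumbing, SetupPartialMaterialization clones the allocation into the edge block and then replays one store per tracked heap location whose incoming value is known and non-default; default values need no store because a fresh allocation is already zero/null initialized. A toy model of that per-location step (all names hypothetical):

#include <cassert>
#include <cstddef>
#include <vector>

struct FieldInit {
  size_t offset;  // heap-location slot within the object
  int value;      // the value LSE tracked on the incoming edge
};

struct MiniObj { std::vector<int> fields; };

// Clone an "allocation" and replay the known field values; zero (default)
// fields are skipped, mirroring the !value.IsDefault() check above.
MiniObj MaterializeAt(size_t num_fields, const std::vector<FieldInit>& known) {
  MiniObj clone{std::vector<int>(num_fields, 0)};
  for (const FieldInit& f : known) {
    assert(f.offset < num_fields);
    if (f.value != 0) {
      clone.fields[f.offset] = f.value;
    }
  }
  return clone;
}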
-HInstruction* LSEVisitor::GetPartialValueAt(HNewInstance* orig_new_inst, HInstruction* read) {
- size_t loc = heap_location_collector_.GetFieldHeapLocation(orig_new_inst, &read->GetFieldInfo());
- Value pred = ReplacementOrValue(intermediate_values_.find(read)->second);
- LSE_VLOG << "using " << pred << " as default value for " << *read;
- if (pred.IsInstruction()) {
- return pred.GetInstruction();
- } else if (pred.IsMergedUnknown() || pred.NeedsPhi()) {
- FullyMaterializePhi(pred.GetPhiPlaceholder(),
- heap_location_collector_.GetHeapLocation(loc)->GetType());
- HInstruction* res = Replacement(pred).GetInstruction();
- LSE_VLOG << pred << " materialized to " << res->DumpWithArgs();
- return res;
- } else if (pred.IsDefault()) {
- HInstruction* res = GetDefaultValue(read->GetType());
- LSE_VLOG << pred << " materialized to " << res->DumpWithArgs();
- return res;
- }
- LOG(FATAL) << "Unable to find unescaped value at " << read->DumpWithArgs()
- << "! This should be impossible! Value is " << pred;
- UNREACHABLE();
-}
-
-void LSEVisitor::MovePartialEscapes() {
- if (!ShouldPerformPartialLSE()) {
- return;
- }
-
- ScopedArenaAllocator saa(allocator_.GetArenaStack());
- PartialLoadStoreEliminationHelper helper(this, &saa);
-
- // Since for PHIs we now will have more information (since we know the object
- // hasn't escaped) we need to clear the old phi-replacements where we weren't
- // able to find the value.
- PrepareForPartialPhiComputation();
-
- for (PartialLoadStoreEliminationHelper::HeapReferenceData& ref_data : helper.GetHeapRefs()) {
- LSE_VLOG << "Creating materializations for " << *ref_data.OriginalNewInstance();
- // Setup entry and exit blocks.
- for (const auto& excluded_cohort : ref_data.GetNoEscapeSubgraph()->GetExcludedCohorts()) {
- // Setup materialization blocks.
- for (HBasicBlock* entry : excluded_cohort.EntryBlocksReversePostOrder()) {
- // Setup entries.
-          // TODO: Assuming we correctly break critical edges, every entry
-          // block must have only a single predecessor, so we could just put
-          // all this stuff in there. OTOH the simplifier can do it for us,
-          // and this is simpler to implement (giving clean separation between
-          // the original graph and materialization blocks), so for now we
-          // might as well have these new blocks.
- ScopedArenaAllocator pred_alloc(saa.GetArenaStack());
- ScopedArenaVector<HInstruction*> pred_vals(pred_alloc.Adapter(kArenaAllocLSE));
- pred_vals.reserve(entry->GetNumberOfPredecessors());
- for (const auto& [pred, pred_idx] :
- ZipCount(MakeIterationRange(entry->GetPredecessors()))) {
- DCHECK(!helper.IsMaterializationBlock(pred));
- if (excluded_cohort.IsEntryBlock(pred)) {
- pred_vals.push_back(ref_data.GetMaterialization(pred));
- continue;
- } else {
- pred_vals.push_back(SetupPartialMaterialization(helper, {ref_data}, pred_idx, entry));
- }
- }
- ref_data.GenerateMaterializationValueFromPredecessorsForEntry(entry, pred_vals);
- }
-
- // Setup exit block heap-values for later phi-generation.
- for (HBasicBlock* exit : excluded_cohort.ExitBlocks()) {
-        // Mark every exit of the cohorts as having a value so we can easily
-        // materialize the PHIs.
- // TODO By setting this we can easily use the normal MaterializeLoopPhis
- // (via FullyMaterializePhis) in order to generate the default-values
- // for predicated-gets. This has the unfortunate side effect of creating
- // somewhat more phis than are really needed (in some cases). We really
- // should try to eventually know that we can lower these PHIs to only
- // the non-escaping value in cases where it is possible. Currently this
- // is done to some extent in instruction_simplifier but we have more
- // information here to do the right thing.
- for (const HeapLocation* loc : ref_data.IterateLocations()) {
- size_t loc_off = heap_location_collector_.GetHeapLocationIndex(loc);
- // This Value::Default() is only used to fill in PHIs used as the
- // default value for PredicatedInstanceFieldGets. The actual value
- // stored there is meaningless since the Predicated-iget will use the
- // actual field value instead on these paths.
- heap_values_for_[exit->GetBlockId()][loc_off].value = Value::Default();
- }
- }
- }
-
-    // String the materialization through the graph:
-    // visit RPO to PHI the materialized object through the cohort.
- for (HBasicBlock* blk : GetGraph()->GetReversePostOrder()) {
- // NB This doesn't include materialization blocks.
- DCHECK(!helper.IsMaterializationBlock(blk))
- << "Materialization blocks should not be in RPO yet.";
- if (ref_data.HasMaterialization(blk)) {
- continue;
- } else if (ref_data.BeforeAllEscapes(blk)) {
- ref_data.AddMaterialization(blk, GetGraph()->GetNullConstant());
- continue;
- } else {
- ref_data.GenerateMaterializationValueFromPredecessors(blk);
- }
- }
- }
-
- // Once we've generated all the materializations we can update the users.
- helper.FixupPartialObjectUsers();
-
- // Actually put materialization blocks into the graph
- helper.InsertMaterializationBlocks();
-
- // Get rid of the original instructions.
- helper.RemoveReplacedInstructions();
-
- // Ensure everything is ordered correctly in the materialization blocks. This
- // involves moving every NewInstance to the top and ordering them so that any
- // required env-uses are correctly ordered.
- helper.ReorderMaterializationsForEnvDominance();
-}
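
MovePartialEscapes runs a fixed pipeline, and the ordering is load-bearing: users can only be fixed up once every block has a materialization, and the new blocks are spliced in afterwards precisely so the RPO walk never sees them. A skeletal restatement of the phase order, with empty hypothetical hooks standing in for the helpers above:

struct PartialLsePassSketch {
  void CreateEntryMaterializations() { /* per excluded-cohort entry edge */ }
  void PropagateMaterializationsRpo() { /* null before escapes, PHIs after */ }
  void FixupPartialObjectUsers() { /* two passes: new PHIs can add uses */ }
  void InsertMaterializationBlocks() { /* splice blocks onto their edges */ }
  void RemoveReplacedInstructions() { /* original allocations die here */ }
  void ReorderMaterializationsForEnvDominance() { /* env-dominance order */ }

  void Run() {
    CreateEntryMaterializations();
    PropagateMaterializationsRpo();
    FixupPartialObjectUsers();
    InsertMaterializationBlocks();
    RemoveReplacedInstructions();
    ReorderMaterializationsForEnvDominance();
  }
};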
void LSEVisitor::FinishFullLSE() {
// Remove recorded load instructions that should be eliminated.
@@ -4006,9 +2847,8 @@ class LSEVisitorWrapper : public DeletableArenaObject<kArenaAllocLSE> {
public:
LSEVisitorWrapper(HGraph* graph,
const HeapLocationCollector& heap_location_collector,
- bool perform_partial_lse,
OptimizingCompilerStats* stats)
- : lse_visitor_(graph, heap_location_collector, perform_partial_lse, stats) {}
+ : lse_visitor_(graph, heap_location_collector, stats) {}
void Run() {
lse_visitor_.Run();
@@ -4018,7 +2858,7 @@ class LSEVisitorWrapper : public DeletableArenaObject<kArenaAllocLSE> {
LSEVisitor lse_visitor_;
};
-bool LoadStoreElimination::Run(bool enable_partial_lse) {
+bool LoadStoreElimination::Run() {
if (graph_->IsDebuggable()) {
// Debugger may set heap values or trigger deoptimization of callers.
// Skip this optimization.
@@ -4031,11 +2871,7 @@ bool LoadStoreElimination::Run(bool enable_partial_lse) {
// O(1) though.
graph_->ComputeReachabilityInformation();
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_,
- stats_,
- &allocator,
- enable_partial_lse ? LoadStoreAnalysisType::kFull
- : LoadStoreAnalysisType::kBasic);
+ LoadStoreAnalysis lsa(graph_, stats_, &allocator);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
if (heap_location_collector.GetNumberOfHeapLocations() == 0) {
@@ -4050,8 +2886,8 @@ bool LoadStoreElimination::Run(bool enable_partial_lse) {
return false;
}
- std::unique_ptr<LSEVisitorWrapper> lse_visitor(new (&allocator) LSEVisitorWrapper(
- graph_, heap_location_collector, enable_partial_lse, stats_));
+ std::unique_ptr<LSEVisitorWrapper> lse_visitor(
+ new (&allocator) LSEVisitorWrapper(graph_, heap_location_collector, stats_));
lse_visitor->Run();
return true;
}
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
index 42de803ebd..e77168547d 100644
--- a/compiler/optimizing/load_store_elimination.h
+++ b/compiler/optimizing/load_store_elimination.h
@@ -26,10 +26,6 @@ class SideEffectsAnalysis;
class LoadStoreElimination : public HOptimization {
public:
- // Whether or not we should attempt partial Load-store-elimination which
- // requires additional blocks and predicated instructions.
- static constexpr bool kEnablePartialLSE = false;
-
// Controls whether to enable VLOG(compiler) logs explaining the transforms taking place.
static constexpr bool kVerboseLoggingMode = false;
@@ -38,12 +34,7 @@ class LoadStoreElimination : public HOptimization {
const char* name = kLoadStoreEliminationPassName)
: HOptimization(graph, name, stats) {}
- bool Run() override {
- return Run(kEnablePartialLSE);
- }
-
- // Exposed for testing.
- bool Run(bool enable_partial_lse);
+ bool Run();
static constexpr const char* kLoadStoreEliminationPassName = "load_store_elimination";
diff --git a/compiler/optimizing/load_store_elimination_test.cc b/compiler/optimizing/load_store_elimination_test.cc
index eb0711343d..0775051eb4 100644
--- a/compiler/optimizing/load_store_elimination_test.cc
+++ b/compiler/optimizing/load_store_elimination_test.cc
@@ -68,47 +68,27 @@ class LoadStoreEliminationTestBase : public SuperTest, public OptimizingUnitTest
}
}
- void PerformLSE(bool with_partial = true) {
+ void PerformLSE() {
graph_->BuildDominatorTree();
LoadStoreElimination lse(graph_, /*stats=*/nullptr);
- lse.Run(with_partial);
+ lse.Run();
std::ostringstream oss;
EXPECT_TRUE(CheckGraph(oss)) << oss.str();
}
- void PerformLSEWithPartial(const AdjacencyListGraph& blks) {
- // PerformLSE expects this to be empty.
+ void PerformLSE(const AdjacencyListGraph& blks) {
+ // PerformLSE expects this to be empty, and the creation of
+ // an `AdjacencyListGraph` computes it.
graph_->ClearDominanceInformation();
if (kDebugLseTests) {
LOG(INFO) << "Pre LSE " << blks;
}
- PerformLSE(/*with_partial=*/ true);
+ PerformLSE();
if (kDebugLseTests) {
LOG(INFO) << "Post LSE " << blks;
}
}
- void PerformLSENoPartial(const AdjacencyListGraph& blks) {
- // PerformLSE expects this to be empty.
- graph_->ClearDominanceInformation();
- if (kDebugLseTests) {
- LOG(INFO) << "Pre LSE " << blks;
- }
- PerformLSE(/*with_partial=*/ false);
- if (kDebugLseTests) {
- LOG(INFO) << "Post LSE " << blks;
- }
- }
-
- void PerformSimplifications(const AdjacencyListGraph& blks) {
- InstructionSimplifier simp(graph_, /*codegen=*/nullptr);
- simp.Run();
-
- if (kDebugLseTests) {
- LOG(INFO) << "Post simplification " << blks;
- }
- }
-
// Create instructions shared among tests.
void CreateEntryBlockInstructions() {
HInstruction* c1 = graph_->GetIntConstant(1);
@@ -327,190 +307,6 @@ std::ostream& operator<<(std::ostream& os, const TestOrder& ord) {
}
}
-class OrderDependentTestGroup
- : public LoadStoreEliminationTestBase<CommonCompilerTestWithParam<TestOrder>> {};
-
-// Various configs we can use for testing. Currently used in PartialComparison tests.
-struct PartialComparisonKind {
- public:
- enum class Type : uint8_t { kEquals, kNotEquals };
- enum class Target : uint8_t { kNull, kValue, kSelf };
- enum class Position : uint8_t { kLeft, kRight };
-
- const Type type_;
- const Target target_;
- const Position position_;
-
- bool IsDefinitelyFalse() const {
- return !IsPossiblyTrue();
- }
- bool IsPossiblyFalse() const {
- return !IsDefinitelyTrue();
- }
- bool IsDefinitelyTrue() const {
- if (target_ == Target::kSelf) {
- return type_ == Type::kEquals;
- } else if (target_ == Target::kNull) {
- return type_ == Type::kNotEquals;
- } else {
- return false;
- }
- }
- bool IsPossiblyTrue() const {
- if (target_ == Target::kSelf) {
- return type_ == Type::kEquals;
- } else if (target_ == Target::kNull) {
- return type_ == Type::kNotEquals;
- } else {
- return true;
- }
- }
- std::ostream& Dump(std::ostream& os) const {
- os << "PartialComparisonKind{" << (type_ == Type::kEquals ? "kEquals" : "kNotEquals") << ", "
- << (target_ == Target::kNull ? "kNull" : (target_ == Target::kSelf ? "kSelf" : "kValue"))
- << ", " << (position_ == Position::kLeft ? "kLeft" : "kRight") << "}";
- return os;
- }
-};
-
-std::ostream& operator<<(std::ostream& os, const PartialComparisonKind& comp) {
- return comp.Dump(os);
-}
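
The four predicates above reduce to a small truth table over (target, operator): comparing a never-escaped object against itself or against null is statically decidable, while comparison against an arbitrary value stays open until we know whether the object escaped. A re-derivation of the definitely-true arm with a couple of checks (a sketch mirroring the struct's logic, not part of the original tests):

#include <cassert>

enum class Type { kEquals, kNotEquals };
enum class Target { kNull, kValue, kSelf };

// Mirrors PartialComparisonKind::IsDefinitelyTrue above.
bool IsDefinitelyTrue(Type type, Target target) {
  if (target == Target::kSelf) return type == Type::kEquals;     // obj == obj
  if (target == Target::kNull) return type == Type::kNotEquals;  // obj != null
  return false;  // comparisons against other values stay undecided
}

int main() {
  assert(IsDefinitelyTrue(Type::kEquals, Target::kSelf));
  assert(IsDefinitelyTrue(Type::kNotEquals, Target::kNull));
  assert(!IsDefinitelyTrue(Type::kEquals, Target::kValue));  // needs escape info
  return 0;
}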
-
-class PartialComparisonTestGroup
- : public LoadStoreEliminationTestBase<CommonCompilerTestWithParam<PartialComparisonKind>> {
- public:
- enum class ComparisonPlacement {
- kBeforeEscape,
- kInEscape,
- kAfterEscape,
- };
- void CheckFinalInstruction(HInstruction* ins, ComparisonPlacement placement) {
- using Target = PartialComparisonKind::Target;
- using Type = PartialComparisonKind::Type;
- using Position = PartialComparisonKind::Position;
- PartialComparisonKind kind = GetParam();
- if (ins->IsIntConstant()) {
- if (kind.IsDefinitelyTrue()) {
- EXPECT_TRUE(ins->AsIntConstant()->IsTrue()) << kind << " " << *ins;
- } else if (kind.IsDefinitelyFalse()) {
- EXPECT_TRUE(ins->AsIntConstant()->IsFalse()) << kind << " " << *ins;
- } else {
- EXPECT_EQ(placement, ComparisonPlacement::kBeforeEscape);
- EXPECT_EQ(kind.target_, Target::kValue);
-      // We are before the escape, so the value is not the object.
- if (kind.type_ == Type::kEquals) {
- EXPECT_TRUE(ins->AsIntConstant()->IsFalse()) << kind << " " << *ins;
- } else {
- EXPECT_TRUE(ins->AsIntConstant()->IsTrue()) << kind << " " << *ins;
- }
- }
- return;
- }
- EXPECT_NE(placement, ComparisonPlacement::kBeforeEscape)
- << "For comparisons before escape we should always be able to transform into a constant."
- << " Instead we got:" << std::endl << ins->DumpWithArgs();
- if (placement == ComparisonPlacement::kInEscape) {
- // Should be the same type.
- ASSERT_TRUE(ins->IsEqual() || ins->IsNotEqual()) << *ins;
- HInstruction* other = kind.position_ == Position::kLeft ? ins->AsBinaryOperation()->GetRight()
- : ins->AsBinaryOperation()->GetLeft();
- if (kind.target_ == Target::kSelf) {
- EXPECT_INS_EQ(ins->AsBinaryOperation()->GetLeft(), ins->AsBinaryOperation()->GetRight())
- << " ins is: " << *ins;
- } else if (kind.target_ == Target::kNull) {
- EXPECT_INS_EQ(other, graph_->GetNullConstant()) << " ins is: " << *ins;
- } else {
- EXPECT_TRUE(other->IsStaticFieldGet()) << " ins is: " << *ins;
- }
- if (kind.type_ == Type::kEquals) {
- EXPECT_TRUE(ins->IsEqual()) << *ins;
- } else {
- EXPECT_TRUE(ins->IsNotEqual()) << *ins;
- }
- } else {
- ASSERT_EQ(placement, ComparisonPlacement::kAfterEscape);
- if (kind.type_ == Type::kEquals) {
- // obj == <anything> can only be true if (1) it's obj == obj or (2) obj has escaped.
- ASSERT_TRUE(ins->IsAnd()) << ins->DumpWithArgs();
- EXPECT_TRUE(ins->InputAt(1)->IsEqual()) << ins->DumpWithArgs();
- } else {
- // obj != <anything> is true if (2) obj has escaped.
- ASSERT_TRUE(ins->IsOr()) << ins->DumpWithArgs();
- EXPECT_TRUE(ins->InputAt(1)->IsNotEqual()) << ins->DumpWithArgs();
- }
-    // Check that the first part of the AND is the obj-has-escaped check.
- ASSERT_TRUE(ins->InputAt(0)->IsNotEqual()) << ins->DumpWithArgs();
- EXPECT_TRUE(ins->InputAt(0)->InputAt(0)->IsPhi()) << ins->DumpWithArgs();
- EXPECT_TRUE(ins->InputAt(0)->InputAt(1)->IsNullConstant()) << ins->DumpWithArgs();
-    // Check that the second part of the AND is the comparison with the other value.
- EXPECT_INS_EQ(ins->InputAt(1)->InputAt(kind.position_ == Position::kLeft ? 0 : 1),
- ins->InputAt(0)->InputAt(0))
- << ins->DumpWithArgs();
- }
- }
-
- struct ComparisonInstructions {
- void AddSetup(HBasicBlock* blk) const {
- for (HInstruction* i : setup_instructions_) {
- blk->AddInstruction(i);
- }
- }
-
- void AddEnvironment(HEnvironment* env) const {
- for (HInstruction* i : setup_instructions_) {
- if (i->NeedsEnvironment()) {
- i->CopyEnvironmentFrom(env);
- }
- }
- }
-
- const std::vector<HInstruction*> setup_instructions_;
- HInstruction* const cmp_;
- };
-
- ComparisonInstructions GetComparisonInstructions(HInstruction* partial) {
- PartialComparisonKind kind = GetParam();
- std::vector<HInstruction*> setup;
- HInstruction* target_other;
- switch (kind.target_) {
- case PartialComparisonKind::Target::kSelf:
- target_other = partial;
- break;
- case PartialComparisonKind::Target::kNull:
- target_other = graph_->GetNullConstant();
- break;
- case PartialComparisonKind::Target::kValue: {
- HInstruction* cls = MakeClassLoad();
- HInstruction* static_read =
- new (GetAllocator()) HStaticFieldGet(cls,
- /* field= */ nullptr,
- DataType::Type::kReference,
- /* field_offset= */ MemberOffset(40),
- /* is_volatile= */ false,
- /* field_idx= */ 0,
- /* declaring_class_def_index= */ 0,
- graph_->GetDexFile(),
- /* dex_pc= */ 0);
- setup.push_back(cls);
- setup.push_back(static_read);
- target_other = static_read;
- break;
- }
- }
- HInstruction* target_left;
- HInstruction* target_right;
- std::tie(target_left, target_right) = kind.position_ == PartialComparisonKind::Position::kLeft
- ? std::pair{partial, target_other}
- : std::pair{target_other, partial};
- HInstruction* cmp =
- kind.type_ == PartialComparisonKind::Type::kEquals
- ? static_cast<HInstruction*>(new (GetAllocator()) HEqual(target_left, target_right))
- : static_cast<HInstruction*>(new (GetAllocator()) HNotEqual(target_left, target_right));
- return {setup, cmp};
- }
-};
-
TEST_F(LoadStoreEliminationTest, ArrayGetSetElimination) {
CreateTestControlFlowGraph();
@@ -2083,7 +1879,7 @@ TEST_F(LoadStoreEliminationTest, PartialUnknownMerge) {
SetupExit(exit);
- PerformLSENoPartial(blks);
+ PerformLSE(blks);
EXPECT_INS_RETAINED(read_bottom);
EXPECT_INS_RETAINED(write_c1);
@@ -2098,84 +1894,6 @@ TEST_F(LoadStoreEliminationTest, PartialUnknownMerge) {
// // LEFT
// obj.field = 1;
// call_func(obj);
-// foo_r = obj.field
-// } else {
-// // TO BE ELIMINATED
-// obj.field = 2;
-// // RIGHT
-// // TO BE ELIMINATED
-// foo_l = obj.field;
-// }
-// EXIT
-// return PHI(foo_l, foo_r)
-TEST_F(LoadStoreEliminationTest, PartialLoadElimination) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit_REAL",
- { { "entry", "left" },
- { "entry", "right" },
- { "left", "exit" },
- { "right", "exit" },
- { "exit", "exit_REAL" } }));
- HBasicBlock* entry = blks.Get("entry");
- HBasicBlock* left = blks.Get("left");
- HBasicBlock* right = blks.Get("right");
- HBasicBlock* exit = blks.Get("exit");
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* c2 = graph_->GetIntConstant(2);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* read_left = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(16));
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(write_left);
- left->AddInstruction(call_left);
- left->AddInstruction(read_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(16));
- HInstruction* read_right = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(16));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(read_right);
- right->AddInstruction(goto_right);
-
- HPhi* phi_final = MakePhi({read_left, read_right});
- HInstruction* return_exit = new (GetAllocator()) HReturn(phi_final);
- exit->AddPhi(phi_final);
- exit->AddInstruction(return_exit);
-
- // PerformLSE expects this to be empty.
- graph_->ClearDominanceInformation();
- PerformLSE();
-
- ASSERT_TRUE(IsRemoved(read_right));
- ASSERT_FALSE(IsRemoved(read_left));
- ASSERT_FALSE(IsRemoved(phi_final));
- ASSERT_TRUE(phi_final->GetInputs()[1] == c2);
- ASSERT_TRUE(phi_final->GetInputs()[0] == read_left);
- ASSERT_TRUE(IsRemoved(write_right));
-}
-
-// // ENTRY
-// obj = new Obj();
-// if (parameter_value) {
-// // LEFT
-// obj.field = 1;
-// call_func(obj);
// // We don't know what obj.field is now, so we aren't able to eliminate the read below!
// } else {
// // DO NOT ELIMINATE
@@ -2231,7 +1949,7 @@ TEST_F(LoadStoreEliminationTest, PartialLoadPreserved) {
exit->AddInstruction(read_bottom);
exit->AddInstruction(return_exit);
- PerformLSENoPartial(blks);
+ PerformLSE(blks);
EXPECT_INS_RETAINED(read_bottom) << *read_bottom;
EXPECT_INS_RETAINED(write_right) << *write_right;
@@ -2322,7 +2040,7 @@ TEST_F(LoadStoreEliminationTest, PartialLoadPreserved2) {
exit->AddInstruction(read_bottom);
exit->AddInstruction(return_exit);
- PerformLSENoPartial(blks);
+ PerformLSE(blks);
EXPECT_INS_RETAINED(read_bottom);
EXPECT_INS_RETAINED(write_right_first);
@@ -2334,2090 +2052,6 @@ TEST_F(LoadStoreEliminationTest, PartialLoadPreserved2) {
// if (parameter_value) {
// // LEFT
// // DO NOT ELIMINATE
-// escape(obj);
-// obj.field = 1;
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// }
-// EXIT
-// ELIMINATE
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PartialLoadElimination2) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* c2 = graph_->GetIntConstant(2);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(write_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- // PerformLSE expects this to be empty.
- graph_->ClearDominanceInformation();
- PerformLSE();
-
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_REMOVED(write_right);
- EXPECT_INS_RETAINED(write_left);
- EXPECT_INS_RETAINED(call_left);
-}
-
-template<typename Iter, typename Func>
-typename Iter::value_type FindOrNull(Iter begin, Iter end, Func func) {
- static_assert(std::is_pointer_v<typename Iter::value_type>);
- auto it = std::find_if(begin, end, func);
- if (it == end) {
- return nullptr;
- } else {
- return *it;
- }
-}
-
-// // ENTRY
-// Obj new_inst = new Obj();
-// new_inst.foo = 12;
-// Obj obj;
-// Obj out;
-// int first;
-// if (param0) {
-// // ESCAPE_ROUTE
-// if (param1) {
-// // LEFT_START
-// if (param2) {
-// // LEFT_LEFT
-// obj = new_inst;
-// } else {
-// // LEFT_RIGHT
-// obj = obj_param;
-// }
-// // LEFT_MERGE
-// // technically the phi is enough to cause an escape but might as well be
-// // thorough.
-// // obj = phi[new_inst, param]
-// escape(obj);
-// out = obj;
-// } else {
-// // RIGHT
-// out = obj_param;
-// }
-// // EXIT
-// // Can't do anything with this since we don't have good tracking for the heap-locations
-// // out = phi[param, phi[new_inst, param]]
-// first = out.foo
-// } else {
-// new_inst.foo = 15;
-// first = 13;
-// }
-// // first = phi[out.foo, 13]
-// return first + new_inst.foo;
-TEST_F(LoadStoreEliminationTest, PartialPhiPropagation) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "escape_route"},
- {"entry", "noescape_route"},
- {"escape_route", "left"},
- {"escape_route", "right"},
- {"left", "left_left"},
- {"left", "left_right"},
- {"left_left", "left_merge"},
- {"left_right", "left_merge"},
- {"left_merge", "escape_end"},
- {"right", "escape_end"},
- {"escape_end", "breturn"},
- {"noescape_route", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
- GET_BLOCK(left_left);
- GET_BLOCK(left_right);
- GET_BLOCK(left_merge);
- GET_BLOCK(escape_end);
- GET_BLOCK(escape_route);
- GET_BLOCK(noescape_route);
-#undef GET_BLOCK
- EnsurePredecessorOrder(escape_end, {left_merge, right});
- EnsurePredecessorOrder(left_merge, {left_left, left_right});
- EnsurePredecessorOrder(breturn, {escape_end, noescape_route});
- HInstruction* param0 = MakeParam(DataType::Type::kBool);
- HInstruction* param1 = MakeParam(DataType::Type::kBool);
- HInstruction* param2 = MakeParam(DataType::Type::kBool);
- HInstruction* obj_param = MakeParam(DataType::Type::kReference);
- HInstruction* c12 = graph_->GetIntConstant(12);
- HInstruction* c13 = graph_->GetIntConstant(13);
- HInstruction* c15 = graph_->GetIntConstant(15);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* store = MakeIFieldSet(new_inst, c12, MemberOffset(32));
- HInstruction* if_param0 = new (GetAllocator()) HIf(param0);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(store);
- entry->AddInstruction(if_param0);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* store_noescape = MakeIFieldSet(new_inst, c15, MemberOffset(32));
- noescape_route->AddInstruction(store_noescape);
- noescape_route->AddInstruction(new (GetAllocator()) HGoto());
-
- escape_route->AddInstruction(new (GetAllocator()) HIf(param1));
-
- HInstruction* if_left = new (GetAllocator()) HIf(param2);
- left->AddInstruction(if_left);
-
- HInstruction* goto_left_left = new (GetAllocator()) HGoto();
- left_left->AddInstruction(goto_left_left);
-
- HInstruction* goto_left_right = new (GetAllocator()) HGoto();
- left_right->AddInstruction(goto_left_right);
-
- HPhi* left_phi = MakePhi({obj_param, new_inst});
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { left_phi });
- HInstruction* goto_left_merge = new (GetAllocator()) HGoto();
- left_merge->AddPhi(left_phi);
- left_merge->AddInstruction(call_left);
- left_merge->AddInstruction(goto_left_merge);
- left_phi->SetCanBeNull(true);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(goto_right);
-
- HPhi* escape_end_phi = MakePhi({left_phi, obj_param});
- HInstruction* read_escape_end =
- MakeIFieldGet(escape_end_phi, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* goto_escape_end = new (GetAllocator()) HGoto();
- escape_end->AddPhi(escape_end_phi);
- escape_end->AddInstruction(read_escape_end);
- escape_end->AddInstruction(goto_escape_end);
-
- HPhi* return_phi = MakePhi({read_escape_end, c13});
- HInstruction* read_exit = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* add_exit = new (GetAllocator()) HAdd(DataType::Type::kInt32, return_phi, read_exit);
- HInstruction* return_exit = new (GetAllocator()) HReturn(add_exit);
- breturn->AddPhi(return_phi);
- breturn->AddInstruction(read_exit);
- breturn->AddInstruction(add_exit);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_);
- std::vector<HPhi*> all_return_phis;
- std::tie(all_return_phis) = FindAllInstructions<HPhi>(graph_, breturn);
- EXPECT_EQ(all_return_phis.size(), 3u);
- EXPECT_INS_RETAINED(return_phi);
- EXPECT_TRUE(std::find(all_return_phis.begin(), all_return_phis.end(), return_phi) !=
- all_return_phis.end());
- HPhi* instance_phi =
- FindOrNull(all_return_phis.begin(), all_return_phis.end(), [&](HPhi* phi) {
- return phi != return_phi && phi->GetType() == DataType::Type::kReference;
- });
- ASSERT_NE(instance_phi, nullptr);
- HPhi* value_phi = FindOrNull(all_return_phis.begin(), all_return_phis.end(), [&](HPhi* phi) {
- return phi != return_phi && phi->GetType() == DataType::Type::kInt32;
- });
- ASSERT_NE(value_phi, nullptr);
- EXPECT_INS_EQ(
- instance_phi->InputAt(0),
- FindSingleInstruction<HNewInstance>(graph_, escape_route->GetSinglePredecessor()));
- // Check materialize block
- EXPECT_INS_EQ(FindSingleInstruction<HInstanceFieldSet>(
- graph_, escape_route->GetSinglePredecessor())
- ->InputAt(1),
- c12);
-
- EXPECT_INS_EQ(instance_phi->InputAt(1), graph_->GetNullConstant());
- EXPECT_INS_EQ(value_phi->InputAt(0), graph_->GetIntConstant(0));
- EXPECT_INS_EQ(value_phi->InputAt(1), c15);
- EXPECT_INS_REMOVED(store_noescape);
- EXPECT_INS_EQ(pred_get->GetTarget(), instance_phi);
- EXPECT_INS_EQ(pred_get->GetDefaultValue(), value_phi);
-}
-
-// // ENTRY
-// // To be moved
-// // NB The order is important. By having the alloc and store of obj1 before
-// // obj2 we ensure we'll build the materialization for obj1 first (just due
-// // to how we iterate).
-// obj1 = new Obj();
-// obj2 = new Obj(); // has env[obj1]
-// // Swap the order of these
-// obj1.foo = param_obj1;
-// obj2.foo = param_obj2;
-// if (param1) {
-// // LEFT
-// obj2.foo = obj1;
-// if (param2) {
-// // LEFT_LEFT
-// escape(obj2);
-// } else {}
-// } else {}
-// return select(param3, obj1.foo, obj2.foo);
-// EXIT
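-//
-// Expected shape (inferred from the checks below): both allocations and both
-// reads are removed; the reads become HPredicatedInstanceFieldGets selected by
-// the original HSelect, with targets that are phis over the materializations
-// and null, and with default values built from param_obj1/param_obj2 and
-// obj1's materialization.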
-TEST_P(OrderDependentTestGroup, PredicatedUse) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "left_left"},
- {"left", "left_right"},
- {"left_left", "left_end"},
- {"left_right", "left_end"},
- {"left_end", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(right);
- GET_BLOCK(left);
- GET_BLOCK(left_left);
- GET_BLOCK(left_right);
- GET_BLOCK(left_end);
-#undef GET_BLOCK
- TestOrder order = GetParam();
- EnsurePredecessorOrder(breturn, {left_end, right});
- EnsurePredecessorOrder(left_end, {left_left, left_right});
- HInstruction* param1 = MakeParam(DataType::Type::kBool);
- HInstruction* param2 = MakeParam(DataType::Type::kBool);
- HInstruction* param3 = MakeParam(DataType::Type::kBool);
- HInstruction* param_obj1 = MakeParam(DataType::Type::kReference);
- HInstruction* param_obj2 = MakeParam(DataType::Type::kReference);
-
- HInstruction* cls1 = MakeClassLoad();
- HInstruction* cls2 = MakeClassLoad();
- HInstruction* new_inst1 = MakeNewInstance(cls1);
- HInstruction* new_inst2 = MakeNewInstance(cls2);
- HInstruction* store1 = MakeIFieldSet(new_inst1, param_obj1, MemberOffset(32));
- HInstruction* store2 = MakeIFieldSet(new_inst2, param_obj2, MemberOffset(32));
- HInstruction* null_const = graph_->GetNullConstant();
- HInstruction* if_inst = new (GetAllocator()) HIf(param1);
- entry->AddInstruction(cls1);
- entry->AddInstruction(cls2);
- entry->AddInstruction(new_inst1);
- entry->AddInstruction(new_inst2);
- if (order == TestOrder::kSameAsAlloc) {
- entry->AddInstruction(store1);
- entry->AddInstruction(store2);
- } else {
- entry->AddInstruction(store2);
- entry->AddInstruction(store1);
- }
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls1, {});
- cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
- new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
- new_inst2->CopyEnvironmentFrom(cls1->GetEnvironment());
-
- // This is the escape of new_inst1
- HInstruction* store_left = MakeIFieldSet(new_inst2, new_inst1, MemberOffset(32));
- HInstruction* if_left = new (GetAllocator()) HIf(param2);
- left->AddInstruction(store_left);
- left->AddInstruction(if_left);
-
- HInstruction* call_left_left = MakeInvoke(DataType::Type::kVoid, { new_inst2 });
- HInstruction* goto_left_left = new (GetAllocator()) HGoto();
- left_left->AddInstruction(call_left_left);
- left_left->AddInstruction(goto_left_left);
- call_left_left->CopyEnvironmentFrom(new_inst2->GetEnvironment());
-
- left_right->AddInstruction(new (GetAllocator()) HGoto());
- left_end->AddInstruction(new (GetAllocator()) HGoto());
-
- right->AddInstruction(new (GetAllocator()) HGoto());
-
- // Used to distinguish the pred-gets without having to dig through the
- // multiple phi layers.
- constexpr uint32_t kRead1DexPc = 10;
- constexpr uint32_t kRead2DexPc = 20;
- HInstruction* read1 =
- MakeIFieldGet(new_inst1, DataType::Type::kReference, MemberOffset(32), kRead1DexPc);
- read1->SetReferenceTypeInfo(
- ReferenceTypeInfo::CreateUnchecked(graph_->GetHandleCache()->GetObjectClassHandle(), false));
- HInstruction* read2 =
- MakeIFieldGet(new_inst2, DataType::Type::kReference, MemberOffset(32), kRead2DexPc);
- read2->SetReferenceTypeInfo(
- ReferenceTypeInfo::CreateUnchecked(graph_->GetHandleCache()->GetObjectClassHandle(), false));
- HInstruction* sel_return = new (GetAllocator()) HSelect(param3, read1, read2, 0);
- HInstruction* return_exit = new (GetAllocator()) HReturn(sel_return);
- breturn->AddInstruction(read1);
- breturn->AddInstruction(read2);
- breturn->AddInstruction(sel_return);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- EXPECT_INS_RETAINED(call_left_left);
- EXPECT_INS_REMOVED(read1);
- EXPECT_INS_REMOVED(read2);
- EXPECT_INS_REMOVED(new_inst1);
- EXPECT_INS_REMOVED(new_inst2);
- EXPECT_TRUE(new_inst1->GetUses().empty()) << *new_inst1 << " " << new_inst1->GetUses();
- EXPECT_TRUE(new_inst2->GetUses().empty()) << *new_inst2 << " " << new_inst2->GetUses();
- EXPECT_INS_RETAINED(sel_return);
- // Make sure the selector is the same
- EXPECT_INS_EQ(sel_return->InputAt(2), param3);
- std::vector<HPredicatedInstanceFieldGet*> pred_gets;
- std::tie(pred_gets) = FindAllInstructions<HPredicatedInstanceFieldGet>(graph_, breturn);
- HPredicatedInstanceFieldGet* pred1 = FindOrNull(pred_gets.begin(), pred_gets.end(), [&](auto i) {
- return i->GetDexPc() == kRead1DexPc;
- });
- HPredicatedInstanceFieldGet* pred2 = FindOrNull(pred_gets.begin(), pred_gets.end(), [&](auto i) {
- return i->GetDexPc() == kRead2DexPc;
- });
- ASSERT_NE(pred1, nullptr);
- ASSERT_NE(pred2, nullptr);
- EXPECT_INS_EQ(sel_return->InputAt(0), pred2);
- EXPECT_INS_EQ(sel_return->InputAt(1), pred1);
- // Check targets
- EXPECT_TRUE(pred1->GetTarget()->IsPhi()) << pred1->DumpWithArgs();
- EXPECT_TRUE(pred2->GetTarget()->IsPhi()) << pred2->DumpWithArgs();
- HInstruction* mat1 = FindSingleInstruction<HNewInstance>(graph_, left->GetSinglePredecessor());
- HInstruction* mat2 =
- FindSingleInstruction<HNewInstance>(graph_, left_left->GetSinglePredecessor());
- EXPECT_INS_EQ(pred1->GetTarget()->InputAt(0), mat1);
- EXPECT_INS_EQ(pred1->GetTarget()->InputAt(1), null_const);
- EXPECT_TRUE(pred2->GetTarget()->InputAt(0)->IsPhi()) << pred2->DumpWithArgs();
- EXPECT_INS_EQ(pred2->GetTarget()->InputAt(0)->InputAt(0), mat2);
- EXPECT_INS_EQ(pred2->GetTarget()->InputAt(0)->InputAt(1), null_const);
- EXPECT_INS_EQ(pred2->GetTarget()->InputAt(1), null_const);
- // Check default values.
- EXPECT_TRUE(pred1->GetDefaultValue()->IsPhi()) << pred1->DumpWithArgs();
- EXPECT_TRUE(pred2->GetDefaultValue()->IsPhi()) << pred2->DumpWithArgs();
- EXPECT_INS_EQ(pred1->GetDefaultValue()->InputAt(0), null_const);
- EXPECT_INS_EQ(pred1->GetDefaultValue()->InputAt(1), param_obj1);
- EXPECT_TRUE(pred2->GetDefaultValue()->InputAt(0)->IsPhi()) << pred2->DumpWithArgs();
- EXPECT_INS_EQ(pred2->GetDefaultValue()->InputAt(0)->InputAt(0), null_const);
- EXPECT_INS_EQ(pred2->GetDefaultValue()->InputAt(0)->InputAt(1), mat1);
- EXPECT_INS_EQ(pred2->GetDefaultValue()->InputAt(1), param_obj2);
-}
-
-// // ENTRY
-// // To be moved
-// // NB The order is important. By having the alloc and store of obj1 before
-// // obj2 we ensure we'll build the materialization for obj1 first (just due
-// // to how we iterate).
-// obj1 = new Obj();
-// obj1.foo = 12;
-// obj2 = new Obj(); // has env[obj1]
-// obj2.foo = 15;
-// if (param1) {
-// // LEFT
-// // Need to update env to nullptr
-// escape(obj1/2);
-// if (param2) {
-// // LEFT_LEFT
-// escape(obj2/1);
-// } else {}
-// } else {}
-// return obj1.foo + obj2.foo;
-// EXIT
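-//
-// Expected shape (inferred from the checks below): each escape gets its own
-// materialization block, and the second materialization's environment holds
-// the first materialization when that one dominates it, or null otherwise.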
-TEST_P(OrderDependentTestGroup, PredicatedEnvUse) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "left_left"},
- {"left", "left_right"},
- {"left_left", "left_end"},
- {"left_right", "left_end"},
- {"left_end", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(right);
- GET_BLOCK(left);
- GET_BLOCK(left_left);
- GET_BLOCK(left_right);
- GET_BLOCK(left_end);
-#undef GET_BLOCK
- TestOrder order = GetParam();
- EnsurePredecessorOrder(breturn, {left_end, right});
- EnsurePredecessorOrder(left_end, {left_left, left_right});
- HInstruction* param1 = MakeParam(DataType::Type::kBool);
- HInstruction* param2 = MakeParam(DataType::Type::kBool);
- HInstruction* c12 = graph_->GetIntConstant(12);
- HInstruction* c15 = graph_->GetIntConstant(15);
-
- HInstruction* cls1 = MakeClassLoad();
- HInstruction* cls2 = MakeClassLoad();
- HInstruction* new_inst1 = MakeNewInstance(cls1);
- HInstruction* store1 = MakeIFieldSet(new_inst1, c12, MemberOffset(32));
- HInstruction* new_inst2 = MakeNewInstance(cls2);
- HInstruction* store2 = MakeIFieldSet(new_inst2, c15, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(param1);
- entry->AddInstruction(cls1);
- entry->AddInstruction(cls2);
- entry->AddInstruction(new_inst1);
- entry->AddInstruction(store1);
- entry->AddInstruction(new_inst2);
- entry->AddInstruction(store2);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls1, {});
- cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
- new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
- ManuallyBuildEnvFor(new_inst2, {new_inst1});
-
- HInstruction* first_inst = new_inst1;
- HInstruction* second_inst = new_inst2;
-
- if (order == TestOrder::kReverseOfAlloc) {
- std::swap(first_inst, second_inst);
- }
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { first_inst });
- HInstruction* if_left = new (GetAllocator()) HIf(param2);
- left->AddInstruction(call_left);
- left->AddInstruction(if_left);
- call_left->CopyEnvironmentFrom(new_inst2->GetEnvironment());
-
- HInstruction* call_left_left = MakeInvoke(DataType::Type::kVoid, { second_inst });
- HInstruction* goto_left_left = new (GetAllocator()) HGoto();
- left_left->AddInstruction(call_left_left);
- left_left->AddInstruction(goto_left_left);
- call_left_left->CopyEnvironmentFrom(new_inst2->GetEnvironment());
-
- left_right->AddInstruction(new (GetAllocator()) HGoto());
- left_end->AddInstruction(new (GetAllocator()) HGoto());
-
- right->AddInstruction(new (GetAllocator()) HGoto());
-
- HInstruction* read1 = MakeIFieldGet(new_inst1, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* read2 = MakeIFieldGet(new_inst2, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* add_return = new (GetAllocator()) HAdd(DataType::Type::kInt32, read1, read2);
- HInstruction* return_exit = new (GetAllocator()) HReturn(add_return);
- breturn->AddInstruction(read1);
- breturn->AddInstruction(read2);
- breturn->AddInstruction(add_return);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- HNewInstance* moved_new_inst1;
- HInstanceFieldSet* moved_set1;
- HNewInstance* moved_new_inst2;
- HInstanceFieldSet* moved_set2;
- HBasicBlock* first_mat_block = left->GetSinglePredecessor();
- HBasicBlock* second_mat_block = left_left->GetSinglePredecessor();
- if (order == TestOrder::kReverseOfAlloc) {
- std::swap(first_mat_block, second_mat_block);
- }
- std::tie(moved_new_inst1, moved_set1) =
- FindSingleInstructions<HNewInstance, HInstanceFieldSet>(graph_, first_mat_block);
- std::tie(moved_new_inst2, moved_set2) =
- FindSingleInstructions<HNewInstance, HInstanceFieldSet>(graph_, second_mat_block);
- std::vector<HPredicatedInstanceFieldGet*> pred_gets;
- std::vector<HPhi*> phis;
- std::tie(pred_gets, phis) = FindAllInstructions<HPredicatedInstanceFieldGet, HPhi>(graph_);
- EXPECT_NE(moved_new_inst1, nullptr);
- EXPECT_NE(moved_new_inst2, nullptr);
- EXPECT_NE(moved_set1, nullptr);
- EXPECT_NE(moved_set2, nullptr);
- EXPECT_INS_EQ(moved_set1->InputAt(1), c12);
- EXPECT_INS_EQ(moved_set2->InputAt(1), c15);
- EXPECT_INS_RETAINED(call_left);
- EXPECT_INS_RETAINED(call_left_left);
- EXPECT_INS_REMOVED(store1);
- EXPECT_INS_REMOVED(store2);
- EXPECT_INS_REMOVED(read1);
- EXPECT_INS_REMOVED(read2);
- EXPECT_INS_EQ(moved_new_inst2->GetEnvironment()->GetInstructionAt(0),
- order == TestOrder::kSameAsAlloc
- ? moved_new_inst1
- : static_cast<HInstruction*>(graph_->GetNullConstant()));
-}
-
-// // ENTRY
-// obj1 = new Obj1();
-// obj2 = new Obj2();
-// val1 = 3;
-// val2 = 13;
-// // The exact order the stores are written in affects the order in which we
-// // perform partial LSE on the values.
-// obj1/2.field = val1/2;
-// obj2/1.field = val2/1;
-// if (parameter_value) {
-// // LEFT
-// escape(obj1);
-// escape(obj2);
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj1.field = 2;
-// obj2.field = 12;
-// }
-// EXIT
-// predicated-ELIMINATE
-// return obj1.field + obj2.field
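-//
-// Expected shape (inferred from the checks below): all stores and both reads
-// are eliminated; the reads become predicated gets whose default values are
-// phis of [0, 2] and [0, 12], and the second materialized allocation keeps the
-// first one in its environment.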
-TEST_P(OrderDependentTestGroup, FieldSetOrderEnv) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- TestOrder order = GetParam();
- EnsurePredecessorOrder(breturn, {left, right});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c12 = graph_->GetIntConstant(12);
- HInstruction* c13 = graph_->GetIntConstant(13);
-
- HInstruction* cls1 = MakeClassLoad();
- HInstruction* cls2 = MakeClassLoad();
- HInstruction* new_inst1 = MakeNewInstance(cls1);
- HInstruction* new_inst2 = MakeNewInstance(cls2);
- HInstruction* write_entry1 = MakeIFieldSet(new_inst1, c3, MemberOffset(32));
- HInstruction* write_entry2 = MakeIFieldSet(new_inst2, c13, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls1);
- entry->AddInstruction(cls2);
- entry->AddInstruction(new_inst1);
- entry->AddInstruction(new_inst2);
- if (order == TestOrder::kSameAsAlloc) {
- entry->AddInstruction(write_entry1);
- entry->AddInstruction(write_entry2);
- } else {
- entry->AddInstruction(write_entry2);
- entry->AddInstruction(write_entry1);
- }
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls1, {});
- cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
- new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
- ManuallyBuildEnvFor(new_inst2, {new_inst1});
-
- HInstruction* call_left1 = MakeInvoke(DataType::Type::kVoid, { new_inst1 });
- HInstruction* call_left2 = MakeInvoke(DataType::Type::kVoid, { new_inst2 });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left1);
- left->AddInstruction(call_left2);
- left->AddInstruction(goto_left);
- call_left1->CopyEnvironmentFrom(cls1->GetEnvironment());
- call_left2->CopyEnvironmentFrom(cls1->GetEnvironment());
-
- HInstruction* write_right1 = MakeIFieldSet(new_inst1, c2, MemberOffset(32));
- HInstruction* write_right2 = MakeIFieldSet(new_inst2, c12, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right1);
- right->AddInstruction(write_right2);
- right->AddInstruction(goto_right);
-
- HInstruction* read_bottom1 = MakeIFieldGet(new_inst1, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* read_bottom2 = MakeIFieldGet(new_inst2, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* combine =
- new (GetAllocator()) HAdd(DataType::Type::kInt32, read_bottom1, read_bottom2);
- HInstruction* return_exit = new (GetAllocator()) HReturn(combine);
- breturn->AddInstruction(read_bottom1);
- breturn->AddInstruction(read_bottom2);
- breturn->AddInstruction(combine);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- EXPECT_INS_REMOVED(write_entry1);
- EXPECT_INS_REMOVED(write_entry2);
- EXPECT_INS_REMOVED(read_bottom1);
- EXPECT_INS_REMOVED(read_bottom2);
- EXPECT_INS_REMOVED(write_right1);
- EXPECT_INS_REMOVED(write_right2);
- EXPECT_INS_RETAINED(call_left1);
- EXPECT_INS_RETAINED(call_left2);
- std::vector<HPhi*> merges;
- std::vector<HPredicatedInstanceFieldGet*> pred_gets;
- std::vector<HNewInstance*> materializations;
- std::tie(merges, pred_gets) =
- FindAllInstructions<HPhi, HPredicatedInstanceFieldGet>(graph_, breturn);
- std::tie(materializations) = FindAllInstructions<HNewInstance>(graph_);
- ASSERT_EQ(merges.size(), 4u);
- ASSERT_EQ(pred_gets.size(), 2u);
- ASSERT_EQ(materializations.size(), 2u);
- HPhi* merge_value_return1 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->InputAt(1) == c2;
- });
- HPhi* merge_value_return2 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->InputAt(1) == c12;
- });
- HNewInstance* mat_alloc1 = FindOrNull(materializations.begin(),
- materializations.end(),
- [&](HNewInstance* n) { return n->InputAt(0) == cls1; });
- HNewInstance* mat_alloc2 = FindOrNull(materializations.begin(),
- materializations.end(),
- [&](HNewInstance* n) { return n->InputAt(0) == cls2; });
- ASSERT_NE(mat_alloc1, nullptr);
- ASSERT_NE(mat_alloc2, nullptr);
- HPhi* merge_alloc1 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kReference && p->InputAt(0) == mat_alloc1;
- });
- HPhi* merge_alloc2 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kReference && p->InputAt(0) == mat_alloc2;
- });
- ASSERT_NE(merge_alloc1, nullptr);
- HPredicatedInstanceFieldGet* pred_get1 =
- FindOrNull(pred_gets.begin(), pred_gets.end(), [&](HPredicatedInstanceFieldGet* pg) {
- return pg->GetTarget() == merge_alloc1;
- });
- ASSERT_NE(merge_alloc2, nullptr);
- HPredicatedInstanceFieldGet* pred_get2 =
- FindOrNull(pred_gets.begin(), pred_gets.end(), [&](HPredicatedInstanceFieldGet* pg) {
- return pg->GetTarget() == merge_alloc2;
- });
- ASSERT_NE(merge_value_return1, nullptr);
- ASSERT_NE(merge_value_return2, nullptr);
- EXPECT_INS_EQ(merge_alloc1->InputAt(1), graph_->GetNullConstant());
- EXPECT_INS_EQ(merge_alloc2->InputAt(1), graph_->GetNullConstant());
- ASSERT_NE(pred_get1, nullptr);
- EXPECT_INS_EQ(pred_get1->GetTarget(), merge_alloc1);
- EXPECT_INS_EQ(pred_get1->GetDefaultValue(), merge_value_return1)
- << " pred-get is: " << *pred_get1;
- EXPECT_INS_EQ(merge_value_return1->InputAt(0), graph_->GetIntConstant(0))
- << " merge val is: " << *merge_value_return1;
- EXPECT_INS_EQ(merge_value_return1->InputAt(1), c2) << " merge val is: " << *merge_value_return1;
- ASSERT_NE(pred_get2, nullptr);
- EXPECT_INS_EQ(pred_get2->GetTarget(), merge_alloc2);
- EXPECT_INS_EQ(pred_get2->GetDefaultValue(), merge_value_return2)
- << " pred-get is: " << *pred_get2;
-  EXPECT_INS_EQ(merge_value_return2->InputAt(0), graph_->GetIntConstant(0))
-      << " merge val is: " << *merge_value_return2;
-  EXPECT_INS_EQ(merge_value_return2->InputAt(1), c12) << " merge val is: " << *merge_value_return2;
- EXPECT_INS_EQ(mat_alloc2->GetEnvironment()->GetInstructionAt(0), mat_alloc1);
-}
-
-// // TODO We can compile this better if we are better able to understand lifetimes.
-// // ENTRY
-// obj1 = new Obj1();
-// obj2 = new Obj2();
-// // The exact order the stores are written in affects the order in which we
-// // perform partial LSE on the values.
-// obj{1,2}.var = param_obj;
-// obj{2,1}.var = param_obj;
-// if (param_1) {
-// // EARLY_RETURN
-// return;
-// }
-// // escape of obj1
-// obj2.var = obj1;
-// if (param_2) {
-// // escape of obj2 with a materialization that uses obj1
-// escape(obj2);
-// }
-// // EXIT
-// return;
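-//
-// Expected shape (inferred from the checks below): obj1 is materialized before
-// the escape_1 block and obj2 before escape_2, and obj2's materialized store
-// writes obj1's materialization into the field.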
-TEST_P(OrderDependentTestGroup, MaterializationMovedUse) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "early_return"},
- {"early_return", "exit"},
- {"entry", "escape_1"},
- {"escape_1", "escape_2"},
- {"escape_1", "escape_1_crit_break"},
- {"escape_1_crit_break", "exit"},
- {"escape_2", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(early_return);
- GET_BLOCK(escape_1);
- GET_BLOCK(escape_1_crit_break);
- GET_BLOCK(escape_2);
-#undef GET_BLOCK
- TestOrder order = GetParam();
- HInstruction* param_1 = MakeParam(DataType::Type::kBool);
- HInstruction* param_2 = MakeParam(DataType::Type::kBool);
- HInstruction* param_obj = MakeParam(DataType::Type::kReference);
-
- HInstruction* cls1 = MakeClassLoad();
- HInstruction* cls2 = MakeClassLoad();
- HInstruction* new_inst1 = MakeNewInstance(cls1);
- HInstruction* new_inst2 = MakeNewInstance(cls2);
- HInstruction* write_entry1 = MakeIFieldSet(new_inst1, param_obj, MemberOffset(32));
- HInstruction* write_entry2 = MakeIFieldSet(new_inst2, param_obj, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(param_1);
- entry->AddInstruction(cls1);
- entry->AddInstruction(cls2);
- entry->AddInstruction(new_inst1);
- entry->AddInstruction(new_inst2);
- if (order == TestOrder::kSameAsAlloc) {
- entry->AddInstruction(write_entry1);
- entry->AddInstruction(write_entry2);
- } else {
- entry->AddInstruction(write_entry2);
- entry->AddInstruction(write_entry1);
- }
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls1, {});
- cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
- new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
- new_inst2->CopyEnvironmentFrom(cls1->GetEnvironment());
-
- early_return->AddInstruction(new (GetAllocator()) HReturnVoid());
-
- HInstruction* escape_1_set = MakeIFieldSet(new_inst2, new_inst1, MemberOffset(32));
- HInstruction* escape_1_if = new (GetAllocator()) HIf(param_2);
- escape_1->AddInstruction(escape_1_set);
- escape_1->AddInstruction(escape_1_if);
-
- escape_1_crit_break->AddInstruction(new (GetAllocator()) HReturnVoid());
-
- HInstruction* escape_2_call = MakeInvoke(DataType::Type::kVoid, {new_inst2});
- HInstruction* escape_2_return = new (GetAllocator()) HReturnVoid();
- escape_2->AddInstruction(escape_2_call);
- escape_2->AddInstruction(escape_2_return);
- escape_2_call->CopyEnvironmentFrom(cls1->GetEnvironment());
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- EXPECT_INS_REMOVED(new_inst1);
- EXPECT_INS_REMOVED(new_inst2);
- EXPECT_INS_REMOVED(write_entry1);
- EXPECT_INS_REMOVED(write_entry2);
- EXPECT_INS_REMOVED(escape_1_set);
- EXPECT_INS_RETAINED(escape_2_call);
-
- HInstruction* obj1_mat =
- FindSingleInstruction<HNewInstance>(graph_, escape_1->GetSinglePredecessor());
- HInstruction* obj1_set =
- FindSingleInstruction<HInstanceFieldSet>(graph_, escape_1->GetSinglePredecessor());
- HInstruction* obj2_mat =
- FindSingleInstruction<HNewInstance>(graph_, escape_2->GetSinglePredecessor());
- HInstruction* obj2_set =
- FindSingleInstruction<HInstanceFieldSet>(graph_, escape_2->GetSinglePredecessor());
- ASSERT_TRUE(obj1_mat != nullptr);
- ASSERT_TRUE(obj2_mat != nullptr);
- ASSERT_TRUE(obj1_set != nullptr);
- ASSERT_TRUE(obj2_set != nullptr);
- EXPECT_INS_EQ(obj1_set->InputAt(0), obj1_mat);
- EXPECT_INS_EQ(obj1_set->InputAt(1), param_obj);
- EXPECT_INS_EQ(obj2_set->InputAt(0), obj2_mat);
- EXPECT_INS_EQ(obj2_set->InputAt(1), obj1_mat);
-}
-
-INSTANTIATE_TEST_SUITE_P(LoadStoreEliminationTest,
- OrderDependentTestGroup,
- testing::Values(TestOrder::kSameAsAlloc, TestOrder::kReverseOfAlloc));
-
-// // ENTRY
-// // To be moved
-// obj = new Obj();
-// obj.foo = 12;
-// if (parameter_value) {
-// // LEFT
-// escape(obj);
-// } else {}
-// EXIT
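-//
-// Expected shape (inferred from the checks below): the allocation and the
-// store of 12 are both sunk out of the entry block into a materialization
-// block on the escaping path.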
-TEST_F(LoadStoreEliminationTest, MovePredicatedAlloc) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"right", "breturn"},
- {"left", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left, right});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c12 = graph_->GetIntConstant(12);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* store = MakeIFieldSet(new_inst, c12, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(store);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- right->AddInstruction(new (GetAllocator()) HGoto());
-
- HInstruction* return_exit = new (GetAllocator()) HReturnVoid();
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- HNewInstance* moved_new_inst = nullptr;
- HInstanceFieldSet* moved_set = nullptr;
- std::tie(moved_new_inst, moved_set) =
- FindSingleInstructions<HNewInstance, HInstanceFieldSet>(graph_);
- EXPECT_NE(moved_new_inst, nullptr);
- EXPECT_NE(moved_set, nullptr);
- EXPECT_INS_RETAINED(call_left);
- // store removed or moved.
- EXPECT_NE(store->GetBlock(), entry);
- // New-inst removed or moved.
- EXPECT_NE(new_inst->GetBlock(), entry);
- EXPECT_INS_EQ(moved_set->InputAt(0), moved_new_inst);
- EXPECT_INS_EQ(moved_set->InputAt(1), c12);
-}
-
-// // ENTRY
-// // To be moved
-// obj = new Obj();
-// obj.foo = 12;
-// if (parameter_value) {
-// // LEFT
-// escape(obj);
-// }
-// EXIT
-// int a = obj.foo;
-// obj.foo = 13;
-// noescape();
-// int b = obj.foo;
-// obj.foo = 14;
-// noescape();
-// int c = obj.foo;
-// obj.foo = 15;
-// noescape();
-// return a + b + c
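-//
-// Expected shape (inferred from the checks below): the allocation sinks into
-// LEFT's predecessor, the reads and writes after the merge become predicated
-// gets and predicated sets, and the noescape() environments are rewritten to
-// use the instance phi and the preceding predicated gets.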
-TEST_F(LoadStoreEliminationTest, MutiPartialLoadStore) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"right", "breturn"},
- {"left", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left, right});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c12 = graph_->GetIntConstant(12);
- HInstruction* c13 = graph_->GetIntConstant(13);
- HInstruction* c14 = graph_->GetIntConstant(14);
- HInstruction* c15 = graph_->GetIntConstant(15);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* store = MakeIFieldSet(new_inst, c12, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(store);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(goto_right);
-
- HInstruction* a_val = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* a_reset = MakeIFieldSet(new_inst, c13, MemberOffset(32));
- HInstruction* a_noescape = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* b_val = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* b_reset = MakeIFieldSet(new_inst, c14, MemberOffset(32));
- HInstruction* b_noescape = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* c_val = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* c_reset = MakeIFieldSet(new_inst, c15, MemberOffset(32));
- HInstruction* c_noescape = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* add_1_exit = new (GetAllocator()) HAdd(DataType::Type::kInt32, a_val, b_val);
- HInstruction* add_2_exit = new (GetAllocator()) HAdd(DataType::Type::kInt32, c_val, add_1_exit);
- HInstruction* return_exit = new (GetAllocator()) HReturn(add_2_exit);
- breturn->AddInstruction(a_val);
- breturn->AddInstruction(a_reset);
- breturn->AddInstruction(a_noescape);
- breturn->AddInstruction(b_val);
- breturn->AddInstruction(b_reset);
- breturn->AddInstruction(b_noescape);
- breturn->AddInstruction(c_val);
- breturn->AddInstruction(c_reset);
- breturn->AddInstruction(c_noescape);
- breturn->AddInstruction(add_1_exit);
- breturn->AddInstruction(add_2_exit);
- breturn->AddInstruction(return_exit);
- ManuallyBuildEnvFor(a_noescape, {new_inst, a_val});
- ManuallyBuildEnvFor(b_noescape, {new_inst, a_val, b_val});
- ManuallyBuildEnvFor(c_noescape, {new_inst, a_val, b_val, c_val});
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- HNewInstance* moved_new_inst = nullptr;
- HInstanceFieldSet* moved_set = nullptr;
- std::tie(moved_new_inst, moved_set) =
- FindSingleInstructions<HNewInstance, HInstanceFieldSet>(graph_, left->GetSinglePredecessor());
- std::vector<HPredicatedInstanceFieldGet*> pred_gets;
- std::vector<HInstanceFieldSet*> pred_sets;
- std::vector<HPhi*> return_phis;
- std::tie(return_phis, pred_gets, pred_sets) =
- FindAllInstructions<HPhi, HPredicatedInstanceFieldGet, HInstanceFieldSet>(graph_, breturn);
- ASSERT_EQ(return_phis.size(), 2u);
- HPhi* inst_phi = return_phis[0];
- HPhi* val_phi = return_phis[1];
- if (inst_phi->GetType() != DataType::Type::kReference) {
- std::swap(inst_phi, val_phi);
- }
- ASSERT_NE(moved_new_inst, nullptr);
- EXPECT_INS_EQ(inst_phi->InputAt(0), moved_new_inst);
- EXPECT_INS_EQ(inst_phi->InputAt(1), graph_->GetNullConstant());
- EXPECT_INS_EQ(val_phi->InputAt(0), graph_->GetIntConstant(0));
- EXPECT_EQ(val_phi->InputAt(1), c12);
- ASSERT_EQ(pred_gets.size(), 3u);
- ASSERT_EQ(pred_gets.size(), pred_sets.size());
- std::vector<HInstruction*> set_values{c13, c14, c15};
- std::vector<HInstruction*> get_values{val_phi, c13, c14};
- ASSERT_NE(moved_set, nullptr);
- EXPECT_INS_EQ(moved_set->InputAt(0), moved_new_inst);
- EXPECT_INS_EQ(moved_set->InputAt(1), c12);
- EXPECT_INS_RETAINED(call_left);
- // store removed or moved.
- EXPECT_NE(store->GetBlock(), entry);
- // New-inst removed or moved.
- EXPECT_NE(new_inst->GetBlock(), entry);
- for (auto [get, val] : ZipLeft(MakeIterationRange(pred_gets), MakeIterationRange(get_values))) {
- EXPECT_INS_EQ(get->GetDefaultValue(), val);
- }
- for (auto [set, val] : ZipLeft(MakeIterationRange(pred_sets), MakeIterationRange(set_values))) {
- EXPECT_INS_EQ(set->InputAt(1), val);
- EXPECT_TRUE(set->GetIsPredicatedSet()) << *set;
- }
- EXPECT_INS_RETAINED(a_noescape);
- EXPECT_INS_RETAINED(b_noescape);
- EXPECT_INS_RETAINED(c_noescape);
- EXPECT_INS_EQ(add_1_exit->InputAt(0), pred_gets[0]);
- EXPECT_INS_EQ(add_1_exit->InputAt(1), pred_gets[1]);
- EXPECT_INS_EQ(add_2_exit->InputAt(0), pred_gets[2]);
-
- EXPECT_EQ(a_noescape->GetEnvironment()->Size(), 2u);
- EXPECT_INS_EQ(a_noescape->GetEnvironment()->GetInstructionAt(0), inst_phi);
- EXPECT_INS_EQ(a_noescape->GetEnvironment()->GetInstructionAt(1), pred_gets[0]);
- EXPECT_EQ(b_noescape->GetEnvironment()->Size(), 3u);
- EXPECT_INS_EQ(b_noescape->GetEnvironment()->GetInstructionAt(0), inst_phi);
- EXPECT_INS_EQ(b_noescape->GetEnvironment()->GetInstructionAt(1), pred_gets[0]);
- EXPECT_INS_EQ(b_noescape->GetEnvironment()->GetInstructionAt(2), pred_gets[1]);
- EXPECT_EQ(c_noescape->GetEnvironment()->Size(), 4u);
- EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(0), inst_phi);
- EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(1), pred_gets[0]);
- EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(2), pred_gets[1]);
- EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(3), pred_gets[2]);
-}
-
-// // ENTRY
-// // To be moved
-// obj = new Obj();
-// obj.foo = 12;
-// int a = obj.foo;
-// obj.foo = 13;
-// noescape();
-// int b = obj.foo;
-// obj.foo = 14;
-// noescape();
-// int c = obj.foo;
-// obj.foo = 15;
-// noescape();
-// if (parameter_value) {
-// // LEFT
-// escape(obj);
-// }
-// EXIT
-// return a + b + c + obj.foo
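-//
-// Expected shape (inferred from the checks below): the reads before the branch
-// fold to the constants 12, 13 and 14, only the final read becomes a
-// predicated get, and the noescape() environments see null for the instance
-// and constants for the folded reads.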
-TEST_F(LoadStoreEliminationTest, MutiPartialLoadStore2) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
-  // We need an actual entry block since we check the env layout, and the way
-  // we add constants would otherwise screw it up.
- AdjacencyListGraph blks(SetupFromAdjacencyList("start",
- "exit",
- {{"start", "entry"},
- {"entry", "left"},
- {"entry", "right"},
- {"right", "breturn"},
- {"left", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(start);
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left, right});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c12 = graph_->GetIntConstant(12);
- HInstruction* c13 = graph_->GetIntConstant(13);
- HInstruction* c14 = graph_->GetIntConstant(14);
- HInstruction* c15 = graph_->GetIntConstant(15);
-
- HInstruction* start_suspend = new (GetAllocator()) HSuspendCheck();
- HInstruction* start_goto = new (GetAllocator()) HGoto();
-
- start->AddInstruction(start_suspend);
- start->AddInstruction(start_goto);
- ManuallyBuildEnvFor(start_suspend, {});
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* store = MakeIFieldSet(new_inst, c12, MemberOffset(32));
-
- HInstruction* a_val = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* a_reset = MakeIFieldSet(new_inst, c13, MemberOffset(32));
- HInstruction* a_noescape = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* b_val = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* b_reset = MakeIFieldSet(new_inst, c14, MemberOffset(32));
- HInstruction* b_noescape = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* c_val = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* c_reset = MakeIFieldSet(new_inst, c15, MemberOffset(32));
- HInstruction* c_noescape = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(store);
- entry->AddInstruction(a_val);
- entry->AddInstruction(a_reset);
- entry->AddInstruction(a_noescape);
- entry->AddInstruction(b_val);
- entry->AddInstruction(b_reset);
- entry->AddInstruction(b_noescape);
- entry->AddInstruction(c_val);
- entry->AddInstruction(c_reset);
- entry->AddInstruction(c_noescape);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
- ManuallyBuildEnvFor(a_noescape, {new_inst, a_val});
- ManuallyBuildEnvFor(b_noescape, {new_inst, a_val, b_val});
- ManuallyBuildEnvFor(c_noescape, {new_inst, a_val, b_val, c_val});
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(c_noescape->GetEnvironment());
-
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(goto_right);
-
- HInstruction* val_exit = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* add_1_exit = new (GetAllocator()) HAdd(DataType::Type::kInt32, a_val, b_val);
- HInstruction* add_2_exit = new (GetAllocator()) HAdd(DataType::Type::kInt32, c_val, add_1_exit);
- HInstruction* add_3_exit =
- new (GetAllocator()) HAdd(DataType::Type::kInt32, val_exit, add_2_exit);
- HInstruction* return_exit = new (GetAllocator()) HReturn(add_3_exit);
- breturn->AddInstruction(val_exit);
- breturn->AddInstruction(add_1_exit);
- breturn->AddInstruction(add_2_exit);
- breturn->AddInstruction(add_3_exit);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- HNewInstance* moved_new_inst = nullptr;
- HInstanceFieldSet* moved_set = nullptr;
- std::tie(moved_new_inst, moved_set) =
- FindSingleInstructions<HNewInstance, HInstanceFieldSet>(graph_, left->GetSinglePredecessor());
- std::vector<HPredicatedInstanceFieldGet*> pred_gets;
- std::vector<HInstanceFieldSet*> pred_sets;
- std::vector<HPhi*> return_phis;
- std::tie(return_phis, pred_gets, pred_sets) =
- FindAllInstructions<HPhi, HPredicatedInstanceFieldGet, HInstanceFieldSet>(graph_, breturn);
- ASSERT_EQ(return_phis.size(), 2u);
- HPhi* inst_phi = return_phis[0];
- HPhi* val_phi = return_phis[1];
- if (inst_phi->GetType() != DataType::Type::kReference) {
- std::swap(inst_phi, val_phi);
- }
- ASSERT_NE(moved_new_inst, nullptr);
- EXPECT_INS_EQ(inst_phi->InputAt(0), moved_new_inst);
- EXPECT_INS_EQ(inst_phi->InputAt(1), graph_->GetNullConstant());
- EXPECT_INS_EQ(val_phi->InputAt(0), graph_->GetIntConstant(0));
- EXPECT_INS_EQ(val_phi->InputAt(1), c15);
- ASSERT_EQ(pred_gets.size(), 1u);
- ASSERT_EQ(pred_sets.size(), 0u);
- ASSERT_NE(moved_set, nullptr);
- EXPECT_INS_EQ(moved_set->InputAt(0), moved_new_inst);
- EXPECT_INS_EQ(moved_set->InputAt(1), c15);
- EXPECT_INS_RETAINED(call_left);
- // store removed or moved.
- EXPECT_NE(store->GetBlock(), entry);
- // New-inst removed or moved.
- EXPECT_NE(new_inst->GetBlock(), entry);
- EXPECT_INS_REMOVED(a_val);
- EXPECT_INS_REMOVED(b_val);
- EXPECT_INS_REMOVED(c_val);
- EXPECT_INS_RETAINED(a_noescape);
- EXPECT_INS_RETAINED(b_noescape);
- EXPECT_INS_RETAINED(c_noescape);
- EXPECT_INS_EQ(add_1_exit->InputAt(0), c12);
- EXPECT_INS_EQ(add_1_exit->InputAt(1), c13);
- EXPECT_INS_EQ(add_2_exit->InputAt(0), c14);
- EXPECT_INS_EQ(add_2_exit->InputAt(1), add_1_exit);
- EXPECT_INS_EQ(add_3_exit->InputAt(0), pred_gets[0]);
- EXPECT_INS_EQ(pred_gets[0]->GetDefaultValue(), val_phi);
- EXPECT_INS_EQ(add_3_exit->InputAt(1), add_2_exit);
- EXPECT_EQ(a_noescape->GetEnvironment()->Size(), 2u);
- EXPECT_INS_EQ(a_noescape->GetEnvironment()->GetInstructionAt(0), graph_->GetNullConstant());
- EXPECT_INS_EQ(a_noescape->GetEnvironment()->GetInstructionAt(1), c12);
- EXPECT_EQ(b_noescape->GetEnvironment()->Size(), 3u);
- EXPECT_INS_EQ(b_noescape->GetEnvironment()->GetInstructionAt(0), graph_->GetNullConstant());
- EXPECT_INS_EQ(b_noescape->GetEnvironment()->GetInstructionAt(1), c12);
- EXPECT_INS_EQ(b_noescape->GetEnvironment()->GetInstructionAt(2), c13);
- EXPECT_EQ(c_noescape->GetEnvironment()->Size(), 4u);
- EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(0), graph_->GetNullConstant());
- EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(1), c12);
- EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(2), c13);
- EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(3), c14);
-}
-
-// // ENTRY
-// // To be moved
-// obj = new Obj();
-// // The transforms required for creation are non-trivial and unimportant.
-// if (parameter_value) {
-// obj.foo = 10
-// } else {
-// obj.foo = 12;
-// }
-// if (parameter_value_2) {
-// escape(obj);
-// }
-// EXIT
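-//
-// Expected shape (inferred from the checks below): the two stores merge into a
-// single phi of [10, 12] whose only use is the one materialized store, and the
-// allocation is materialized only on the escaping path.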
-TEST_F(LoadStoreEliminationTest, MovePredicatedAlloc2) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left_set"},
- {"entry", "right_set"},
- {"left_set", "merge_crit_break"},
- {"right_set", "merge_crit_break"},
- {"merge_crit_break", "merge"},
- {"merge", "escape"},
- {"escape", "breturn"},
- {"merge", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left_set);
- GET_BLOCK(right_set);
- GET_BLOCK(merge);
- GET_BLOCK(merge_crit_break);
- GET_BLOCK(escape);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {merge, escape});
- EnsurePredecessorOrder(merge_crit_break, {left_set, right_set});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* bool_value_2 = MakeParam(DataType::Type::kBool);
- HInstruction* c10 = graph_->GetIntConstant(10);
- HInstruction* c12 = graph_->GetIntConstant(12);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* store_left = MakeIFieldSet(new_inst, c10, MemberOffset(32));
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left_set->AddInstruction(store_left);
- left_set->AddInstruction(goto_left);
-
- HInstruction* store_right = MakeIFieldSet(new_inst, c12, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right_set->AddInstruction(store_right);
- right_set->AddInstruction(goto_right);
-
- merge_crit_break->AddInstruction(new (GetAllocator()) HGoto());
- HInstruction* if_merge = new (GetAllocator()) HIf(bool_value_2);
- merge->AddInstruction(if_merge);
-
- HInstruction* escape_instruction = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* escape_goto = new (GetAllocator()) HGoto();
- escape->AddInstruction(escape_instruction);
- escape->AddInstruction(escape_goto);
- escape_instruction->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* return_exit = new (GetAllocator()) HReturnVoid();
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- HNewInstance* moved_new_inst;
- HInstanceFieldSet* moved_set;
- std::tie(moved_new_inst, moved_set) =
- FindSingleInstructions<HNewInstance, HInstanceFieldSet>(graph_);
- HPhi* merge_phi = FindSingleInstruction<HPhi>(graph_, merge_crit_break);
- HPhi* alloc_phi = FindSingleInstruction<HPhi>(graph_, breturn);
- EXPECT_INS_EQ(moved_new_inst, moved_set->InputAt(0));
- ASSERT_NE(alloc_phi, nullptr);
- EXPECT_EQ(alloc_phi->InputAt(0), graph_->GetNullConstant())
- << alloc_phi->GetBlock()->GetPredecessors()[0]->GetBlockId() << " " << *alloc_phi;
- EXPECT_TRUE(alloc_phi->InputAt(1)->IsNewInstance()) << *alloc_phi;
- ASSERT_NE(merge_phi, nullptr);
- EXPECT_EQ(merge_phi->InputCount(), 2u);
- EXPECT_INS_EQ(merge_phi->InputAt(0), c10);
- EXPECT_INS_EQ(merge_phi->InputAt(1), c12);
- EXPECT_TRUE(merge_phi->GetUses().HasExactlyOneElement());
- EXPECT_INS_EQ(merge_phi->GetUses().front().GetUser(), moved_set);
- EXPECT_INS_RETAINED(escape_instruction);
- EXPECT_INS_EQ(escape_instruction->InputAt(0), moved_new_inst);
- // store removed or moved.
- EXPECT_NE(store_left->GetBlock(), left_set);
-  EXPECT_NE(store_right->GetBlock(), right_set);
- // New-inst removed or moved.
- EXPECT_NE(new_inst->GetBlock(), entry);
-}
-
-// // ENTRY
-// // To be moved
-// obj = new Obj();
-// switch(args) {
-// default:
-// return obj.a;
-// case b:
-//     obj.a = 4; break;
-// case c:
-//     obj.a = 5; break;
-// }
-// escape(obj);
-// return obj.a;
-// EXIT
-TEST_F(LoadStoreEliminationTest, MovePredicatedAlloc3) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "early_return"},
- {"entry", "set_one"},
- {"entry", "set_two"},
- {"early_return", "exit"},
- {"set_one", "escape"},
- {"set_two", "escape"},
- {"escape", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(escape);
- GET_BLOCK(early_return);
- GET_BLOCK(set_one);
- GET_BLOCK(set_two);
-#undef GET_BLOCK
- EnsurePredecessorOrder(escape, {set_one, set_two});
- HInstruction* int_val = MakeParam(DataType::Type::kInt32);
- HInstruction* c0 = graph_->GetIntConstant(0);
- HInstruction* c4 = graph_->GetIntConstant(4);
- HInstruction* c5 = graph_->GetIntConstant(5);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, int_val);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(switch_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* store_one = MakeIFieldSet(new_inst, c4, MemberOffset(32));
- HInstruction* goto_one = new (GetAllocator()) HGoto();
- set_one->AddInstruction(store_one);
- set_one->AddInstruction(goto_one);
-
- HInstruction* store_two = MakeIFieldSet(new_inst, c5, MemberOffset(32));
- HInstruction* goto_two = new (GetAllocator()) HGoto();
- set_two->AddInstruction(store_two);
- set_two->AddInstruction(goto_two);
-
- HInstruction* read_early = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_early = new (GetAllocator()) HReturn(read_early);
- early_return->AddInstruction(read_early);
- early_return->AddInstruction(return_early);
-
- HInstruction* escape_instruction = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* read_escape = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_escape = new (GetAllocator()) HReturn(read_escape);
- escape->AddInstruction(escape_instruction);
- escape->AddInstruction(read_escape);
- escape->AddInstruction(return_escape);
- escape_instruction->CopyEnvironmentFrom(cls->GetEnvironment());
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- // Each escaping switch path gets its own materialization block.
- // Blocks:
- // early_return(5) -> [exit(4)]
- // entry(3) -> [early_return(5), <Unnamed>(9), <Unnamed>(10)]
- // escape(8) -> [exit(4)]
- // exit(4) -> []
- // set_one(6) -> [escape(8)]
- // set_two(7) -> [escape(8)]
- // <Unnamed>(10) -> [set_two(7)]
- // <Unnamed>(9) -> [set_one(6)]
- HBasicBlock* materialize_one = set_one->GetSinglePredecessor();
- HBasicBlock* materialize_two = set_two->GetSinglePredecessor();
- HNewInstance* materialization_ins_one =
- FindSingleInstruction<HNewInstance>(graph_, materialize_one);
- HNewInstance* materialization_ins_two =
- FindSingleInstruction<HNewInstance>(graph_, materialize_two);
- HPhi* new_phi = FindSingleInstruction<HPhi>(graph_, escape);
- EXPECT_NE(materialization_ins_one, nullptr);
- EXPECT_NE(materialization_ins_two, nullptr);
- EXPECT_EQ(materialization_ins_one, new_phi->InputAt(0))
- << *materialization_ins_one << " vs " << *new_phi;
- EXPECT_EQ(materialization_ins_two, new_phi->InputAt(1))
- << *materialization_ins_two << " vs " << *new_phi;
-
- EXPECT_INS_RETAINED(escape_instruction);
- EXPECT_INS_RETAINED(read_escape);
- EXPECT_EQ(read_escape->InputAt(0), new_phi) << *new_phi << " vs " << *read_escape->InputAt(0);
- EXPECT_EQ(store_one->InputAt(0), materialization_ins_one);
- EXPECT_EQ(store_two->InputAt(0), materialization_ins_two);
- EXPECT_EQ(escape_instruction->InputAt(0), new_phi);
- EXPECT_INS_REMOVED(read_early);
- EXPECT_EQ(return_early->InputAt(0), c0);
-}
-
-// // ENTRY
-// // To be moved
-// obj = new Obj();
-// switch(args) {
-// case a:
-// // set_one_and_escape
-//     obj.a = 4;
-// escape(obj);
-// // FALLTHROUGH
-// case c:
-// // set_two
-//     obj.a = 5; break;
-// default:
-// return obj.a;
-// }
-// escape(obj);
-// return obj.a;
-// EXIT
-TEST_F(LoadStoreEliminationTest, MovePredicatedAlloc4) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
-  // Break the critical edge between entry and set_two with the
-  // set_two_critical_break node. Graph simplification would do this for us if
-  // we didn't do it manually. This way we have a nice name for debugging and
-  // testing.
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "early_return"},
- {"entry", "set_one_and_escape"},
- {"entry", "set_two_critical_break"},
- {"set_two_critical_break", "set_two"},
- {"early_return", "exit"},
- {"set_one_and_escape", "set_two"},
- {"set_two", "escape"},
- {"escape", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(escape);
- GET_BLOCK(early_return);
- GET_BLOCK(set_one_and_escape);
- GET_BLOCK(set_two);
- GET_BLOCK(set_two_critical_break);
-#undef GET_BLOCK
- EnsurePredecessorOrder(set_two, {set_one_and_escape, set_two_critical_break});
- HInstruction* int_val = MakeParam(DataType::Type::kInt32);
- HInstruction* c0 = graph_->GetIntConstant(0);
- HInstruction* c4 = graph_->GetIntConstant(4);
- HInstruction* c5 = graph_->GetIntConstant(5);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, int_val);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(switch_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* store_one = MakeIFieldSet(new_inst, c4, MemberOffset(32));
- HInstruction* escape_one = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_one = new (GetAllocator()) HGoto();
- set_one_and_escape->AddInstruction(store_one);
- set_one_and_escape->AddInstruction(escape_one);
- set_one_and_escape->AddInstruction(goto_one);
- escape_one->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_crit_break = new (GetAllocator()) HGoto();
- set_two_critical_break->AddInstruction(goto_crit_break);
-
- HInstruction* store_two = MakeIFieldSet(new_inst, c5, MemberOffset(32));
- HInstruction* goto_two = new (GetAllocator()) HGoto();
- set_two->AddInstruction(store_two);
- set_two->AddInstruction(goto_two);
-
- HInstruction* read_early = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_early = new (GetAllocator()) HReturn(read_early);
- early_return->AddInstruction(read_early);
- early_return->AddInstruction(return_early);
-
- HInstruction* escape_instruction = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* read_escape = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_escape = new (GetAllocator()) HReturn(read_escape);
- escape->AddInstruction(escape_instruction);
- escape->AddInstruction(read_escape);
- escape->AddInstruction(return_escape);
- escape_instruction->CopyEnvironmentFrom(cls->GetEnvironment());
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- EXPECT_INS_REMOVED(read_early);
- EXPECT_EQ(return_early->InputAt(0), c0);
- // Each escaping switch path gets its own materialization block.
- // Blocks:
- // early_return(5) -> [exit(4)]
- // entry(3) -> [early_return(5), <Unnamed>(10), <Unnamed>(11)]
- // escape(9) -> [exit(4)]
- // exit(4) -> []
- // set_one_and_escape(6) -> [set_two(8)]
- // set_two(8) -> [escape(9)]
- // set_two_critical_break(7) -> [set_two(8)]
- // <Unnamed>(11) -> [set_two_critical_break(7)]
- // <Unnamed>(10) -> [set_one_and_escape(6)]
- HBasicBlock* materialize_one = set_one_and_escape->GetSinglePredecessor();
- HBasicBlock* materialize_two = set_two_critical_break->GetSinglePredecessor();
- HNewInstance* materialization_ins_one =
- FindSingleInstruction<HNewInstance>(graph_, materialize_one);
- HNewInstance* materialization_ins_two =
- FindSingleInstruction<HNewInstance>(graph_, materialize_two);
- HPhi* new_phi = FindSingleInstruction<HPhi>(graph_, set_two);
- ASSERT_NE(new_phi, nullptr);
- ASSERT_NE(materialization_ins_one, nullptr);
- ASSERT_NE(materialization_ins_two, nullptr);
- EXPECT_INS_EQ(materialization_ins_one, new_phi->InputAt(0));
- EXPECT_INS_EQ(materialization_ins_two, new_phi->InputAt(1));
-
- EXPECT_INS_EQ(store_one->InputAt(0), materialization_ins_one);
- EXPECT_INS_EQ(store_two->InputAt(0), new_phi) << *store_two << " vs " << *new_phi;
- EXPECT_INS_EQ(escape_instruction->InputAt(0), new_phi);
- EXPECT_INS_RETAINED(escape_one);
- EXPECT_INS_EQ(escape_one->InputAt(0), materialization_ins_one);
- EXPECT_INS_RETAINED(escape_instruction);
- EXPECT_INS_RETAINED(read_escape);
- EXPECT_EQ(read_escape->InputAt(0), new_phi) << *new_phi << " vs " << *read_escape->InputAt(0);
-}
-
-// // ENTRY
-// // To be moved
-// obj = new Obj();
-// switch(args) {
-// case a:
-// // set_one
-// obj.a = 5;
-// // NB: falls through to case c
-// case c:
-// // set_two_and_escape
-// obj.a += 4;
-// escape(obj);
-// break;
-// default:
-// obj.a = 10;
-// }
-// return obj.a;
-// EXIT
-TEST_F(LoadStoreEliminationTest, MovePredicatedAlloc5) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
-  // Break the critical edge between entry and set_two_and_escape with the
-  // set_two_critical_break block. Graph simplification would do this for us
-  // if we didn't do it manually. This way we have a nice name for debugging
-  // and testing.
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "set_noescape"},
- {"entry", "set_one"},
- {"entry", "set_two_critical_break"},
- {"set_two_critical_break", "set_two_and_escape"},
- {"set_noescape", "breturn"},
- {"set_one", "set_two_and_escape"},
- {"set_two_and_escape", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(set_noescape);
- GET_BLOCK(set_one);
- GET_BLOCK(set_two_and_escape);
- GET_BLOCK(set_two_critical_break);
-#undef GET_BLOCK
- EnsurePredecessorOrder(set_two_and_escape, {set_one, set_two_critical_break});
- EnsurePredecessorOrder(breturn, {set_two_and_escape, set_noescape});
- HInstruction* int_val = MakeParam(DataType::Type::kInt32);
- HInstruction* c0 = graph_->GetIntConstant(0);
- HInstruction* c4 = graph_->GetIntConstant(4);
- HInstruction* c5 = graph_->GetIntConstant(5);
- HInstruction* c10 = graph_->GetIntConstant(10);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, int_val);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(switch_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* store_one = MakeIFieldSet(new_inst, c5, MemberOffset(32));
- HInstruction* goto_one = new (GetAllocator()) HGoto();
- set_one->AddInstruction(store_one);
- set_one->AddInstruction(goto_one);
-
- HInstruction* goto_crit_break = new (GetAllocator()) HGoto();
- set_two_critical_break->AddInstruction(goto_crit_break);
-
- HInstruction* get_two = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* add_two = new (GetAllocator()) HAdd(DataType::Type::kInt32, get_two, c4);
- HInstruction* store_two = MakeIFieldSet(new_inst, add_two, MemberOffset(32));
- HInstruction* escape_two = MakeInvoke(DataType::Type::kVoid, {new_inst});
- HInstruction* goto_two = new (GetAllocator()) HGoto();
- set_two_and_escape->AddInstruction(get_two);
- set_two_and_escape->AddInstruction(add_two);
- set_two_and_escape->AddInstruction(store_two);
- set_two_and_escape->AddInstruction(escape_two);
- set_two_and_escape->AddInstruction(goto_two);
- escape_two->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* store_noescape = MakeIFieldSet(new_inst, c10, MemberOffset(32));
- HInstruction* goto_noescape = new (GetAllocator()) HGoto();
- set_noescape->AddInstruction(store_noescape);
- set_noescape->AddInstruction(goto_noescape);
-
- HInstruction* read_breturn = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_breturn = new (GetAllocator()) HReturn(read_breturn);
- breturn->AddInstruction(read_breturn);
- breturn->AddInstruction(return_breturn);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- // Normal LSE can get rid of these two.
- EXPECT_INS_REMOVED(store_one);
- EXPECT_INS_REMOVED(get_two);
- EXPECT_INS_RETAINED(add_two);
- EXPECT_TRUE(add_two->InputAt(0)->IsPhi());
- EXPECT_INS_EQ(add_two->InputAt(0)->InputAt(0), c5);
- EXPECT_INS_EQ(add_two->InputAt(0)->InputAt(1), c0);
- EXPECT_INS_EQ(add_two->InputAt(1), c4);
-
- HBasicBlock* materialize_one = set_one->GetSinglePredecessor();
- HBasicBlock* materialize_two = set_two_critical_break->GetSinglePredecessor();
- HNewInstance* materialization_ins_one =
- FindSingleInstruction<HNewInstance>(graph_, materialize_one);
- HNewInstance* materialization_ins_two =
- FindSingleInstruction<HNewInstance>(graph_, materialize_two);
- std::vector<HPhi*> phis;
- std::tie(phis) = FindAllInstructions<HPhi>(graph_, set_two_and_escape);
- HPhi* new_phi = FindOrNull(
- phis.begin(), phis.end(), [&](auto p) { return p->GetType() == DataType::Type::kReference; });
- ASSERT_NE(new_phi, nullptr);
- ASSERT_NE(materialization_ins_one, nullptr);
- ASSERT_NE(materialization_ins_two, nullptr);
- EXPECT_INS_EQ(materialization_ins_one, new_phi->InputAt(0));
- EXPECT_INS_EQ(materialization_ins_two, new_phi->InputAt(1));
-
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- EXPECT_TRUE(pred_get->GetTarget()->IsPhi());
- EXPECT_INS_EQ(pred_get->GetTarget()->InputAt(0), new_phi);
- EXPECT_INS_EQ(pred_get->GetTarget()->InputAt(1), graph_->GetNullConstant());
-
- EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(0), c0);
- EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(1), c10);
-}
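The GetTarget()/GetDefaultValue() expectations above rest on the runtime contract of HPredicatedInstanceFieldGet: read through the target when an object was actually materialized on this path, otherwise produce the tracked default. A minimal model of that contract (assumed phrasing, not ART code):

  #include <cstdint>

  struct Obj { int32_t field; };

  // target is the reference phi (materialized instance or the null
  // constant); default_value is the phi of per-path tracked field values.
  int32_t PredicatedGet(const Obj* target, int32_t default_value) {
    return target != nullptr ? target->field : default_value;
  }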
-
-// // ENTRY
-// obj = new Obj();
-// if (parameter_value) {
-// // LEFT
-// // DO NOT ELIMINATE
-// obj.field = 1;
-// escape(obj);
-// return obj.field;
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// return obj.field;
-// }
-// EXIT
-TEST_F(LoadStoreEliminationTest, PartialLoadElimination3) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList(
- "entry",
- "exit",
- {{"entry", "left"}, {"entry", "right"}, {"left", "exit"}, {"right", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* c2 = graph_->GetIntConstant(2);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* read_left = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_left = new (GetAllocator()) HReturn(read_left);
- left->AddInstruction(write_left);
- left->AddInstruction(call_left);
- left->AddInstruction(read_left);
- left->AddInstruction(return_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* read_right = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_right = new (GetAllocator()) HReturn(read_right);
- right->AddInstruction(write_right);
- right->AddInstruction(read_right);
- right->AddInstruction(return_right);
-
- SetupExit(exit);
-
-  // PerformLSE expects the dominance information to be empty.
- graph_->ClearDominanceInformation();
- PerformLSE();
-
- EXPECT_INS_REMOVED(read_right);
- EXPECT_INS_REMOVED(write_right);
- EXPECT_INS_RETAINED(write_left);
- EXPECT_INS_RETAINED(call_left);
- EXPECT_INS_RETAINED(read_left);
-}
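A sketch of why both right-branch instructions disappear: on a path where the object never escapes, the load is forwarded from the dominating store, and the store is then dead. This is a hedged model of the branch, not the pass's literal output:

  #include <cstdint>

  struct Obj { int32_t field; };

  int32_t RightBefore(Obj* obj) {
    obj->field = 2;     // write_right: removed once the load is gone
    return obj->field;  // read_right: forwarded from the store above
  }

  int32_t RightAfter() {
    return 2;           // only the value survives
  }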
-
-// // ENTRY
-// obj = new Obj();
-// if (parameter_value) {
-// // LEFT
-// // DO NOT ELIMINATE
-// obj.field = 1;
-// while (true) {
-// bool esc = escape(obj);
-// // DO NOT ELIMINATE
-// obj.field = 3;
-// if (esc) break;
-// }
-// // ELIMINATE.
-// return obj.field;
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// return obj.field;
-// }
-// EXIT
-TEST_F(LoadStoreEliminationTest, PartialLoadElimination4) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "entry_post"},
- {"entry_post", "right"},
- {"right", "exit"},
- {"entry_post", "left_pre"},
- {"left_pre", "left_loop"},
- {"left_loop", "left_loop"},
- {"left_loop", "left_finish"},
- {"left_finish", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(entry_post);
- GET_BLOCK(exit);
- GET_BLOCK(left_pre);
- GET_BLOCK(left_loop);
- GET_BLOCK(left_finish);
- GET_BLOCK(right);
-#undef GET_BLOCK
-  // The left-loop's first successor is the break.
- if (left_loop->GetSuccessors()[0] != left_finish) {
- left_loop->SwapSuccessors();
- }
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* goto_entry = new (GetAllocator()) HGoto();
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(goto_entry);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry_post->AddInstruction(if_inst);
-
- HInstruction* write_left_pre = MakeIFieldSet(new_inst, c1, MemberOffset(32));
- HInstruction* goto_left_pre = new (GetAllocator()) HGoto();
- left_pre->AddInstruction(write_left_pre);
- left_pre->AddInstruction(goto_left_pre);
-
- HInstruction* suspend_left_loop = new (GetAllocator()) HSuspendCheck();
- HInstruction* call_left_loop = MakeInvoke(DataType::Type::kBool, { new_inst });
- HInstruction* write_left_loop = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* if_left_loop = new (GetAllocator()) HIf(call_left_loop);
- left_loop->AddInstruction(suspend_left_loop);
- left_loop->AddInstruction(call_left_loop);
- left_loop->AddInstruction(write_left_loop);
- left_loop->AddInstruction(if_left_loop);
- suspend_left_loop->CopyEnvironmentFrom(cls->GetEnvironment());
- call_left_loop->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* read_left_end = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_left_end = new (GetAllocator()) HReturn(read_left_end);
- left_finish->AddInstruction(read_left_end);
- left_finish->AddInstruction(return_left_end);
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* read_right = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_right = new (GetAllocator()) HReturn(read_right);
- right->AddInstruction(write_right);
- right->AddInstruction(read_right);
- right->AddInstruction(return_right);
-
- SetupExit(exit);
-
-  // PerformLSE expects the dominance information to be empty.
- graph_->ClearDominanceInformation();
- PerformLSE();
-
- EXPECT_INS_RETAINED(write_left_pre);
- EXPECT_INS_REMOVED(read_right);
- EXPECT_INS_REMOVED(write_right);
- EXPECT_INS_RETAINED(write_left_loop);
- EXPECT_INS_RETAINED(call_left_loop);
- EXPECT_INS_REMOVED(read_left_end);
-}
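The interesting expectation is the last one: read_left_end is removed even though the object escaped inside the loop. Forwarding only needs the store to dominate the load with no intervening call that could clobber the field, which holds on the loop's exit edge. A sketch under those assumptions:

  #include <cstdint>

  struct Obj { int32_t field; };

  int32_t AfterLoop(Obj* obj) {
    obj->field = 3;     // write_left_loop: retained, obj has escaped
    // if (esc) break;  // no call between the store and the read below
    return obj->field;  // read_left_end: still replaced by the constant 3
  }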
-
-// // ENTRY
-// obj = new Obj();
-// if (parameter_value) {
-// // LEFT
-// // DO NOT ELIMINATE
-// escape(obj);
-// obj.field = 1;
-// } else {
-// // RIGHT
-// // obj hasn't escaped, so its state is invisible to noescape().
-// // ELIMINATE
-// obj.field = 2;
-// noescape();
-// }
-// EXIT
-// ELIMINATE
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PartialLoadElimination5) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* c2 = graph_->GetIntConstant(2);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(write_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* call_right = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(call_right);
- right->AddInstruction(goto_right);
- call_right->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
-  // PerformLSE expects the dominance information to be empty.
- graph_->ClearDominanceInformation();
- PerformLSE();
-
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_REMOVED(write_right);
- EXPECT_INS_RETAINED(write_left);
- EXPECT_INS_RETAINED(call_left);
- EXPECT_INS_RETAINED(call_right);
-}
-
-// // ENTRY
-// obj = new Obj();
-// // Eliminate this one. Object hasn't escaped yet so it's safe.
-// obj.field = 3;
-// noescape();
-// if (parameter_value) {
-// // LEFT
-// // DO NOT ELIMINATE
-// obj.field = 5;
-// escape(obj);
-// obj.field = 1;
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// }
-// EXIT
-// ELIMINATE
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PartialLoadElimination6) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c5 = graph_->GetIntConstant(5);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* call_entry = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_entry);
- entry->AddInstruction(call_entry);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
- call_entry->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_left_start = MakeIFieldSet(new_inst, c5, MemberOffset(32));
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(write_left_start);
- left->AddInstruction(call_left);
- left->AddInstruction(write_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
-  // PerformLSE expects the dominance information to be empty.
- graph_->ClearDominanceInformation();
- PerformLSE();
-
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_REMOVED(write_right);
- EXPECT_INS_REMOVED(write_entry);
- EXPECT_INS_RETAINED(write_left_start);
- EXPECT_INS_RETAINED(write_left);
- EXPECT_INS_RETAINED(call_left);
- EXPECT_INS_RETAINED(call_entry);
-}
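The pattern these expectations encode is the visibility rule: a store is removable exactly while the object is provably unescaped, and a call that never receives the reference changes nothing. A self-contained model (the stubs and names are assumptions, not the test's invokes):

  #include <cstdint>

  struct Obj { int32_t field; };
  Obj* g_sink = nullptr;                // models the outside world
  void escape(Obj* o) { g_sink = o; }   // publishes the object
  void noescape() {}                    // opaque call, takes no reference

  int32_t Example(bool flag) {
    Obj* obj = new Obj{0};
    obj->field = 3;     // removable: nothing outside can see obj yet
    noescape();         // obj was never handed out, still private
    if (flag) {
      obj->field = 5;   // kept: escape(obj) below may read it
      escape(obj);
      obj->field = 1;   // kept: obj is visible from here on
    } else {
      obj->field = 2;   // removable: this path stays private
    }
    return obj->field;  // replaceable with phi(1, 2) per the expectations
  }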
-
-// // ENTRY
-// obj = new Obj();
-// if (parameter_value) {
-// // LEFT
-// // DO NOT ELIMINATE
// obj.field = 1;
// while (true) {
// bool esc = escape(obj);
@@ -4485,7 +2119,7 @@ TEST_F(LoadStoreEliminationTest, PartialLoadPreserved3) {
left_pre->AddInstruction(goto_left_pre);
HInstruction* suspend_left_loop = new (GetAllocator()) HSuspendCheck();
- HInstruction* call_left_loop = MakeInvoke(DataType::Type::kBool, { new_inst });
+ HInstruction* call_left_loop = MakeInvoke(DataType::Type::kBool, {new_inst});
HInstruction* if_left_loop = new (GetAllocator()) HIf(call_left_loop);
left_loop->AddInstruction(suspend_left_loop);
left_loop->AddInstruction(call_left_loop);
@@ -4510,7 +2144,7 @@ TEST_F(LoadStoreEliminationTest, PartialLoadPreserved3) {
SetupExit(exit);
- PerformLSENoPartial(blks);
+ PerformLSE(blks);
EXPECT_INS_RETAINED(write_left_pre) << *write_left_pre;
EXPECT_INS_RETAINED(read_return) << *read_return;
@@ -4602,7 +2236,7 @@ TEST_F(LoadStoreEliminationTest, DISABLED_PartialLoadPreserved4) {
call_left_loop->CopyEnvironmentFrom(cls->GetEnvironment());
HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* call_right = MakeInvoke(DataType::Type::kBool, { new_inst });
+ HInstruction* call_right = MakeInvoke(DataType::Type::kBool, {new_inst});
HInstruction* goto_right = new (GetAllocator()) HGoto();
right->AddInstruction(write_right);
right->AddInstruction(call_right);
@@ -4616,7 +2250,7 @@ TEST_F(LoadStoreEliminationTest, DISABLED_PartialLoadPreserved4) {
SetupExit(exit);
- PerformLSENoPartial(blks);
+ PerformLSE(blks);
EXPECT_INS_RETAINED(read_return);
EXPECT_INS_RETAINED(write_right);
@@ -4702,7 +2336,7 @@ TEST_F(LoadStoreEliminationTest, PartialLoadPreserved5) {
SetupExit(exit);
- PerformLSENoPartial(blks);
+ PerformLSE(blks);
EXPECT_INS_RETAINED(read_bottom);
EXPECT_INS_RETAINED(write_right);
@@ -4785,7 +2419,7 @@ TEST_F(LoadStoreEliminationTest, DISABLED_PartialLoadPreserved6) {
SetupExit(exit);
- PerformLSENoPartial(blks);
+ PerformLSE(blks);
EXPECT_INS_REMOVED(read_bottom);
EXPECT_INS_REMOVED(write_right);
@@ -4794,3882 +2428,4 @@ TEST_F(LoadStoreEliminationTest, DISABLED_PartialLoadPreserved6) {
EXPECT_INS_RETAINED(call_left);
EXPECT_INS_RETAINED(call_entry);
}
-
-// // ENTRY
-// // MOVED TO MATERIALIZATION BLOCK
-// obj = new Obj();
-// ELIMINATE, moved to materialization block. Kept by escape.
-// obj.field = 3;
-// // Make sure this graph isn't broken
-// if (obj ==/!= (STATIC.VALUE|obj|null)) {
-// // partial_BLOCK
-// // REMOVE (either from unreachable or normal PHI creation)
-// obj.field = 4;
-// }
-// if (parameter_value) {
-// // LEFT
-// // DO NOT ELIMINATE
-// escape(obj);
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// }
-// EXIT
-// PREDICATED GET
-// return obj.field
-TEST_P(PartialComparisonTestGroup, PartialComparisonBeforeCohort) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "first_block"},
- {"first_block", "critical_break"},
- {"first_block", "partial"},
- {"partial", "merge"},
- {"critical_break", "merge"},
- {"merge", "left"},
- {"merge", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(first_block);
- GET_BLOCK(merge);
- GET_BLOCK(partial);
- GET_BLOCK(critical_break);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c4 = graph_->GetIntConstant(4);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- ComparisonInstructions cmp_instructions = GetComparisonInstructions(new_inst);
- HInstruction* if_inst = new (GetAllocator()) HIf(cmp_instructions.cmp_);
- first_block->AddInstruction(cls);
- first_block->AddInstruction(new_inst);
- first_block->AddInstruction(write_entry);
- cmp_instructions.AddSetup(first_block);
- first_block->AddInstruction(cmp_instructions.cmp_);
- first_block->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- cmp_instructions.AddEnvironment(cls->GetEnvironment());
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_partial = MakeIFieldSet(new_inst, c4, MemberOffset(32));
- HInstruction* goto_partial = new (GetAllocator()) HGoto();
- partial->AddInstruction(write_partial);
- partial->AddInstruction(goto_partial);
-
- HInstruction* goto_crit_break = new (GetAllocator()) HGoto();
- critical_break->AddInstruction(goto_crit_break);
-
- HInstruction* if_merge = new (GetAllocator()) HIf(bool_value);
- merge->AddInstruction(if_merge);
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- std::vector<HPhi*> merges;
- HPredicatedInstanceFieldGet* pred_get;
- HInstanceFieldSet* init_set;
- std::tie(pred_get, init_set) =
- FindSingleInstructions<HPredicatedInstanceFieldGet, HInstanceFieldSet>(graph_);
- std::tie(merges) = FindAllInstructions<HPhi>(graph_);
- ASSERT_EQ(merges.size(), 3u);
- HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->GetBlock() == breturn;
- });
- HPhi* merge_value_top = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->GetBlock() != breturn;
- });
- HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
- return p->GetType() == DataType::Type::kReference;
- });
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_REMOVED(write_entry);
- EXPECT_INS_REMOVED(write_partial);
- EXPECT_INS_RETAINED(call_left);
- CheckFinalInstruction(if_inst->InputAt(0), ComparisonPlacement::kBeforeEscape);
- EXPECT_INS_EQ(init_set->InputAt(1), merge_value_top);
- EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
- EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return);
-}
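The CheckFinalInstruction(..., kBeforeEscape) call is where the comparison itself is vetted: a comparison evaluated strictly before the escape cohort sees a freshly allocated object, which cannot equal null or any pre-existing value, so the condition can fold to a constant. A compile-time sketch of the assumed folding rules (illustrative names, not the fixture's API):

  // CheckFinalInstruction asserts the graph-level equivalent of these rules.
  enum class Target { kNull, kValue, kSelf };

  constexpr bool FoldEquals(Target t) { return t == Target::kSelf; }
  constexpr bool FoldNotEquals(Target t) { return t != Target::kSelf; }

  static_assert(!FoldEquals(Target::kNull), "obj == null folds to false");
  static_assert(FoldNotEquals(Target::kValue), "obj != STATIC.VALUE folds to true");
  static_assert(FoldEquals(Target::kSelf), "obj == obj folds to true");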
-
-// // ENTRY
-// // MOVED TO MATERIALIZATION BLOCK
-// obj = new Obj();
-// ELIMINATE, moved to materialization block. Kept by escape.
-// obj.field = 3;
-// // Make sure this graph isn't broken
-// if (parameter_value) {
-// if (obj ==/!= (STATIC.VALUE|obj|null)) {
-// // partial_BLOCK
-// obj.field = 4;
-// }
-// // LEFT
-// // DO NOT ELIMINATE
-// escape(obj);
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// }
-// EXIT
-// PREDICATED GET
-// return obj.field
-TEST_P(PartialComparisonTestGroup, PartialComparisonInCohortBeforeEscape) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left_begin"},
- {"left_begin", "partial"},
- {"left_begin", "left_crit_break"},
- {"left_crit_break", "left"},
- {"partial", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(partial);
- GET_BLOCK(left_begin);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(left_crit_break);
- GET_BLOCK(right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(left, {left_crit_break, partial});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c4 = graph_->GetIntConstant(4);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_entry);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- ComparisonInstructions cmp_instructions = GetComparisonInstructions(new_inst);
- HInstruction* if_left_begin = new (GetAllocator()) HIf(cmp_instructions.cmp_);
- cmp_instructions.AddSetup(left_begin);
- left_begin->AddInstruction(cmp_instructions.cmp_);
- left_begin->AddInstruction(if_left_begin);
- cmp_instructions.AddEnvironment(cls->GetEnvironment());
-
- left_crit_break->AddInstruction(new (GetAllocator()) HGoto());
-
- HInstruction* write_partial = MakeIFieldSet(new_inst, c4, MemberOffset(32));
- HInstruction* goto_partial = new (GetAllocator()) HGoto();
- partial->AddInstruction(write_partial);
- partial->AddInstruction(goto_partial);
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- std::vector<HPhi*> merges;
- HInstanceFieldSet* init_set =
- FindSingleInstruction<HInstanceFieldSet>(graph_, left_begin->GetSinglePredecessor());
- HInstanceFieldSet* partial_set = FindSingleInstruction<HInstanceFieldSet>(graph_, partial);
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_);
- std::tie(merges) = FindAllInstructions<HPhi>(graph_);
- ASSERT_EQ(merges.size(), 2u);
- HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32;
- });
- HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
- return p->GetType() == DataType::Type::kReference;
- });
- EXPECT_EQ(merge_value_return->GetBlock(), breturn)
- << blks.GetName(merge_value_return->GetBlock());
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_REMOVED(write_entry);
- EXPECT_INS_RETAINED(write_partial);
- EXPECT_INS_RETAINED(call_left);
- CheckFinalInstruction(if_left_begin->InputAt(0), ComparisonPlacement::kInEscape);
- EXPECT_INS_EQ(init_set->InputAt(1), c3);
- EXPECT_INS_EQ(partial_set->InputAt(0), init_set->InputAt(0));
- EXPECT_INS_EQ(partial_set->InputAt(1), c4);
- EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
- EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return);
-}
-
-// // ENTRY
-// // MOVED TO MATERIALIZATION BLOCK
-// obj = new Obj();
-// ELIMINATE, moved to materialization block. Kept by escape.
-// obj.field = 3;
-// // Make sure this graph isn't broken
-// if (parameter_value) {
-// // LEFT
-// // DO NOT ELIMINATE
-// escape(obj);
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// }
-// if (obj ==/!= (STATIC.VALUE|obj|null)) {
-// // partial_BLOCK
-// obj.field = 4;
-// }
-// EXIT
-// PREDICATED GET
-// return obj.field
-TEST_P(PartialComparisonTestGroup, PartialComparisonAfterCohort) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "merge"},
- {"right", "merge"},
- {"merge", "critical_break"},
- {"critical_break", "breturn"},
- {"merge", "partial"},
- {"partial", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(partial);
- GET_BLOCK(critical_break);
- GET_BLOCK(merge);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {critical_break, partial});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c4 = graph_->GetIntConstant(4);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_entry);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- ComparisonInstructions cmp_instructions = GetComparisonInstructions(new_inst);
- HInstruction* if_merge = new (GetAllocator()) HIf(cmp_instructions.cmp_);
- cmp_instructions.AddSetup(merge);
- merge->AddInstruction(cmp_instructions.cmp_);
- merge->AddInstruction(if_merge);
- cmp_instructions.AddEnvironment(cls->GetEnvironment());
-
- HInstanceFieldSet* write_partial = MakeIFieldSet(new_inst, c4, MemberOffset(32));
- HInstruction* goto_partial = new (GetAllocator()) HGoto();
- partial->AddInstruction(write_partial);
- partial->AddInstruction(goto_partial);
-
- HInstruction* goto_crit_break = new (GetAllocator()) HGoto();
- critical_break->AddInstruction(goto_crit_break);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- std::vector<HPhi*> merges;
- HInstanceFieldSet* init_set =
- FindSingleInstruction<HInstanceFieldSet>(graph_, left->GetSinglePredecessor());
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_);
- std::tie(merges) = FindAllInstructions<HPhi>(graph_);
- ASSERT_EQ(merges.size(), 3u);
- HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->GetBlock() == breturn;
- });
- HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
- return p->GetType() == DataType::Type::kReference;
- });
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_REMOVED(write_entry);
- EXPECT_INS_RETAINED(write_partial);
- EXPECT_TRUE(write_partial->GetIsPredicatedSet());
- EXPECT_INS_RETAINED(call_left);
- CheckFinalInstruction(if_merge->InputAt(0), ComparisonPlacement::kAfterEscape);
- EXPECT_INS_EQ(init_set->InputAt(1), c3);
- ASSERT_TRUE(write_partial->InputAt(0)->IsPhi());
- EXPECT_INS_EQ(write_partial->InputAt(0)->InputAt(0), init_set->InputAt(0));
- EXPECT_INS_EQ(write_partial->InputAt(1), c4);
- EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
- EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return);
-}
-
-// // ENTRY
-// // MOVED TO MATERIALIZATION BLOCK
-// obj = new Obj();
-// ELIMINATE, moved to materialization block. Kept by escape.
-// obj.field = 3;
-// // Make sure this graph isn't broken
-// if (parameter_value) {
-// // LEFT
-// // DO NOT ELIMINATE
-// escape(obj);
-// if (obj ==/!= (STATIC.VALUE|obj|null)) {
-// // partial_BLOCK
-// obj.field = 4;
-// }
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// }
-// EXIT
-// PREDICATED GET
-// return obj.field
-TEST_P(PartialComparisonTestGroup, PartialComparisonInCohortAfterEscape) {
- PartialComparisonKind kind = GetParam();
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"left", "partial"},
- {"partial", "left_end"},
- {"left", "left_crit_break"},
- {"left_crit_break", "left_end"},
- {"left_end", "breturn"},
- {"entry", "right"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(partial);
- GET_BLOCK(left_end);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(left_crit_break);
- GET_BLOCK(right);
-#undef GET_BLOCK
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c4 = graph_->GetIntConstant(4);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_entry);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- ComparisonInstructions cmp_instructions = GetComparisonInstructions(new_inst);
- HIf* if_left = new (GetAllocator()) HIf(cmp_instructions.cmp_);
- left->AddInstruction(call_left);
- cmp_instructions.AddSetup(left);
- left->AddInstruction(cmp_instructions.cmp_);
- left->AddInstruction(if_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
- cmp_instructions.AddEnvironment(cls->GetEnvironment());
- if (if_left->IfTrueSuccessor() != partial) {
- left->SwapSuccessors();
- }
-
- HInstruction* write_partial = MakeIFieldSet(new_inst, c4, MemberOffset(32));
- HInstruction* goto_partial = new (GetAllocator()) HGoto();
- partial->AddInstruction(write_partial);
- partial->AddInstruction(goto_partial);
-
- HInstruction* goto_left_crit_break = new (GetAllocator()) HGoto();
- left_crit_break->AddInstruction(goto_left_crit_break);
-
- HInstruction* goto_left_end = new (GetAllocator()) HGoto();
- left_end->AddInstruction(goto_left_end);
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- std::vector<HPhi*> merges;
- std::vector<HInstanceFieldSet*> sets;
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_);
- std::tie(merges, sets) = FindAllInstructions<HPhi, HInstanceFieldSet>(graph_);
- ASSERT_EQ(merges.size(), 2u);
- ASSERT_EQ(sets.size(), 2u);
- HInstanceFieldSet* init_set = FindOrNull(sets.begin(), sets.end(), [&](HInstanceFieldSet* s) {
- return s->GetBlock()->GetSingleSuccessor() == left;
- });
- EXPECT_INS_EQ(init_set->InputAt(1), c3);
- HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->GetBlock() == breturn;
- });
- HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
- return p->GetType() == DataType::Type::kReference;
- });
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_REMOVED(write_entry);
- if (kind.IsPossiblyTrue()) {
- EXPECT_INS_RETAINED(write_partial);
- EXPECT_TRUE(std::find(sets.begin(), sets.end(), write_partial) != sets.end());
- }
- EXPECT_INS_RETAINED(call_left);
- CheckFinalInstruction(if_left->InputAt(0), ComparisonPlacement::kInEscape);
- EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
- EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return);
-}
-
-INSTANTIATE_TEST_SUITE_P(
- LoadStoreEliminationTest,
- PartialComparisonTestGroup,
- testing::Values(PartialComparisonKind{PartialComparisonKind::Type::kEquals,
- PartialComparisonKind::Target::kNull,
- PartialComparisonKind::Position::kLeft},
- PartialComparisonKind{PartialComparisonKind::Type::kEquals,
- PartialComparisonKind::Target::kNull,
- PartialComparisonKind::Position::kRight},
- PartialComparisonKind{PartialComparisonKind::Type::kEquals,
- PartialComparisonKind::Target::kValue,
- PartialComparisonKind::Position::kLeft},
- PartialComparisonKind{PartialComparisonKind::Type::kEquals,
- PartialComparisonKind::Target::kValue,
- PartialComparisonKind::Position::kRight},
- PartialComparisonKind{PartialComparisonKind::Type::kEquals,
- PartialComparisonKind::Target::kSelf,
- PartialComparisonKind::Position::kLeft},
- PartialComparisonKind{PartialComparisonKind::Type::kNotEquals,
- PartialComparisonKind::Target::kNull,
- PartialComparisonKind::Position::kLeft},
- PartialComparisonKind{PartialComparisonKind::Type::kNotEquals,
- PartialComparisonKind::Target::kNull,
- PartialComparisonKind::Position::kRight},
- PartialComparisonKind{PartialComparisonKind::Type::kNotEquals,
- PartialComparisonKind::Target::kSelf,
- PartialComparisonKind::Position::kLeft},
- PartialComparisonKind{PartialComparisonKind::Type::kNotEquals,
- PartialComparisonKind::Target::kValue,
- PartialComparisonKind::Position::kLeft},
- PartialComparisonKind{PartialComparisonKind::Type::kNotEquals,
- PartialComparisonKind::Target::kValue,
- PartialComparisonKind::Position::kRight}));
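The ten parameterizations enumerate every comparison shape the rewrites must handle. A hypothetical rendering of what each tuple stands for (the helper is illustrative, not the fixture's real API):

  #include <string>

  std::string Render(bool is_equals, const std::string& target, bool obj_on_left) {
    const std::string op = is_equals ? " == " : " != ";
    return obj_on_left ? "obj" + op + target : target + op + "obj";
  }
  // Render(true,  "null", true)          -> "obj == null"         (kEquals,    kNull,  kLeft)
  // Render(false, "STATIC.VALUE", false) -> "STATIC.VALUE != obj" (kNotEquals, kValue, kRight)
  // Render(true,  "obj", true)           -> "obj == obj"          (kEquals,    kSelf,  kLeft)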
-
-// // ENTRY
-// obj = new Obj();
-// if (parameter_value) {
-// // LEFT
-// escape(obj);
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// }
-// EXIT
-// predicated-ELIMINATE
-// obj.field = 3;
-TEST_F(LoadStoreEliminationTest, PredicatedStore1) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- InitGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left, right});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* null_const = graph_->GetNullConstant();
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstanceFieldSet* write_bottom = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturnVoid();
- breturn->AddInstruction(write_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- EXPECT_INS_RETAINED(write_bottom);
- EXPECT_TRUE(write_bottom->GetIsPredicatedSet());
- EXPECT_INS_REMOVED(write_right);
- EXPECT_INS_RETAINED(call_left);
- HPhi* merge_alloc = FindSingleInstruction<HPhi>(graph_, breturn);
- ASSERT_NE(merge_alloc, nullptr);
- EXPECT_TRUE(merge_alloc->InputAt(0)->IsNewInstance()) << *merge_alloc;
- EXPECT_EQ(merge_alloc->InputAt(0)->InputAt(0), cls) << *merge_alloc << " cls? " << *cls;
- EXPECT_EQ(merge_alloc->InputAt(1), null_const);
-}
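GetIsPredicatedSet() marks the surviving store as conditional on materialization, the store-side mirror of the predicated get: write through only when the receiver exists on this path. A model of that contract (assumed phrasing, not ART code):

  #include <cstdint>

  struct Obj { int32_t field; };

  // target is the reference phi: the materialized instance on the escaped
  // path, the null constant on the fully eliminated one.
  void PredicatedSet(Obj* target, int32_t value) {
    if (target != nullptr) {
      target->field = value;
    }
  }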
-
-// // ENTRY
-// obj = new Obj();
-// obj.field = 3;
-// if (parameter_value) {
-// // LEFT
-// escape(obj);
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// }
-// // MERGE
-// if (second_param) {
-// // NON_ESCAPE
-// obj.field = 1;
-// noescape();
-// }
-// EXIT
-// predicated-ELIMINATE
-// obj.field = 4;
-TEST_F(LoadStoreEliminationTest, PredicatedStore2) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "merge"},
- {"right", "merge"},
- {"merge", "non_escape"},
- {"non_escape", "breturn"},
- {"merge", "merge_crit_break"},
- {"merge_crit_break", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
- GET_BLOCK(merge);
- GET_BLOCK(merge_crit_break);
- GET_BLOCK(non_escape);
-#undef GET_BLOCK
- EnsurePredecessorOrder(merge, {left, right});
- EnsurePredecessorOrder(breturn, {merge_crit_break, non_escape});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* bool_value2 = MakeParam(DataType::Type::kBool);
- HInstruction* null_const = graph_->GetNullConstant();
-  HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c4 = graph_->GetIntConstant(4);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_entry);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* merge_if = new (GetAllocator()) HIf(bool_value2);
- merge->AddInstruction(merge_if);
-
- merge_crit_break->AddInstruction(new (GetAllocator()) HGoto());
-
- HInstruction* write_non_escape = MakeIFieldSet(new_inst, c1, MemberOffset(32));
- HInstruction* non_escape_call = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* non_escape_goto = new (GetAllocator()) HGoto();
- non_escape->AddInstruction(write_non_escape);
- non_escape->AddInstruction(non_escape_call);
- non_escape->AddInstruction(non_escape_goto);
- non_escape_call->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstanceFieldSet* write_bottom = MakeIFieldSet(new_inst, c4, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturnVoid();
- breturn->AddInstruction(write_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- EXPECT_INS_RETAINED(write_bottom);
- EXPECT_TRUE(write_bottom->GetIsPredicatedSet()) << *write_bottom;
- EXPECT_INS_REMOVED(write_right);
- EXPECT_INS_RETAINED(call_left);
- HInstanceFieldSet* pred_set = FindSingleInstruction<HInstanceFieldSet>(graph_, breturn);
- HPhi* merge_alloc = FindSingleInstruction<HPhi>(graph_);
- ASSERT_NE(merge_alloc, nullptr);
- EXPECT_TRUE(merge_alloc->InputAt(0)->IsNewInstance()) << *merge_alloc;
- EXPECT_INS_EQ(merge_alloc->InputAt(0)->InputAt(0), cls) << " phi is: " << *merge_alloc;
- EXPECT_INS_EQ(merge_alloc->InputAt(1), null_const);
- ASSERT_NE(pred_set, nullptr);
- EXPECT_TRUE(pred_set->GetIsPredicatedSet()) << *pred_set;
- EXPECT_INS_EQ(pred_set->InputAt(0), merge_alloc);
-}
-
-// // ENTRY
-// obj = new Obj();
-// obj.field = 3;
-// if (parameter_value) {
-// // LEFT
-// escape(obj);
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// }
-// EXIT
-// predicated-ELIMINATE
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PredicatedLoad1) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left, right});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* null_const = graph_->GetNullConstant();
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_entry);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_REMOVED(write_right);
- EXPECT_INS_RETAINED(call_left);
- std::vector<HPhi*> merges;
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- std::tie(merges) = FindAllInstructions<HPhi>(graph_, breturn);
- ASSERT_EQ(merges.size(), 2u);
- HPhi* merge_value_return = FindOrNull(
- merges.begin(), merges.end(), [](HPhi* p) { return p->GetType() == DataType::Type::kInt32; });
- HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
- return p->GetType() == DataType::Type::kReference;
- });
- ASSERT_NE(merge_alloc, nullptr);
- EXPECT_TRUE(merge_alloc->InputAt(0)->IsNewInstance()) << *merge_alloc;
- EXPECT_EQ(merge_alloc->InputAt(0)->InputAt(0), cls) << *merge_alloc << " cls? " << *cls;
- EXPECT_EQ(merge_alloc->InputAt(1), null_const);
- ASSERT_NE(pred_get, nullptr);
- EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
- EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return) << " pred-get is: " << *pred_get;
- EXPECT_INS_EQ(merge_value_return->InputAt(0), graph_->GetIntConstant(0))
- << " merge val is: " << *merge_value_return;
- EXPECT_INS_EQ(merge_value_return->InputAt(1), c2) << " merge val is: " << *merge_value_return;
-}
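The check that merge_value_return's first input is the int constant 0 deserves a note: the escaped path has no tracked field value, so the default-value phi carries the type's zero constant purely as a placeholder, and the predicated get never reads it there because its target is non-null. A usage sketch of the PredicatedGet model from above, under those assumptions:

  #include <cassert>
  #include <cstdint>

  struct Obj { int32_t field; };

  int32_t PredicatedGet(const Obj* target, int32_t default_value) {
    return target != nullptr ? target->field : default_value;
  }

  void CheckPredicatedLoad1() {
    Obj escaped{3};                           // whatever escape() left behind
    assert(PredicatedGet(&escaped, 0) == 3);  // left: placeholder 0 unused
    assert(PredicatedGet(nullptr, 2) == 2);   // right: tracked value wins
  }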
-
-// // ENTRY
-// obj1 = new Obj1();
-// obj2 = new Obj2();
-// obj1.field = 3;
-// obj2.field = 13;
-// if (parameter_value) {
-// // LEFT
-// escape(obj1);
-// escape(obj2);
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj1.field = 2;
-// obj2.field = 12;
-// }
-// EXIT
-// predicated-ELIMINATE
-// return obj1.field + obj2.field
-TEST_F(LoadStoreEliminationTest, MultiPredicatedLoad1) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left, right});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c12 = graph_->GetIntConstant(12);
- HInstruction* c13 = graph_->GetIntConstant(13);
-
- HInstruction* cls1 = MakeClassLoad();
- HInstruction* cls2 = MakeClassLoad();
- HInstruction* new_inst1 = MakeNewInstance(cls1);
- HInstruction* new_inst2 = MakeNewInstance(cls2);
- HInstruction* write_entry1 = MakeIFieldSet(new_inst1, c3, MemberOffset(32));
- HInstruction* write_entry2 = MakeIFieldSet(new_inst2, c13, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls1);
- entry->AddInstruction(cls2);
- entry->AddInstruction(new_inst1);
- entry->AddInstruction(new_inst2);
- entry->AddInstruction(write_entry1);
- entry->AddInstruction(write_entry2);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls1, {});
- cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
- new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
- new_inst2->CopyEnvironmentFrom(cls1->GetEnvironment());
-
- HInstruction* call_left1 = MakeInvoke(DataType::Type::kVoid, { new_inst1 });
- HInstruction* call_left2 = MakeInvoke(DataType::Type::kVoid, { new_inst2 });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left1);
- left->AddInstruction(call_left2);
- left->AddInstruction(goto_left);
- call_left1->CopyEnvironmentFrom(cls1->GetEnvironment());
- call_left2->CopyEnvironmentFrom(cls1->GetEnvironment());
-
- HInstruction* write_right1 = MakeIFieldSet(new_inst1, c2, MemberOffset(32));
- HInstruction* write_right2 = MakeIFieldSet(new_inst2, c12, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right1);
- right->AddInstruction(write_right2);
- right->AddInstruction(goto_right);
-
- HInstruction* read_bottom1 = MakeIFieldGet(new_inst1, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* read_bottom2 = MakeIFieldGet(new_inst2, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* combine =
- new (GetAllocator()) HAdd(DataType::Type::kInt32, read_bottom1, read_bottom2);
- HInstruction* return_exit = new (GetAllocator()) HReturn(combine);
- breturn->AddInstruction(read_bottom1);
- breturn->AddInstruction(read_bottom2);
- breturn->AddInstruction(combine);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- EXPECT_INS_REMOVED(read_bottom1);
- EXPECT_INS_REMOVED(read_bottom2);
- EXPECT_INS_REMOVED(write_right1);
- EXPECT_INS_REMOVED(write_right2);
- EXPECT_INS_RETAINED(call_left1);
- EXPECT_INS_RETAINED(call_left2);
- std::vector<HPhi*> merges;
- std::vector<HPredicatedInstanceFieldGet*> pred_gets;
- std::tie(merges, pred_gets) =
- FindAllInstructions<HPhi, HPredicatedInstanceFieldGet>(graph_, breturn);
- ASSERT_EQ(merges.size(), 4u);
- ASSERT_EQ(pred_gets.size(), 2u);
- HPhi* merge_value_return1 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->InputAt(1) == c2;
- });
- HPhi* merge_value_return2 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->InputAt(1) == c12;
- });
- HPhi* merge_alloc1 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kReference &&
- p->InputAt(0)->IsNewInstance() &&
- p->InputAt(0)->InputAt(0) == cls1;
- });
- HPhi* merge_alloc2 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kReference &&
- p->InputAt(0)->IsNewInstance() &&
- p->InputAt(0)->InputAt(0) == cls2;
- });
- ASSERT_NE(merge_alloc1, nullptr);
- ASSERT_NE(merge_alloc2, nullptr);
- EXPECT_EQ(merge_alloc1->InputAt(1), graph_->GetNullConstant());
- EXPECT_EQ(merge_alloc2->InputAt(1), graph_->GetNullConstant());
- HPredicatedInstanceFieldGet* pred_get1 =
- FindOrNull(pred_gets.begin(), pred_gets.end(), [&](HPredicatedInstanceFieldGet* pg) {
- return pg->GetTarget() == merge_alloc1;
- });
- HPredicatedInstanceFieldGet* pred_get2 =
- FindOrNull(pred_gets.begin(), pred_gets.end(), [&](HPredicatedInstanceFieldGet* pg) {
- return pg->GetTarget() == merge_alloc2;
- });
- ASSERT_NE(pred_get1, nullptr);
- EXPECT_INS_EQ(pred_get1->GetTarget(), merge_alloc1);
- EXPECT_INS_EQ(pred_get1->GetDefaultValue(), merge_value_return1)
- << " pred-get is: " << *pred_get1;
- EXPECT_INS_EQ(merge_value_return1->InputAt(0), graph_->GetIntConstant(0))
- << " merge val is: " << *merge_value_return1;
- EXPECT_INS_EQ(merge_value_return1->InputAt(1), c2) << " merge val is: " << *merge_value_return1;
- ASSERT_NE(pred_get2, nullptr);
- EXPECT_INS_EQ(pred_get2->GetTarget(), merge_alloc2);
- EXPECT_INS_EQ(pred_get2->GetDefaultValue(), merge_value_return2)
- << " pred-get is: " << *pred_get2;
- EXPECT_INS_EQ(merge_value_return2->InputAt(0), graph_->GetIntConstant(0))
- << " merge val is: " << *merge_value_return1;
- EXPECT_INS_EQ(merge_value_return2->InputAt(1), c12) << " merge val is: " << *merge_value_return1;
-}
-
-// // ENTRY
-// obj1 = new Obj1();
-// obj2 = new Obj2();
-// obj1.field = 3;
-// obj2.field = 13;
-// if (parameter_value) {
-// // LEFT
-// escape(obj1);
-// // ELIMINATE
-// obj2.field = 12;
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj1.field = 2;
-// escape(obj2);
-// }
-// EXIT
-// predicated-ELIMINATE
-// return obj1.field + obj2.field
-TEST_F(LoadStoreEliminationTest, MultiPredicatedLoad2) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left, right});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c12 = graph_->GetIntConstant(12);
- HInstruction* c13 = graph_->GetIntConstant(13);
-
- HInstruction* cls1 = MakeClassLoad();
- HInstruction* cls2 = MakeClassLoad();
- HInstruction* new_inst1 = MakeNewInstance(cls1);
- HInstruction* new_inst2 = MakeNewInstance(cls2);
- HInstruction* write_entry1 = MakeIFieldSet(new_inst1, c3, MemberOffset(32));
- HInstruction* write_entry2 = MakeIFieldSet(new_inst2, c13, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls1);
- entry->AddInstruction(cls2);
- entry->AddInstruction(new_inst1);
- entry->AddInstruction(new_inst2);
- entry->AddInstruction(write_entry1);
- entry->AddInstruction(write_entry2);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls1, {});
- cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
- new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
- new_inst2->CopyEnvironmentFrom(cls1->GetEnvironment());
-
- HInstruction* call_left1 = MakeInvoke(DataType::Type::kVoid, { new_inst1 });
- HInstruction* write_left2 = MakeIFieldSet(new_inst2, c12, MemberOffset(32));
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left1);
- left->AddInstruction(write_left2);
- left->AddInstruction(goto_left);
- call_left1->CopyEnvironmentFrom(cls1->GetEnvironment());
-
- HInstruction* write_right1 = MakeIFieldSet(new_inst1, c2, MemberOffset(32));
- HInstruction* call_right2 = MakeInvoke(DataType::Type::kVoid, { new_inst2 });
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right1);
- right->AddInstruction(call_right2);
- right->AddInstruction(goto_right);
- call_right2->CopyEnvironmentFrom(cls1->GetEnvironment());
-
- HInstruction* read_bottom1 = MakeIFieldGet(new_inst1, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* read_bottom2 = MakeIFieldGet(new_inst2, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* combine =
- new (GetAllocator()) HAdd(DataType::Type::kInt32, read_bottom1, read_bottom2);
- HInstruction* return_exit = new (GetAllocator()) HReturn(combine);
- breturn->AddInstruction(read_bottom1);
- breturn->AddInstruction(read_bottom2);
- breturn->AddInstruction(combine);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
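-  // obj1 escapes only on the left and obj2 only on the right, so the store to
-  // each object on its non-escaping path is removed and each final read
-  // becomes a predicated get on its own allocation phi.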
- EXPECT_INS_REMOVED(read_bottom1);
- EXPECT_INS_REMOVED(read_bottom2);
- EXPECT_INS_REMOVED(write_right1);
- EXPECT_INS_REMOVED(write_left2);
- EXPECT_INS_RETAINED(call_left1);
- EXPECT_INS_RETAINED(call_right2);
- std::vector<HPhi*> merges;
- std::vector<HPredicatedInstanceFieldGet*> pred_gets;
- std::tie(merges, pred_gets) =
- FindAllInstructions<HPhi, HPredicatedInstanceFieldGet>(graph_, breturn);
- ASSERT_EQ(merges.size(), 4u);
- ASSERT_EQ(pred_gets.size(), 2u);
- HPhi* merge_value_return1 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->InputAt(1) == c2;
- });
- HPhi* merge_value_return2 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->InputAt(0) == c12;
- });
- HPhi* merge_alloc1 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kReference && p->InputAt(1)->IsNullConstant();
- });
- HPhi* merge_alloc2 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kReference && p->InputAt(0)->IsNullConstant();
- });
- ASSERT_NE(merge_alloc1, nullptr);
- ASSERT_NE(merge_alloc2, nullptr);
- EXPECT_TRUE(merge_alloc1->InputAt(0)->IsNewInstance()) << *merge_alloc1;
- EXPECT_INS_EQ(merge_alloc1->InputAt(0)->InputAt(0), cls1) << *merge_alloc1;
- EXPECT_INS_EQ(merge_alloc1->InputAt(1), graph_->GetNullConstant());
- EXPECT_TRUE(merge_alloc2->InputAt(1)->IsNewInstance()) << *merge_alloc2;
- EXPECT_INS_EQ(merge_alloc2->InputAt(1)->InputAt(0), cls2) << *merge_alloc2;
- EXPECT_INS_EQ(merge_alloc2->InputAt(0), graph_->GetNullConstant());
- HPredicatedInstanceFieldGet* pred_get1 =
- FindOrNull(pred_gets.begin(), pred_gets.end(), [&](HPredicatedInstanceFieldGet* pg) {
- return pg->GetTarget() == merge_alloc1;
- });
- HPredicatedInstanceFieldGet* pred_get2 =
- FindOrNull(pred_gets.begin(), pred_gets.end(), [&](HPredicatedInstanceFieldGet* pg) {
- return pg->GetTarget() == merge_alloc2;
- });
- ASSERT_NE(pred_get1, nullptr);
- EXPECT_INS_EQ(pred_get1->GetTarget(), merge_alloc1);
- EXPECT_INS_EQ(pred_get1->GetDefaultValue(), merge_value_return1)
- << " pred-get is: " << *pred_get1;
- EXPECT_INS_EQ(merge_value_return1->InputAt(0), graph_->GetIntConstant(0))
- << " merge val is: " << *merge_value_return1;
- EXPECT_INS_EQ(merge_value_return1->InputAt(1), c2) << " merge val is: " << *merge_value_return1;
- ASSERT_NE(pred_get2, nullptr);
- EXPECT_INS_EQ(pred_get2->GetTarget(), merge_alloc2);
- EXPECT_INS_EQ(pred_get2->GetDefaultValue(), merge_value_return2)
- << " pred-get is: " << *pred_get2;
- EXPECT_INS_EQ(merge_value_return2->InputAt(1), graph_->GetIntConstant(0))
- << " merge val is: " << *merge_value_return1;
- EXPECT_INS_EQ(merge_value_return2->InputAt(0), c12) << " merge val is: " << *merge_value_return1;
-}
-
-// Based on structure seen in `java.util.List
-// java.util.Collections.checkedList(java.util.List, java.lang.Class)`
-// Incorrect accounting would cause attempts to materialize both obj1 and obj2
-// in each of the materialization blocks.
-// // ENTRY
-// Obj obj;
-// if (param1) {
-// // needs to be moved after param2 check
-// obj1 = new Obj1();
-// obj1.foo = 33;
-// if (param2) {
-// return obj1.foo;
-// }
-// obj = obj1;
-// } else {
-// obj2 = new Obj2();
-// obj2.foo = 44;
-// if (param2) {
-// return obj2.foo;
-// }
-// obj = obj2;
-// }
-// EXIT
-// // obj = PHI[obj1, obj2]
-// // NB The phi acts as an escape for both obj1 and obj2, meaning that as far
-// // as the LSA is concerned the escape frontier is left_end->breturn and
-// // right_end->breturn for both, even though only one of the objects is
-// // actually live at each edge.
-// // TODO: In the future we should track liveness through PHIs, which would
-// // allow us to remove the allocation in this test entirely.
-// return obj.foo;
-TEST_F(LoadStoreEliminationTest, MultiPredicatedLoad3) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"left", "left_end"},
- {"left_end", "breturn"},
- {"left", "left_exit_early"},
- {"left_exit_early", "exit"},
- {"entry", "right"},
- {"right", "right_end"},
- {"right_end", "breturn"},
- {"right", "right_exit_early"},
- {"right_exit_early", "exit"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(left_end);
- GET_BLOCK(left_exit_early);
- GET_BLOCK(right);
- GET_BLOCK(right_end);
- GET_BLOCK(right_exit_early);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left_end, right_end});
- HInstruction* param1 = MakeParam(DataType::Type::kBool);
- HInstruction* param2 = MakeParam(DataType::Type::kBool);
- HInstruction* c33 = graph_->GetIntConstant(33);
- HInstruction* c44 = graph_->GetIntConstant(44);
-
- HInstruction* if_inst = new (GetAllocator()) HIf(param1);
- entry->AddInstruction(if_inst);
-
- HInstruction* cls1 = MakeClassLoad();
- HInstruction* new_inst1 = MakeNewInstance(cls1);
- HInstruction* write1 = MakeIFieldSet(new_inst1, c33, MemberOffset(32));
- HInstruction* if_left = new (GetAllocator()) HIf(param2);
- left->AddInstruction(cls1);
- left->AddInstruction(new_inst1);
- left->AddInstruction(write1);
- left->AddInstruction(if_left);
- ManuallyBuildEnvFor(cls1, {});
- new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
-
- left_end->AddInstruction(new (GetAllocator()) HGoto());
-
- HInstruction* early_exit_left_read =
- MakeIFieldGet(new_inst1, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* early_exit_left_return = new (GetAllocator()) HReturn(early_exit_left_read);
- left_exit_early->AddInstruction(early_exit_left_read);
- left_exit_early->AddInstruction(early_exit_left_return);
-
- HInstruction* cls2 = MakeClassLoad();
- HInstruction* new_inst2 = MakeNewInstance(cls2);
- HInstruction* write2 = MakeIFieldSet(new_inst2, c44, MemberOffset(32));
- HInstruction* if_right = new (GetAllocator()) HIf(param2);
- right->AddInstruction(cls2);
- right->AddInstruction(new_inst2);
- right->AddInstruction(write2);
- right->AddInstruction(if_right);
- cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
- new_inst2->CopyEnvironmentFrom(cls2->GetEnvironment());
-
- right_end->AddInstruction(new (GetAllocator()) HGoto());
-
- HInstruction* early_exit_right_read =
- MakeIFieldGet(new_inst2, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* early_exit_right_return = new (GetAllocator()) HReturn(early_exit_right_read);
- right_exit_early->AddInstruction(early_exit_right_read);
- right_exit_early->AddInstruction(early_exit_right_return);
-
- HPhi* bottom_phi = MakePhi({new_inst1, new_inst2});
- HInstruction* read_bottom = MakeIFieldGet(bottom_phi, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddPhi(bottom_phi);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
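-  // The early-exit reads fold to the stored constants; each allocation is sunk
-  // into the new materialization block feeding the bottom phi.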
- EXPECT_INS_REMOVED(early_exit_left_read);
- EXPECT_INS_REMOVED(early_exit_right_read);
- EXPECT_INS_RETAINED(bottom_phi);
- EXPECT_INS_RETAINED(read_bottom);
- EXPECT_INS_EQ(early_exit_left_return->InputAt(0), c33);
- EXPECT_INS_EQ(early_exit_right_return->InputAt(0), c44);
-  // These assert that there is exactly one HNewInstance in each of the given blocks.
- HNewInstance* moved_ni1 =
- FindSingleInstruction<HNewInstance>(graph_, left_end->GetSinglePredecessor());
- HNewInstance* moved_ni2 =
- FindSingleInstruction<HNewInstance>(graph_, right_end->GetSinglePredecessor());
- ASSERT_NE(moved_ni1, nullptr);
- ASSERT_NE(moved_ni2, nullptr);
- EXPECT_INS_EQ(bottom_phi->InputAt(0), moved_ni1);
- EXPECT_INS_EQ(bottom_phi->InputAt(1), moved_ni2);
-}
-
-// // ENTRY
-// obj = new Obj();
-// if (param1) {
-// obj.field = 3;
-// noescape();
-// } else {
-// obj.field = 2;
-// noescape();
-// }
-// int abc;
-// if (param2) {
-// // LEFT
-// abc = 4;
-// escape(obj);
-// } else {
-// // RIGHT
-// // ELIMINATE
-// noescape();
-// abc = obj.field + 4;
-// }
-// abc = phi
-// EXIT
-// predicated-ELIMINATE
-// return obj.field + abc
-TEST_F(LoadStoreEliminationTest, PredicatedLoad4) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "start_left"},
- {"entry", "start_right"},
- {"start_left", "mid"},
- {"start_right", "mid"},
- {"mid", "left"},
- {"mid", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
- GET_BLOCK(mid);
- GET_BLOCK(start_left);
- GET_BLOCK(start_right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left, right});
- EnsurePredecessorOrder(mid, {start_left, start_right});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* bool_value2 = MakeParam(DataType::Type::kBool);
- HInstruction* null_const = graph_->GetNullConstant();
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c4 = graph_->GetIntConstant(4);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_start_left = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* call_start_left = MakeInvoke(DataType::Type::kVoid, { });
- start_left->AddInstruction(write_start_left);
- start_left->AddInstruction(call_start_left);
- start_left->AddInstruction(new (GetAllocator()) HGoto());
- call_start_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_start_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* call_start_right = MakeInvoke(DataType::Type::kVoid, { });
- start_right->AddInstruction(write_start_right);
- start_right->AddInstruction(call_start_right);
- start_right->AddInstruction(new (GetAllocator()) HGoto());
- call_start_right->CopyEnvironmentFrom(cls->GetEnvironment());
-
- mid->AddInstruction(new (GetAllocator()) HIf(bool_value2));
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_right = MakeInvoke(DataType::Type::kVoid, { });
- HInstruction* read_right = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* add_right = new (GetAllocator()) HAdd(DataType::Type::kInt32, read_right, c4);
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(call_right);
- right->AddInstruction(read_right);
- right->AddInstruction(add_right);
- right->AddInstruction(goto_right);
- call_right->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HPhi* phi_bottom = MakePhi({c4, add_right});
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* add_bottom =
- new (GetAllocator()) HAdd(DataType::Type::kInt32, read_bottom, phi_bottom);
- HInstruction* return_exit = new (GetAllocator()) HReturn(add_bottom);
- breturn->AddPhi(phi_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(add_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
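-  // Both reads collapse into a single predicated get; a new phi in mid merges
-  // the 3 and 2 stored on the two entry paths.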
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_REMOVED(read_right);
- EXPECT_INS_RETAINED(call_left);
- EXPECT_INS_RETAINED(call_right);
- EXPECT_INS_RETAINED(call_start_left);
- EXPECT_INS_RETAINED(call_start_right);
- std::vector<HPhi*> merges;
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- std::tie(merges) = FindAllInstructions<HPhi>(graph_, breturn);
- ASSERT_EQ(merges.size(), 3u);
- HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p != phi_bottom && p->GetType() == DataType::Type::kInt32;
- });
- HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
- return p->GetType() == DataType::Type::kReference;
- });
- ASSERT_NE(merge_alloc, nullptr);
- EXPECT_TRUE(merge_alloc->InputAt(0)->IsNewInstance()) << *merge_alloc;
- EXPECT_EQ(merge_alloc->InputAt(0)->InputAt(0), cls) << *merge_alloc << " cls? " << *cls;
- EXPECT_EQ(merge_alloc->InputAt(1), null_const);
- ASSERT_NE(pred_get, nullptr);
- EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
- EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return) << " pred-get is: " << *pred_get;
- EXPECT_INS_EQ(merge_value_return->InputAt(0), graph_->GetIntConstant(0))
- << " merge val is: " << *merge_value_return;
- EXPECT_INS_EQ(merge_value_return->InputAt(1), FindSingleInstruction<HPhi>(graph_, mid))
- << " merge val is: " << *merge_value_return;
-}
-
-// Based on structure seen in `java.util.Set java.util.Collections$UnmodifiableMap.entrySet()`
-// We end up having to update a PHI generated by normal LSE.
-// // ENTRY
-// Obj obj_init = param_obj.BAR;
-// if (param1) {
-// Obj other = new Obj();
-// other.foo = 42;
-// if (param2) {
-// return other.foo;
-// } else {
-// param_obj.BAR = other;
-// }
-// } else { }
-// EXIT
-// LSE turns this into PHI[obj_init, other]
-// read_bottom = param_obj.BAR;
-// // The read below won't be changed: the escape happens with .BAR already
-// // set, so the object is in the escaping cohort.
-// return read_bottom.foo;
-TEST_F(LoadStoreEliminationTest, MultiPredicatedLoad4) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"left", "left_early_return"},
- {"left_early_return", "exit"},
- {"left", "left_write_escape"},
- {"left_write_escape", "breturn"},
- {"entry", "right"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(left_early_return);
- GET_BLOCK(left_write_escape);
- GET_BLOCK(right);
-#undef GET_BLOCK
- MemberOffset foo_offset = MemberOffset(32);
- MemberOffset bar_offset = MemberOffset(20);
- EnsurePredecessorOrder(breturn, {left_write_escape, right});
- HInstruction* c42 = graph_->GetIntConstant(42);
- HInstruction* param1 = MakeParam(DataType::Type::kBool);
- HInstruction* param2 = MakeParam(DataType::Type::kBool);
- HInstruction* param_obj = MakeParam(DataType::Type::kReference);
-
- HInstruction* get_initial = MakeIFieldGet(param_obj, DataType::Type::kReference, bar_offset);
- HInstruction* if_inst = new (GetAllocator()) HIf(param1);
- entry->AddInstruction(get_initial);
- entry->AddInstruction(if_inst);
-
- HInstruction* cls1 = MakeClassLoad();
- HInstruction* new_inst1 = MakeNewInstance(cls1);
- HInstruction* write1 = MakeIFieldSet(new_inst1, c42, foo_offset);
- HInstruction* if_left = new (GetAllocator()) HIf(param2);
- left->AddInstruction(cls1);
- left->AddInstruction(new_inst1);
- left->AddInstruction(write1);
- left->AddInstruction(if_left);
- ManuallyBuildEnvFor(cls1, {});
- new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
-
- HInstruction* read_early_return = MakeIFieldGet(new_inst1, DataType::Type::kInt32, foo_offset);
- HInstruction* return_early = new (GetAllocator()) HReturn(read_early_return);
- left_early_return->AddInstruction(read_early_return);
- left_early_return->AddInstruction(return_early);
-
- HInstruction* write_escape = MakeIFieldSet(param_obj, new_inst1, bar_offset);
- HInstruction* write_goto = new (GetAllocator()) HGoto();
- left_write_escape->AddInstruction(write_escape);
- left_write_escape->AddInstruction(write_goto);
-
- right->AddInstruction(new (GetAllocator()) HGoto());
-
- HInstruction* read_bottom = MakeIFieldGet(param_obj, DataType::Type::kReference, bar_offset);
- HInstruction* final_read = MakeIFieldGet(read_bottom, DataType::Type::kInt32, foo_offset);
- HInstruction* return_exit = new (GetAllocator()) HReturn(final_read);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(final_read);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
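-  // The early return folds to the stored 42; the final BAR load becomes a phi
-  // merging the materialized allocation with the initial BAR value.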
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_REMOVED(read_early_return);
- EXPECT_INS_EQ(return_early->InputAt(0), c42);
- EXPECT_INS_RETAINED(final_read);
- HNewInstance* moved_ni =
- FindSingleInstruction<HNewInstance>(graph_, left_write_escape->GetSinglePredecessor());
- EXPECT_TRUE(final_read->InputAt(0)->IsPhi());
- EXPECT_INS_EQ(final_read->InputAt(0)->InputAt(0), moved_ni);
- EXPECT_INS_EQ(final_read->InputAt(0)->InputAt(1), get_initial);
-}
-
-// // ENTRY
-// obj = new Obj();
-// obj.field = 3;
-// if (parameter_value) {
-// // LEFT
-// escape(obj);
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// }
-// // MERGE
-// if (second_param) {
-// // NON_ESCAPE
-// obj.field = 1;
-// noescape();
-// }
-// EXIT
-// predicated-ELIMINATE
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PredicatedLoad2) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "merge"},
- {"right", "merge"},
- {"merge", "non_escape"},
- {"non_escape", "breturn"},
- {"merge", "crit_break"},
- {"crit_break", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
- GET_BLOCK(merge);
- GET_BLOCK(non_escape);
- GET_BLOCK(crit_break);
-#undef GET_BLOCK
- EnsurePredecessorOrder(merge, {left, right});
- EnsurePredecessorOrder(breturn, {crit_break, non_escape});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* bool_value2 = MakeParam(DataType::Type::kBool);
- HInstruction* null_const = graph_->GetNullConstant();
- HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_entry);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* merge_if = new (GetAllocator()) HIf(bool_value2);
- merge->AddInstruction(merge_if);
-
- crit_break->AddInstruction(new (GetAllocator()) HGoto());
-
- HInstruction* write_non_escape = MakeIFieldSet(new_inst, c1, MemberOffset(32));
- HInstruction* non_escape_call = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* non_escape_goto = new (GetAllocator()) HGoto();
- non_escape->AddInstruction(write_non_escape);
- non_escape->AddInstruction(non_escape_call);
- non_escape->AddInstruction(non_escape_goto);
- non_escape_call->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
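-  // The final read becomes a predicated get; its default value is a phi chain
-  // merging the placeholder 0 from the escaped path with the stores of 2 and 1.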
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_REMOVED(write_right);
- EXPECT_INS_RETAINED(call_left);
- std::vector<HPhi*> merges;
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- std::tie(merges) = FindAllInstructions<HPhi>(graph_);
- ASSERT_EQ(merges.size(), 3u);
- HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->GetBlock() == breturn;
- });
- HPhi* merge_value_merge = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->GetBlock() != breturn;
- });
- HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
- return p->GetType() == DataType::Type::kReference;
- });
- ASSERT_NE(merge_alloc, nullptr);
- EXPECT_TRUE(merge_alloc->InputAt(0)->IsNewInstance()) << *merge_alloc;
- EXPECT_INS_EQ(merge_alloc->InputAt(0)->InputAt(0), cls)
- << " phi is: " << merge_alloc->DumpWithArgs();
- EXPECT_INS_EQ(merge_alloc->InputAt(1), null_const);
- ASSERT_NE(pred_get, nullptr);
- EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
- EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return)
- << "get is " << pred_get->DumpWithArgs();
- EXPECT_INS_EQ(merge_value_return->InputAt(0), merge_value_merge)
- << " phi is: " << *merge_value_return;
- EXPECT_INS_EQ(merge_value_return->InputAt(1), c1)
- << " phi is: " << merge_value_return->DumpWithArgs();
- EXPECT_INS_EQ(merge_value_merge->InputAt(0), graph_->GetIntConstant(0))
- << " phi is: " << *merge_value_merge;
- EXPECT_INS_EQ(merge_value_merge->InputAt(1), c2)
- << " phi is: " << merge_value_merge->DumpWithArgs();
-}
-
-// // ENTRY
-// obj = new Obj();
-// obj.field = 3;
-// if (parameter_value) {
-// // LEFT
-// escape(obj);
-// } else {
-// // RIGHT
-// // ELIMINATE
-// obj.field = 2;
-// }
-// // MERGE
-// if (second_param) {
-// // NON_ESCAPE
-// obj.field = 1;
-// }
-// noescape();
-// EXIT
-// predicated-ELIMINATE
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PredicatedLoad3) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "merge"},
- {"right", "merge"},
- {"merge", "non_escape"},
- {"non_escape", "breturn"},
- {"merge", "crit_break"},
- {"crit_break", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
- GET_BLOCK(merge);
- GET_BLOCK(crit_break);
- GET_BLOCK(non_escape);
-#undef GET_BLOCK
- EnsurePredecessorOrder(merge, {left, right});
- EnsurePredecessorOrder(breturn, {crit_break, non_escape});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* bool_value2 = MakeParam(DataType::Type::kBool);
- HInstruction* null_const = graph_->GetNullConstant();
- HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_entry);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* merge_if = new (GetAllocator()) HIf(bool_value2);
- merge->AddInstruction(merge_if);
-
- HInstruction* write_non_escape = MakeIFieldSet(new_inst, c1, MemberOffset(32));
- HInstruction* non_escape_goto = new (GetAllocator()) HGoto();
- non_escape->AddInstruction(write_non_escape);
- non_escape->AddInstruction(non_escape_goto);
-
- crit_break->AddInstruction(new (GetAllocator()) HGoto());
-
- HInstruction* bottom_call = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(bottom_call);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
- bottom_call->CopyEnvironmentFrom(cls->GetEnvironment());
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
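-  // Same phi chain as in PredicatedLoad2; the call in breturn before the read
-  // does not prevent the elimination.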
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_REMOVED(write_right);
- EXPECT_INS_RETAINED(call_left);
- std::vector<HPhi*> merges;
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- std::tie(merges) = FindAllInstructions<HPhi>(graph_);
- ASSERT_EQ(merges.size(), 3u);
- HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->GetBlock() == breturn;
- });
- HPhi* merge_value_merge = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
- return p->GetType() == DataType::Type::kInt32 && p->GetBlock() != breturn;
- });
- HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
- return p->GetType() == DataType::Type::kReference;
- });
- ASSERT_NE(merge_alloc, nullptr);
- EXPECT_TRUE(merge_alloc->InputAt(0)->IsNewInstance()) << merge_alloc->DumpWithArgs();
- EXPECT_INS_EQ(merge_alloc->InputAt(0)->InputAt(0), cls)
- << " phi is: " << merge_alloc->DumpWithArgs();
- EXPECT_INS_EQ(merge_alloc->InputAt(1), null_const);
- ASSERT_NE(pred_get, nullptr);
- EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
- EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return)
- << "get is " << pred_get->DumpWithArgs();
- EXPECT_INS_EQ(merge_value_return->InputAt(0), merge_value_merge)
- << " phi is: " << *merge_value_return;
- EXPECT_INS_EQ(merge_value_return->InputAt(1), c1) << " phi is: " << *merge_value_return;
- EXPECT_INS_EQ(merge_value_merge->InputAt(0), graph_->GetIntConstant(0))
- << " phi is: " << *merge_value_merge;
- EXPECT_INS_EQ(merge_value_merge->InputAt(1), c2) << " phi is: " << *merge_value_merge;
-}
-
-// // ENTRY
-// obj = new Obj();
-// if (parameter_value) {
-// // LEFT
-// obj.field = 3;
-// escape(obj);
-// } else {
-// // RIGHT - Leave it as default value
-// }
-// EXIT
-// predicated-ELIMINATE
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PredicatedLoadDefaultValue) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left, right});
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* null_const = graph_->GetNullConstant();
- HInstruction* c0 = graph_->GetIntConstant(0);
- HInstruction* c3 = graph_->GetIntConstant(3);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_left = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(write_left);
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(goto_right);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
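-  // Nothing is ever stored on the right path, so the predicated get falls back
-  // to the field's default value 0.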
- EXPECT_INS_REMOVED(read_bottom);
- EXPECT_INS_RETAINED(write_left);
- EXPECT_INS_RETAINED(call_left);
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- HPhi* merge_alloc = FindSingleInstruction<HPhi>(graph_, breturn);
- ASSERT_NE(merge_alloc, nullptr);
- EXPECT_TRUE(merge_alloc->InputAt(0)->IsNewInstance()) << *merge_alloc;
- EXPECT_EQ(merge_alloc->InputAt(0)->InputAt(0), cls) << *merge_alloc << " cls? " << *cls;
- EXPECT_EQ(merge_alloc->InputAt(1), null_const);
- ASSERT_NE(pred_get, nullptr);
- EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
- EXPECT_INS_EQ(pred_get->GetDefaultValue(), c0) << " pred-get is: " << *pred_get;
-}
-
-// // ENTRY
-// obj = new Obj();
-// // ALL should be kept
-// switch (parameter_value) {
-// case 1:
-// // Case1
-// obj.field = 1;
-// call_func(obj);
-// break;
-// case 2:
-// // Case2
-// obj.field = 2;
-// call_func(obj);
-// break;
-// default:
-// // Case3
-// obj.field = 3;
-// do {
-// if (test2()) { } else { obj.field = 5; }
-// } while (test());
-// break;
-// }
-// EXIT
-// // predicated-ELIMINATE
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PartialLoopPhis1) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "bswitch"},
- {"bswitch", "case1"},
- {"bswitch", "case2"},
- {"bswitch", "case3"},
- {"case1", "breturn"},
- {"case2", "breturn"},
- {"case3", "loop_pre_header"},
- {"loop_pre_header", "loop_header"},
- {"loop_header", "loop_body"},
- {"loop_body", "loop_if_left"},
- {"loop_body", "loop_if_right"},
- {"loop_if_left", "loop_merge"},
- {"loop_if_right", "loop_merge"},
- {"loop_merge", "loop_end"},
- {"loop_end", "loop_header"},
- {"loop_end", "critical_break"},
- {"critical_break", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(bswitch);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(case1);
- GET_BLOCK(case2);
- GET_BLOCK(case3);
-
- GET_BLOCK(loop_pre_header);
- GET_BLOCK(loop_header);
- GET_BLOCK(loop_body);
- GET_BLOCK(loop_if_left);
- GET_BLOCK(loop_if_right);
- GET_BLOCK(loop_merge);
- GET_BLOCK(loop_end);
- GET_BLOCK(critical_break);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {case1, case2, critical_break});
- EnsurePredecessorOrder(loop_header, {loop_pre_header, loop_end});
- EnsurePredecessorOrder(loop_merge, {loop_if_left, loop_if_right});
- CHECK_SUBROUTINE_FAILURE();
- HInstruction* switch_val = MakeParam(DataType::Type::kInt32);
- HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c5 = graph_->GetIntConstant(5);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* entry_goto = new (GetAllocator()) HGoto();
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(entry_goto);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, switch_val);
- bswitch->AddInstruction(switch_inst);
-
- HInstruction* write_c1 = MakeIFieldSet(new_inst, c1, MemberOffset(32));
- HInstruction* call_c1 = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_c1 = new (GetAllocator()) HGoto();
- case1->AddInstruction(write_c1);
- case1->AddInstruction(call_c1);
- case1->AddInstruction(goto_c1);
- call_c1->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_c2 = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* call_c2 = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_c2 = new (GetAllocator()) HGoto();
- case2->AddInstruction(write_c2);
- case2->AddInstruction(call_c2);
- case2->AddInstruction(goto_c2);
- call_c2->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_c3 = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* goto_c3 = new (GetAllocator()) HGoto();
- case3->AddInstruction(write_c3);
- case3->AddInstruction(goto_c3);
-
- HInstruction* goto_preheader = new (GetAllocator()) HGoto();
- loop_pre_header->AddInstruction(goto_preheader);
-
- HInstruction* suspend_check_header = new (GetAllocator()) HSuspendCheck();
- HInstruction* goto_header = new (GetAllocator()) HGoto();
- loop_header->AddInstruction(suspend_check_header);
- loop_header->AddInstruction(goto_header);
- suspend_check_header->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_loop_body = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* if_loop_body = new (GetAllocator()) HIf(call_loop_body);
- loop_body->AddInstruction(call_loop_body);
- loop_body->AddInstruction(if_loop_body);
- call_loop_body->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
- loop_if_left->AddInstruction(goto_loop_left);
-
- HInstruction* write_loop_right = MakeIFieldSet(new_inst, c5, MemberOffset(32));
- HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
- loop_if_right->AddInstruction(write_loop_right);
- loop_if_right->AddInstruction(goto_loop_right);
-
- HInstruction* goto_loop_merge = new (GetAllocator()) HGoto();
- loop_merge->AddInstruction(goto_loop_merge);
-
- HInstruction* call_end = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* if_end = new (GetAllocator()) HIf(call_end);
- loop_end->AddInstruction(call_end);
- loop_end->AddInstruction(if_end);
- call_end->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_critical_break = new (GetAllocator()) HGoto();
- critical_break->AddInstruction(goto_critical_break);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
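-  // Only the escaping cases keep their stores; the default case's 3 and 5
-  // become loop phis feeding the predicated get's default value.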
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- EXPECT_INS_REMOVED(read_bottom) << *read_bottom;
- ASSERT_TRUE(pred_get != nullptr);
- HPhi* inst_return_phi = pred_get->GetTarget()->AsPhiOrNull();
- ASSERT_TRUE(inst_return_phi != nullptr) << pred_get->GetTarget()->DumpWithArgs();
- EXPECT_INS_EQ(inst_return_phi->InputAt(0),
- FindSingleInstruction<HNewInstance>(graph_, case1->GetSinglePredecessor()));
- EXPECT_INS_EQ(inst_return_phi->InputAt(1),
- FindSingleInstruction<HNewInstance>(graph_, case2->GetSinglePredecessor()));
- EXPECT_INS_EQ(inst_return_phi->InputAt(2), graph_->GetNullConstant());
- HPhi* inst_value_phi = pred_get->GetDefaultValue()->AsPhiOrNull();
- ASSERT_TRUE(inst_value_phi != nullptr) << pred_get->GetDefaultValue()->DumpWithArgs();
- EXPECT_INS_EQ(inst_value_phi->InputAt(0), graph_->GetIntConstant(0));
- EXPECT_INS_EQ(inst_value_phi->InputAt(1), graph_->GetIntConstant(0));
- HPhi* loop_merge_phi = FindSingleInstruction<HPhi>(graph_, loop_merge);
- ASSERT_TRUE(loop_merge_phi != nullptr);
- HPhi* loop_header_phi = FindSingleInstruction<HPhi>(graph_, loop_header);
- ASSERT_TRUE(loop_header_phi != nullptr);
- EXPECT_INS_EQ(loop_header_phi->InputAt(0), c3);
- EXPECT_INS_EQ(loop_header_phi->InputAt(1), loop_merge_phi);
- EXPECT_INS_EQ(loop_merge_phi->InputAt(0), loop_header_phi);
- EXPECT_INS_EQ(loop_merge_phi->InputAt(1), c5);
- EXPECT_INS_EQ(inst_value_phi->InputAt(2), loop_merge_phi);
- EXPECT_INS_RETAINED(write_c1) << *write_c1;
- EXPECT_INS_RETAINED(write_c2) << *write_c2;
- EXPECT_INS_REMOVED(write_c3) << *write_c3;
- EXPECT_INS_REMOVED(write_loop_right) << *write_loop_right;
-}
-
-// // ENTRY
-// obj = new Obj();
-// switch (parameter_value) {
-// case 1:
-// // Case1
-// obj.field = 1;
-// call_func(obj);
-// break;
-// case 2:
-// // Case2
-// obj.field = 2;
-// call_func(obj);
-// break;
-// default:
-// // Case3
-// obj.field = 3;
-// while (!test()) {
-// if (test2()) { } else { obj.field = 5; }
-// }
-// break;
-// }
-// EXIT
-// // predicated-ELIMINATE
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PartialLoopPhis2) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "bswitch"},
- {"bswitch", "case1"},
- {"bswitch", "case2"},
- {"bswitch", "case3"},
- {"case1", "breturn"},
- {"case2", "breturn"},
- {"case3", "loop_pre_header"},
-
- {"loop_pre_header", "loop_header"},
- {"loop_header", "critical_break"},
- {"loop_header", "loop_body"},
- {"loop_body", "loop_if_left"},
- {"loop_body", "loop_if_right"},
- {"loop_if_left", "loop_merge"},
- {"loop_if_right", "loop_merge"},
- {"loop_merge", "loop_header"},
-
- {"critical_break", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(bswitch);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(case1);
- GET_BLOCK(case2);
- GET_BLOCK(case3);
-
- GET_BLOCK(loop_pre_header);
- GET_BLOCK(loop_header);
- GET_BLOCK(loop_body);
- GET_BLOCK(loop_if_left);
- GET_BLOCK(loop_if_right);
- GET_BLOCK(loop_merge);
- GET_BLOCK(critical_break);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {case1, case2, critical_break});
- EnsurePredecessorOrder(loop_header, {loop_pre_header, loop_merge});
- EnsurePredecessorOrder(loop_merge, {loop_if_left, loop_if_right});
- CHECK_SUBROUTINE_FAILURE();
- HInstruction* switch_val = MakeParam(DataType::Type::kInt32);
- HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c5 = graph_->GetIntConstant(5);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* entry_goto = new (GetAllocator()) HGoto();
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(entry_goto);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, switch_val);
- bswitch->AddInstruction(switch_inst);
-
- HInstruction* write_c1 = MakeIFieldSet(new_inst, c1, MemberOffset(32));
- HInstruction* call_c1 = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_c1 = new (GetAllocator()) HGoto();
- case1->AddInstruction(write_c1);
- case1->AddInstruction(call_c1);
- case1->AddInstruction(goto_c1);
- call_c1->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_c2 = MakeIFieldSet(new_inst, c2, MemberOffset(32));
- HInstruction* call_c2 = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_c2 = new (GetAllocator()) HGoto();
- case2->AddInstruction(write_c2);
- case2->AddInstruction(call_c2);
- case2->AddInstruction(goto_c2);
- call_c2->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_c3 = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* goto_c3 = new (GetAllocator()) HGoto();
- case3->AddInstruction(write_c3);
- case3->AddInstruction(goto_c3);
-
- HInstruction* goto_preheader = new (GetAllocator()) HGoto();
- loop_pre_header->AddInstruction(goto_preheader);
-
- HInstruction* suspend_check_header = new (GetAllocator()) HSuspendCheck();
- HInstruction* call_header = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* if_header = new (GetAllocator()) HIf(call_header);
- loop_header->AddInstruction(suspend_check_header);
- loop_header->AddInstruction(call_header);
- loop_header->AddInstruction(if_header);
- call_header->CopyEnvironmentFrom(cls->GetEnvironment());
- suspend_check_header->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_loop_body = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* if_loop_body = new (GetAllocator()) HIf(call_loop_body);
- loop_body->AddInstruction(call_loop_body);
- loop_body->AddInstruction(if_loop_body);
- call_loop_body->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
- loop_if_left->AddInstruction(goto_loop_left);
-
- HInstruction* write_loop_right = MakeIFieldSet(new_inst, c5, MemberOffset(32));
- HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
- loop_if_right->AddInstruction(write_loop_right);
- loop_if_right->AddInstruction(goto_loop_right);
-
- HInstruction* goto_loop_merge = new (GetAllocator()) HGoto();
- loop_merge->AddInstruction(goto_loop_merge);
-
- HInstruction* goto_critical_break = new (GetAllocator()) HGoto();
- critical_break->AddInstruction(goto_critical_break);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
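-  // As in PartialLoopPhis1, but the loop exits from the header, so the default
-  // value comes from the header phi rather than the merge phi.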
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- EXPECT_INS_REMOVED(read_bottom) << *read_bottom;
- ASSERT_TRUE(pred_get != nullptr);
- HPhi* inst_return_phi = pred_get->GetTarget()->AsPhiOrNull();
- ASSERT_TRUE(inst_return_phi != nullptr) << pred_get->GetTarget()->DumpWithArgs();
- EXPECT_INS_EQ(inst_return_phi->InputAt(0),
- FindSingleInstruction<HNewInstance>(graph_, case1->GetSinglePredecessor()));
- EXPECT_INS_EQ(inst_return_phi->InputAt(1),
- FindSingleInstruction<HNewInstance>(graph_, case2->GetSinglePredecessor()));
- EXPECT_INS_EQ(inst_return_phi->InputAt(2), graph_->GetNullConstant());
- HPhi* inst_value_phi = pred_get->GetDefaultValue()->AsPhiOrNull();
- ASSERT_TRUE(inst_value_phi != nullptr) << pred_get->GetDefaultValue()->DumpWithArgs();
- EXPECT_INS_EQ(inst_value_phi->InputAt(0), graph_->GetIntConstant(0));
- EXPECT_INS_EQ(inst_value_phi->InputAt(1), graph_->GetIntConstant(0));
- HPhi* loop_merge_phi = FindSingleInstruction<HPhi>(graph_, loop_merge);
- ASSERT_TRUE(loop_merge_phi != nullptr);
- HPhi* loop_header_phi = FindSingleInstruction<HPhi>(graph_, loop_header);
- ASSERT_TRUE(loop_header_phi != nullptr);
- EXPECT_INS_EQ(loop_header_phi->InputAt(0), c3);
- EXPECT_INS_EQ(loop_header_phi->InputAt(1), loop_merge_phi);
- EXPECT_INS_EQ(loop_merge_phi->InputAt(0), loop_header_phi);
- EXPECT_INS_EQ(loop_merge_phi->InputAt(1), c5);
- EXPECT_INS_EQ(inst_value_phi->InputAt(2), loop_header_phi);
- EXPECT_INS_RETAINED(write_c1) << *write_c1;
- EXPECT_INS_RETAINED(write_c2) << *write_c2;
- EXPECT_INS_REMOVED(write_c3) << *write_c3;
- EXPECT_INS_REMOVED(write_loop_right) << *write_loop_right;
-}
-
-// // ENTRY
-// obj = new Obj();
-// obj.field = 3;
-// while (!test()) {
-// if (test2()) { } else { obj.field = 5; }
-// }
-// if (parameter_value) {
-// escape(obj);
-// }
-// EXIT
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PartialLoopPhis3) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "loop_pre_header"},
-
- {"loop_pre_header", "loop_header"},
- {"loop_header", "escape_check"},
- {"loop_header", "loop_body"},
- {"loop_body", "loop_if_left"},
- {"loop_body", "loop_if_right"},
- {"loop_if_left", "loop_merge"},
- {"loop_if_right", "loop_merge"},
- {"loop_merge", "loop_header"},
-
- {"escape_check", "escape"},
- {"escape_check", "no_escape"},
- {"no_escape", "breturn"},
- {"escape", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(no_escape);
- GET_BLOCK(escape);
- GET_BLOCK(escape_check);
-
- GET_BLOCK(loop_pre_header);
- GET_BLOCK(loop_header);
- GET_BLOCK(loop_body);
- GET_BLOCK(loop_if_left);
- GET_BLOCK(loop_if_right);
- GET_BLOCK(loop_merge);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {no_escape, escape});
- EnsurePredecessorOrder(loop_header, {loop_pre_header, loop_merge});
- EnsurePredecessorOrder(loop_merge, {loop_if_left, loop_if_right});
- CHECK_SUBROUTINE_FAILURE();
- HInstruction* bool_val = MakeParam(DataType::Type::kBool);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c5 = graph_->GetIntConstant(5);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* entry_goto = new (GetAllocator()) HGoto();
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(entry_goto);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_pre_header = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* goto_preheader = new (GetAllocator()) HGoto();
- loop_pre_header->AddInstruction(write_pre_header);
- loop_pre_header->AddInstruction(goto_preheader);
-
- HInstruction* suspend_check_header = new (GetAllocator()) HSuspendCheck();
- HInstruction* call_header = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* if_header = new (GetAllocator()) HIf(call_header);
- loop_header->AddInstruction(suspend_check_header);
- loop_header->AddInstruction(call_header);
- loop_header->AddInstruction(if_header);
- call_header->CopyEnvironmentFrom(cls->GetEnvironment());
- suspend_check_header->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_loop_body = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* if_loop_body = new (GetAllocator()) HIf(call_loop_body);
- loop_body->AddInstruction(call_loop_body);
- loop_body->AddInstruction(if_loop_body);
- call_loop_body->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
- loop_if_left->AddInstruction(goto_loop_left);
-
- HInstruction* write_loop_right = MakeIFieldSet(new_inst, c5, MemberOffset(32));
- HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
- loop_if_right->AddInstruction(write_loop_right);
- loop_if_right->AddInstruction(goto_loop_right);
-
- HInstruction* goto_loop_merge = new (GetAllocator()) HGoto();
- loop_merge->AddInstruction(goto_loop_merge);
-
- HInstruction* if_esc_check = new (GetAllocator()) HIf(bool_val);
- escape_check->AddInstruction(if_esc_check);
-
- HInstruction* call_escape = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_escape = new (GetAllocator()) HGoto();
- escape->AddInstruction(call_escape);
- escape->AddInstruction(goto_escape);
- call_escape->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_no_escape = new (GetAllocator()) HGoto();
- no_escape->AddInstruction(goto_no_escape);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
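-  // The escape is after the loop, so the materialization block on the escape
-  // edge stores the header phi's current value and both original stores are
-  // removed.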
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- EXPECT_INS_REMOVED(read_bottom) << *read_bottom;
- ASSERT_TRUE(pred_get != nullptr);
- HPhi* inst_return_phi = pred_get->GetTarget()->AsPhiOrNull();
- ASSERT_TRUE(inst_return_phi != nullptr) << pred_get->GetTarget()->DumpWithArgs();
- EXPECT_INS_EQ(inst_return_phi->InputAt(0), graph_->GetNullConstant());
- EXPECT_INS_EQ(inst_return_phi->InputAt(1),
- FindSingleInstruction<HNewInstance>(graph_, escape->GetSinglePredecessor()));
- HPhi* inst_value_phi = pred_get->GetDefaultValue()->AsPhiOrNull();
- ASSERT_TRUE(inst_value_phi != nullptr) << pred_get->GetDefaultValue()->DumpWithArgs();
- HPhi* loop_header_phi = FindSingleInstruction<HPhi>(graph_, loop_header);
- HPhi* loop_merge_phi = FindSingleInstruction<HPhi>(graph_, loop_merge);
- EXPECT_INS_EQ(inst_value_phi->InputAt(0), loop_header_phi);
- EXPECT_INS_EQ(inst_value_phi->InputAt(1), graph_->GetIntConstant(0));
- EXPECT_INS_EQ(loop_header_phi->InputAt(0), c3);
- EXPECT_INS_EQ(loop_header_phi->InputAt(1), loop_merge_phi);
- EXPECT_INS_EQ(loop_merge_phi->InputAt(0), loop_header_phi);
- EXPECT_INS_EQ(loop_merge_phi->InputAt(1), c5);
- HInstanceFieldSet* mat_set =
- FindSingleInstruction<HInstanceFieldSet>(graph_, escape->GetSinglePredecessor());
- ASSERT_NE(mat_set, nullptr);
- EXPECT_INS_EQ(mat_set->InputAt(1), loop_header_phi);
- EXPECT_INS_REMOVED(write_loop_right) << *write_loop_right;
- EXPECT_INS_REMOVED(write_pre_header) << *write_pre_header;
-}
-
-// // ENTRY
-// obj = new Obj();
-// if (parameter_value) {
-// escape(obj);
-// }
-// obj.field = 3;
-// while (!test()) {
-// if (test2()) { } else { obj.field = 5; }
-// }
-// EXIT
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PartialLoopPhis4) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "escape_check"},
- {"escape_check", "escape"},
- {"escape_check", "no_escape"},
- {"no_escape", "loop_pre_header"},
- {"escape", "loop_pre_header"},
-
- {"loop_pre_header", "loop_header"},
- {"loop_header", "breturn"},
- {"loop_header", "loop_body"},
- {"loop_body", "loop_if_left"},
- {"loop_body", "loop_if_right"},
- {"loop_if_left", "loop_merge"},
- {"loop_if_right", "loop_merge"},
- {"loop_merge", "loop_header"},
-
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(no_escape);
- GET_BLOCK(escape);
- GET_BLOCK(escape_check);
-
- GET_BLOCK(loop_pre_header);
- GET_BLOCK(loop_header);
- GET_BLOCK(loop_body);
- GET_BLOCK(loop_if_left);
- GET_BLOCK(loop_if_right);
- GET_BLOCK(loop_merge);
-#undef GET_BLOCK
- EnsurePredecessorOrder(loop_pre_header, {no_escape, escape});
- EnsurePredecessorOrder(loop_header, {loop_pre_header, loop_merge});
- EnsurePredecessorOrder(loop_merge, {loop_if_left, loop_if_right});
- CHECK_SUBROUTINE_FAILURE();
- HInstruction* bool_val = MakeParam(DataType::Type::kBool);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c5 = graph_->GetIntConstant(5);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* entry_goto = new (GetAllocator()) HGoto();
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(entry_goto);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* if_esc_check = new (GetAllocator()) HIf(bool_val);
- escape_check->AddInstruction(if_esc_check);
-
- HInstruction* call_escape = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_escape = new (GetAllocator()) HGoto();
- escape->AddInstruction(call_escape);
- escape->AddInstruction(goto_escape);
- call_escape->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_no_escape = new (GetAllocator()) HGoto();
- no_escape->AddInstruction(goto_no_escape);
-
- HInstanceFieldSet* write_pre_header = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* goto_preheader = new (GetAllocator()) HGoto();
- loop_pre_header->AddInstruction(write_pre_header);
- loop_pre_header->AddInstruction(goto_preheader);
-
- HInstruction* suspend_check_header = new (GetAllocator()) HSuspendCheck();
- HInstruction* call_header = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* if_header = new (GetAllocator()) HIf(call_header);
- loop_header->AddInstruction(suspend_check_header);
- loop_header->AddInstruction(call_header);
- loop_header->AddInstruction(if_header);
- call_header->CopyEnvironmentFrom(cls->GetEnvironment());
- suspend_check_header->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_loop_body = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* if_loop_body = new (GetAllocator()) HIf(call_loop_body);
- loop_body->AddInstruction(call_loop_body);
- loop_body->AddInstruction(if_loop_body);
- call_loop_body->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
- loop_if_left->AddInstruction(goto_loop_left);
-
- HInstanceFieldSet* write_loop_right = MakeIFieldSet(new_inst, c5, MemberOffset(32));
- HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
- loop_if_right->AddInstruction(write_loop_right);
- loop_if_right->AddInstruction(goto_loop_right);
-
- HInstruction* goto_loop_merge = new (GetAllocator()) HGoto();
- loop_merge->AddInstruction(goto_loop_merge);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- EXPECT_INS_REMOVED(read_bottom) << *read_bottom;
- ASSERT_TRUE(pred_get != nullptr);
- HPhi* inst_return_phi = pred_get->GetTarget()->AsPhiOrNull();
- ASSERT_TRUE(inst_return_phi != nullptr) << pred_get->GetTarget()->DumpWithArgs();
- EXPECT_INS_EQ(inst_return_phi->InputAt(0), graph_->GetNullConstant());
- EXPECT_INS_EQ(inst_return_phi->InputAt(1),
- FindSingleInstruction<HNewInstance>(graph_, escape->GetSinglePredecessor()));
- HPhi* inst_value_phi = pred_get->GetDefaultValue()->AsPhiOrNull();
- ASSERT_TRUE(inst_value_phi != nullptr) << pred_get->GetDefaultValue()->DumpWithArgs();
- HPhi* loop_header_phi = FindSingleInstruction<HPhi>(graph_, loop_header);
- HPhi* loop_merge_phi = FindSingleInstruction<HPhi>(graph_, loop_merge);
- EXPECT_INS_EQ(inst_value_phi, loop_header_phi);
- EXPECT_INS_EQ(loop_header_phi->InputAt(0), c3);
- EXPECT_INS_EQ(loop_header_phi->InputAt(1), loop_merge_phi);
- EXPECT_INS_EQ(loop_merge_phi->InputAt(0), loop_header_phi);
- EXPECT_INS_EQ(loop_merge_phi->InputAt(1), c5);
- EXPECT_INS_RETAINED(write_loop_right) << *write_loop_right;
- EXPECT_TRUE(write_loop_right->GetIsPredicatedSet()) << *write_loop_right;
- EXPECT_INS_RETAINED(write_pre_header) << *write_pre_header;
- EXPECT_TRUE(write_pre_header->GetIsPredicatedSet()) << *write_pre_header;
-}
-
-// // ENTRY
-// obj = new Obj();
-// obj.field = 3;
-// while (!test()) {
-// if (test2()) { } else { obj.field += 5; }
-// }
-// if (parameter_value) {
-// escape(obj);
-// }
-// EXIT
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PartialLoopPhis5) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "loop_pre_header"},
- {"loop_pre_header", "loop_header"},
- {"loop_header", "escape_check"},
- {"loop_header", "loop_body"},
- {"loop_body", "loop_if_left"},
- {"loop_body", "loop_if_right"},
- {"loop_if_left", "loop_merge"},
- {"loop_if_right", "loop_merge"},
- {"loop_merge", "loop_header"},
- {"escape_check", "escape"},
- {"escape_check", "no_escape"},
- {"no_escape", "breturn"},
- {"escape", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(no_escape);
- GET_BLOCK(escape);
- GET_BLOCK(escape_check);
-
- GET_BLOCK(loop_pre_header);
- GET_BLOCK(loop_header);
- GET_BLOCK(loop_body);
- GET_BLOCK(loop_if_left);
- GET_BLOCK(loop_if_right);
- GET_BLOCK(loop_merge);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {no_escape, escape});
- EnsurePredecessorOrder(loop_header, {loop_pre_header, loop_merge});
- EnsurePredecessorOrder(loop_merge, {loop_if_left, loop_if_right});
- CHECK_SUBROUTINE_FAILURE();
- HInstruction* bool_val = MakeParam(DataType::Type::kBool);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c5 = graph_->GetIntConstant(5);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* entry_goto = new (GetAllocator()) HGoto();
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(entry_goto);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_pre_header = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* goto_preheader = new (GetAllocator()) HGoto();
- loop_pre_header->AddInstruction(write_pre_header);
- loop_pre_header->AddInstruction(goto_preheader);
-
- HInstruction* suspend_check_header = new (GetAllocator()) HSuspendCheck();
- HInstruction* call_header = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* if_header = new (GetAllocator()) HIf(call_header);
- loop_header->AddInstruction(suspend_check_header);
- loop_header->AddInstruction(call_header);
- loop_header->AddInstruction(if_header);
- call_header->CopyEnvironmentFrom(cls->GetEnvironment());
- suspend_check_header->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_loop_body = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* if_loop_body = new (GetAllocator()) HIf(call_loop_body);
- loop_body->AddInstruction(call_loop_body);
- loop_body->AddInstruction(if_loop_body);
- call_loop_body->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
- loop_if_left->AddInstruction(goto_loop_left);
-
- HInstruction* read_loop_right = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* add_loop_right =
- new (GetAllocator()) HAdd(DataType::Type::kInt32, read_loop_right, c5);
- HInstruction* write_loop_right = MakeIFieldSet(new_inst, add_loop_right, MemberOffset(32));
- HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
- loop_if_right->AddInstruction(read_loop_right);
- loop_if_right->AddInstruction(add_loop_right);
- loop_if_right->AddInstruction(write_loop_right);
- loop_if_right->AddInstruction(goto_loop_right);
-
- HInstruction* goto_loop_merge = new (GetAllocator()) HGoto();
- loop_merge->AddInstruction(goto_loop_merge);
-
- HInstruction* if_esc_check = new (GetAllocator()) HIf(bool_val);
- escape_check->AddInstruction(if_esc_check);
-
- HInstruction* call_escape = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_escape = new (GetAllocator()) HGoto();
- escape->AddInstruction(call_escape);
- escape->AddInstruction(goto_escape);
- call_escape->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_no_escape = new (GetAllocator()) HGoto();
- no_escape->AddInstruction(goto_no_escape);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- EXPECT_INS_REMOVED(read_bottom) << *read_bottom;
- ASSERT_TRUE(pred_get != nullptr);
- HPhi* inst_return_phi = pred_get->GetTarget()->AsPhiOrNull();
- ASSERT_TRUE(inst_return_phi != nullptr) << pred_get->GetTarget()->DumpWithArgs();
- EXPECT_INS_EQ(inst_return_phi->InputAt(0), graph_->GetNullConstant());
- EXPECT_INS_EQ(inst_return_phi->InputAt(1),
- FindSingleInstruction<HNewInstance>(graph_, escape->GetSinglePredecessor()));
- HPhi* inst_value_phi = pred_get->GetDefaultValue()->AsPhiOrNull();
- ASSERT_TRUE(inst_value_phi != nullptr) << pred_get->GetDefaultValue()->DumpWithArgs();
- HPhi* loop_header_phi = FindSingleInstruction<HPhi>(graph_, loop_header);
- HPhi* loop_merge_phi = FindSingleInstruction<HPhi>(graph_, loop_merge);
- EXPECT_INS_EQ(inst_value_phi->InputAt(0), loop_header_phi);
- EXPECT_INS_EQ(inst_value_phi->InputAt(1), graph_->GetIntConstant(0));
- EXPECT_INS_EQ(loop_header_phi->InputAt(0), c3);
- EXPECT_INS_EQ(loop_header_phi->InputAt(1), loop_merge_phi);
- EXPECT_INS_EQ(loop_merge_phi->InputAt(0), loop_header_phi);
- EXPECT_INS_EQ(loop_merge_phi->InputAt(1), add_loop_right);
- EXPECT_INS_EQ(add_loop_right->InputAt(0), loop_header_phi);
- EXPECT_INS_EQ(add_loop_right->InputAt(1), c5);
- HInstanceFieldSet* mat_set =
- FindSingleInstruction<HInstanceFieldSet>(graph_, escape->GetSinglePredecessor());
- ASSERT_NE(mat_set, nullptr);
- EXPECT_INS_EQ(mat_set->InputAt(1), loop_header_phi);
- EXPECT_INS_REMOVED(write_loop_right) << *write_loop_right;
- EXPECT_INS_REMOVED(write_pre_header) << *write_pre_header;
-}
-
-// // ENTRY
-// obj = new Obj();
-// obj.field = 3;
-// if (param) {
-// while (!test()) {
-// if (test2()) {
-// noescape();
-// } else {
-// abc = obj.field;
-// obj.field = abc + 5;
-// noescape();
-// }
-// }
-// escape(obj);
-// } else {
-// }
-// return obj.field
-TEST_F(LoadStoreEliminationTest, PartialLoopPhis6) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(/*handles=*/&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "start"},
- {"start", "left"},
- {"start", "right"},
- {"left", "loop_pre_header"},
-
- {"loop_pre_header", "loop_header"},
- {"loop_header", "escape"},
- {"loop_header", "loop_body"},
- {"loop_body", "loop_if_left"},
- {"loop_body", "loop_if_right"},
- {"loop_if_left", "loop_header"},
- {"loop_if_right", "loop_header"},
-
- {"escape", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
- GET_BLOCK(start);
- GET_BLOCK(escape);
-
- GET_BLOCK(loop_pre_header);
- GET_BLOCK(loop_header);
- GET_BLOCK(loop_body);
- GET_BLOCK(loop_if_left);
- GET_BLOCK(loop_if_right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {escape, right});
- EnsurePredecessorOrder(loop_header, {loop_pre_header, loop_if_left, loop_if_right});
- CHECK_SUBROUTINE_FAILURE();
- HInstruction* bool_val = MakeParam(DataType::Type::kBool);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c5 = graph_->GetIntConstant(5);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* entry_goto = new (GetAllocator()) HGoto();
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_entry);
- entry->AddInstruction(entry_goto);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- start->AddInstruction(new (GetAllocator()) HIf(bool_val));
-
- HInstruction* left_goto = new (GetAllocator()) HGoto();
- left->AddInstruction(left_goto);
-
- HInstruction* goto_preheader = new (GetAllocator()) HGoto();
- loop_pre_header->AddInstruction(goto_preheader);
-
- HInstruction* suspend_check_header = new (GetAllocator()) HSuspendCheck();
- HInstruction* call_header = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* if_header = new (GetAllocator()) HIf(call_header);
- loop_header->AddInstruction(suspend_check_header);
- loop_header->AddInstruction(call_header);
- loop_header->AddInstruction(if_header);
- call_header->CopyEnvironmentFrom(cls->GetEnvironment());
- suspend_check_header->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_loop_body = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* if_loop_body = new (GetAllocator()) HIf(call_loop_body);
- loop_body->AddInstruction(call_loop_body);
- loop_body->AddInstruction(if_loop_body);
- call_loop_body->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_loop_left = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
- loop_if_left->AddInstruction(call_loop_left);
- loop_if_left->AddInstruction(goto_loop_left);
- call_loop_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* read_loop_right = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* add_loop_right =
- new (GetAllocator()) HAdd(DataType::Type::kInt32, c5, read_loop_right);
- HInstruction* write_loop_right = MakeIFieldSet(new_inst, add_loop_right, MemberOffset(32));
- HInstruction* call_loop_right = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
- loop_if_right->AddInstruction(read_loop_right);
- loop_if_right->AddInstruction(add_loop_right);
- loop_if_right->AddInstruction(write_loop_right);
- loop_if_right->AddInstruction(call_loop_right);
- loop_if_right->AddInstruction(goto_loop_right);
- call_loop_right->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_escape = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_escape = new (GetAllocator()) HGoto();
- escape->AddInstruction(call_escape);
- escape->AddInstruction(goto_escape);
- call_escape->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(goto_right);
-
- HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
- breturn->AddInstruction(read_bottom);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- EXPECT_INS_REMOVED(read_bottom) << *read_bottom;
- ASSERT_TRUE(pred_get != nullptr);
- HPhi* inst_return_phi = pred_get->GetTarget()->AsPhiOrNull();
- ASSERT_TRUE(inst_return_phi != nullptr) << pred_get->GetTarget()->DumpWithArgs();
- EXPECT_INS_EQ(inst_return_phi->InputAt(0),
- FindSingleInstruction<HNewInstance>(graph_, escape->GetSinglePredecessor()));
- EXPECT_INS_EQ(inst_return_phi->InputAt(1), graph_->GetNullConstant());
- EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(0), graph_->GetIntConstant(0));
- EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(1), c3);
- HPhi* loop_header_phi = FindSingleInstruction<HPhi>(graph_, loop_header);
- ASSERT_NE(loop_header_phi, nullptr);
- EXPECT_INS_EQ(loop_header_phi->InputAt(0), c3);
- EXPECT_INS_EQ(loop_header_phi->InputAt(1), loop_header_phi);
- EXPECT_INS_EQ(loop_header_phi->InputAt(2), add_loop_right);
- EXPECT_INS_EQ(add_loop_right->InputAt(0), c5);
- EXPECT_INS_EQ(add_loop_right->InputAt(1), loop_header_phi);
- HInstanceFieldSet* mat_set =
- FindSingleInstruction<HInstanceFieldSet>(graph_, escape->GetSinglePredecessor());
- ASSERT_NE(mat_set, nullptr);
- EXPECT_INS_EQ(mat_set->InputAt(1), loop_header_phi);
- EXPECT_INS_REMOVED(write_loop_right);
- EXPECT_INS_REMOVED(write_entry);
- EXPECT_INS_RETAINED(call_header);
- EXPECT_INS_RETAINED(call_loop_left);
- EXPECT_INS_RETAINED(call_loop_right);
-}
-
-// TODO: This should really be in an instruction simplifier gtest, but (1) that
-// doesn't exist and (2) we should move this simplification directly into the
-// LSE pass, since more information is available at that point.
-// // ENTRY
-// obj = new Obj();
-// obj.field = 3;
-// if (param) {
-// escape(obj);
-// } else {
-// obj.field = 10;
-// }
-// return obj.field;
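-// In this scenario, partial LSE leaves the read as a predicated get whose
-// default value is (roughly) Phi(0, 10); the 0 input is only on the escaped
-// path, where the value comes from the materialized object instead, so the
-// simplifier is expected to fold the Phi down to the constant 10.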
-TEST_F(LoadStoreEliminationTest, SimplifyTest) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left, right});
-
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c10 = graph_->GetIntConstant(10);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_start = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_start);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_right = MakeIFieldSet(new_inst, c10, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
-
- HInstruction* read_end = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
- breturn->AddInstruction(read_end);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- // Run the code-simplifier too
- PerformSimplifications(blks);
-
- EXPECT_INS_REMOVED(write_right);
- EXPECT_INS_REMOVED(write_start);
- EXPECT_INS_REMOVED(read_end);
- EXPECT_INS_RETAINED(call_left);
-
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- ASSERT_NE(pred_get, nullptr);
- EXPECT_INS_EQ(pred_get->GetDefaultValue(), c10);
-}
-
-
-// TODO: This should really be in an instruction simplifier gtest, but (1) that
-// doesn't exist and (2) we should move this simplification directly into the
-// LSE pass, since more information is available at that point.
-//
-// This checks that we don't replace phis when the replacement isn't valid at
-// that point (i.e. it doesn't dominate)
-// // ENTRY
-// obj = new Obj();
-// obj.field = 3;
-// if (param) {
-// escape(obj);
-// } else {
-// obj.field = noescape();
-// }
-// return obj.field;
-TEST_F(LoadStoreEliminationTest, SimplifyTest2) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
- {"right", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left, right});
-
- HInstruction* bool_value = MakeParam(DataType::Type::kBool);
- HInstruction* c3 = graph_->GetIntConstant(3);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_start = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_start);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, {new_inst});
- HInstruction* goto_left = new (GetAllocator()) HGoto();
- left->AddInstruction(call_left);
- left->AddInstruction(goto_left);
- call_left->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_right = MakeInvoke(DataType::Type::kInt32, {});
- HInstruction* write_right = MakeIFieldSet(new_inst, call_right, MemberOffset(32));
- HInstruction* goto_right = new (GetAllocator()) HGoto();
- right->AddInstruction(call_right);
- right->AddInstruction(write_right);
- right->AddInstruction(goto_right);
- call_right->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* read_end = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
- breturn->AddInstruction(read_end);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- // Run the code-simplifier too
- PerformSimplifications(blks);
-
- EXPECT_INS_REMOVED(write_right);
- EXPECT_INS_REMOVED(write_start);
- EXPECT_INS_REMOVED(read_end);
- EXPECT_INS_RETAINED(call_left);
- EXPECT_INS_RETAINED(call_right);
- EXPECT_EQ(call_right->GetBlock(), right);
-
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- ASSERT_NE(pred_get, nullptr);
- EXPECT_TRUE(pred_get->GetDefaultValue()->IsPhi()) << pred_get->DumpWithArgs();
- EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(0), graph_->GetIntConstant(0))
- << pred_get->DumpWithArgs();
- EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(1), call_right) << pred_get->DumpWithArgs();
-}
-
-// TODO: This should really be in an instruction simplifier gtest, but (1) that
-// doesn't exist and (2) we should move this simplification directly into the
-// LSE pass, since more information is available at that point.
-//
-// This checks that we replace phis even when there are multiple replacements as
-// long as they are equal
-// // ENTRY
-// obj = new Obj();
-// obj.field = 3;
-// switch (param) {
-// case 1:
-// escape(obj);
-// break;
-// case 2:
-// obj.field = 10;
-// break;
-// case 3:
-// obj.field = 10;
-// break;
-// }
-// return obj.field;
-TEST_F(LoadStoreEliminationTest, SimplifyTest3) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "case1"},
- {"entry", "case2"},
- {"entry", "case3"},
- {"case1", "breturn"},
- {"case2", "breturn"},
- {"case3", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(case1);
- GET_BLOCK(case2);
- GET_BLOCK(case3);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {case1, case2, case3});
-
- HInstruction* int_val = MakeParam(DataType::Type::kInt32);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c10 = graph_->GetIntConstant(10);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_start = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, int_val);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_start);
- entry->AddInstruction(switch_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_case1 = MakeInvoke(DataType::Type::kVoid, {new_inst});
- HInstruction* goto_case1 = new (GetAllocator()) HGoto();
- case1->AddInstruction(call_case1);
- case1->AddInstruction(goto_case1);
- call_case1->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_case2 = MakeIFieldSet(new_inst, c10, MemberOffset(32));
- HInstruction* goto_case2 = new (GetAllocator()) HGoto();
- case2->AddInstruction(write_case2);
- case2->AddInstruction(goto_case2);
-
- HInstruction* write_case3 = MakeIFieldSet(new_inst, c10, MemberOffset(32));
- HInstruction* goto_case3 = new (GetAllocator()) HGoto();
- case3->AddInstruction(write_case3);
- case3->AddInstruction(goto_case3);
-
- HInstruction* read_end = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
- breturn->AddInstruction(read_end);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- // Run the code-simplifier too
- PerformSimplifications(blks);
-
- EXPECT_INS_REMOVED(write_case2);
- EXPECT_INS_REMOVED(write_case3);
- EXPECT_INS_REMOVED(write_start);
- EXPECT_INS_REMOVED(read_end);
- EXPECT_INS_RETAINED(call_case1);
-
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- ASSERT_NE(pred_get, nullptr);
- EXPECT_INS_EQ(pred_get->GetDefaultValue(), c10)
- << pred_get->DumpWithArgs();
-}
-
-// TODO: This should really be in an instruction simplifier gtest, but (1) that
-// doesn't exist and (2) we should move this simplification directly into the
-// LSE pass, since more information is available at that point.
-//
-// This checks that we don't replace phis even when there are multiple
-// replacements if they are not equal
-// // ENTRY
-// obj = new Obj();
-// obj.field = 3;
-// switch (param) {
-// case 1:
-// escape(obj);
-// break;
-// case 2:
-// obj.field = 10;
-// break;
-// case 3:
-// obj.field = 20;
-// break;
-// }
-// return obj.field;
-TEST_F(LoadStoreEliminationTest, SimplifyTest4) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "case1"},
- {"entry", "case2"},
- {"entry", "case3"},
- {"case1", "breturn"},
- {"case2", "breturn"},
- {"case3", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(case1);
- GET_BLOCK(case2);
- GET_BLOCK(case3);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {case1, case2, case3});
-
- HInstruction* int_val = MakeParam(DataType::Type::kInt32);
- HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* c10 = graph_->GetIntConstant(10);
- HInstruction* c20 = graph_->GetIntConstant(20);
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_start = MakeIFieldSet(new_inst, c3, MemberOffset(32));
- HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, int_val);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_start);
- entry->AddInstruction(switch_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* call_case1 = MakeInvoke(DataType::Type::kVoid, {new_inst});
- HInstruction* goto_case1 = new (GetAllocator()) HGoto();
- case1->AddInstruction(call_case1);
- case1->AddInstruction(goto_case1);
- call_case1->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* write_case2 = MakeIFieldSet(new_inst, c10, MemberOffset(32));
- HInstruction* goto_case2 = new (GetAllocator()) HGoto();
- case2->AddInstruction(write_case2);
- case2->AddInstruction(goto_case2);
-
- HInstruction* write_case3 = MakeIFieldSet(new_inst, c20, MemberOffset(32));
- HInstruction* goto_case3 = new (GetAllocator()) HGoto();
- case3->AddInstruction(write_case3);
- case3->AddInstruction(goto_case3);
-
- HInstruction* read_end = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
- breturn->AddInstruction(read_end);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- // Run the code-simplifier too
- PerformSimplifications(blks);
-
- EXPECT_INS_REMOVED(write_case2);
- EXPECT_INS_REMOVED(write_case3);
- EXPECT_INS_REMOVED(write_start);
- EXPECT_INS_REMOVED(read_end);
- EXPECT_INS_RETAINED(call_case1);
-
- HPredicatedInstanceFieldGet* pred_get =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- ASSERT_NE(pred_get, nullptr);
- EXPECT_TRUE(pred_get->GetDefaultValue()->IsPhi())
- << pred_get->DumpWithArgs();
- EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(0), graph_->GetIntConstant(0));
- EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(1), c10);
- EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(2), c20);
-}
-
-// Make sure that irreducible loops don't screw up Partial LSE. We can't pull
-// phis through them, so we need to treat them as escapes.
-// TODO: We should be able to do better than this; it needs some research.
-// // ENTRY
-// obj = new Obj();
-// obj.foo = 11;
-// if (param1) {
-// } else {
-// // irreducible loop here. NB the obj doesn't actually escape
-// obj.foo = 33;
-// if (param2) {
-// goto inner;
-// } else {
-// while (test()) {
-// if (test()) {
-// obj.foo = 66;
-// } else {
-// }
-// inner:
-// }
-// }
-// }
-// return obj.foo;
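-// Since the loop is irreducible, partial LSE is expected to bail out and keep
-// the original stores and the final load intact (see the RETAINED checks at
-// the end of the test).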
-TEST_F(LoadStoreEliminationTest, PartialIrreducibleLoop) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("start",
- "exit",
- {{"start", "entry"},
- {"entry", "left"},
- {"entry", "right"},
- {"left", "breturn"},
-
- {"right", "right_crit_break_loop"},
- {"right_crit_break_loop", "loop_header"},
- {"right", "right_crit_break_end"},
- {"right_crit_break_end", "loop_end"},
-
- {"loop_header", "loop_body"},
- {"loop_body", "loop_left"},
- {"loop_body", "loop_right"},
- {"loop_left", "loop_end"},
- {"loop_right", "loop_end"},
- {"loop_end", "loop_header"},
- {"loop_header", "loop_header_crit_break"},
- {"loop_header_crit_break", "breturn"},
-
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(start);
- GET_BLOCK(entry);
- GET_BLOCK(exit);
- GET_BLOCK(breturn);
- GET_BLOCK(left);
- GET_BLOCK(right);
- GET_BLOCK(right_crit_break_end);
- GET_BLOCK(right_crit_break_loop);
- GET_BLOCK(loop_header);
- GET_BLOCK(loop_header_crit_break);
- GET_BLOCK(loop_body);
- GET_BLOCK(loop_left);
- GET_BLOCK(loop_right);
- GET_BLOCK(loop_end);
-#undef GET_BLOCK
- EnsurePredecessorOrder(breturn, {left, loop_header_crit_break});
- HInstruction* c11 = graph_->GetIntConstant(11);
- HInstruction* c33 = graph_->GetIntConstant(33);
- HInstruction* c66 = graph_->GetIntConstant(66);
- HInstruction* param1 = MakeParam(DataType::Type::kBool);
- HInstruction* param2 = MakeParam(DataType::Type::kBool);
-
- HInstruction* suspend = new (GetAllocator()) HSuspendCheck();
- HInstruction* start_goto = new (GetAllocator()) HGoto();
- start->AddInstruction(suspend);
- start->AddInstruction(start_goto);
- ManuallyBuildEnvFor(suspend, {});
-
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* write_start = MakeIFieldSet(new_inst, c11, MemberOffset(32));
- HInstruction* if_inst = new (GetAllocator()) HIf(param1);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(write_start);
- entry->AddInstruction(if_inst);
- ManuallyBuildEnvFor(cls, {});
- new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
-
- left->AddInstruction(new (GetAllocator()) HGoto());
-
- right->AddInstruction(MakeIFieldSet(new_inst, c33, MemberOffset(32)));
- right->AddInstruction(new (GetAllocator()) HIf(param2));
-
- right_crit_break_end->AddInstruction(new (GetAllocator()) HGoto());
- right_crit_break_loop->AddInstruction(new (GetAllocator()) HGoto());
-
- HInstruction* header_suspend = new (GetAllocator()) HSuspendCheck();
- HInstruction* header_invoke = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* header_if = new (GetAllocator()) HIf(header_invoke);
- loop_header->AddInstruction(header_suspend);
- loop_header->AddInstruction(header_invoke);
- loop_header->AddInstruction(header_if);
- header_suspend->CopyEnvironmentFrom(cls->GetEnvironment());
- header_invoke->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* body_invoke = MakeInvoke(DataType::Type::kBool, {});
- HInstruction* body_if = new (GetAllocator()) HIf(body_invoke);
- loop_body->AddInstruction(body_invoke);
- loop_body->AddInstruction(body_if);
- body_invoke->CopyEnvironmentFrom(cls->GetEnvironment());
-
- HInstruction* left_set = MakeIFieldSet(new_inst, c66, MemberOffset(32));
-  HInstruction* left_goto = new (GetAllocator()) HGoto();
- loop_left->AddInstruction(left_set);
- loop_left->AddInstruction(left_goto);
-
- loop_right->AddInstruction(new (GetAllocator()) HGoto());
-
- loop_end->AddInstruction(new (GetAllocator()) HGoto());
-
- HInstruction* read_end = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
- breturn->AddInstruction(read_end);
- breturn->AddInstruction(return_exit);
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- EXPECT_TRUE(loop_header->IsLoopHeader());
- EXPECT_TRUE(loop_header->GetLoopInformation()->IsIrreducible());
-
- // Partial LSE cannot run with irreducible loops.
- EXPECT_INS_RETAINED(left_set);
- EXPECT_INS_RETAINED(write_start);
- EXPECT_INS_RETAINED(read_end);
-}
-
-enum class UsesOrder { kDefaultOrder, kReverseOrder };
-std::ostream& operator<<(std::ostream& os, const UsesOrder& ord) {
- switch (ord) {
- case UsesOrder::kDefaultOrder:
- return os << "DefaultOrder";
- case UsesOrder::kReverseOrder:
- return os << "ReverseOrder";
- }
-}
-
-class UsesOrderDependentTestGroup
- : public LoadStoreEliminationTestBase<CommonCompilerTestWithParam<UsesOrder>> {};
-
-// Make sure that we record replacements by predicated loads and use them
-// instead of constructing Phis with inputs removed from the graph. Bug: 183897743
-// Note that the bug was hit only for a certain ordering of the NewInstance
-// uses, so we test both orderings.
-// // ENTRY
-// obj = new Obj();
-// obj.foo = 11;
-// if (param1) {
-// // LEFT1
-// escape(obj);
-// } else {
-// // RIGHT1
-// }
-// // MIDDLE
-// a = obj.foo;
-// if (param2) {
-// // LEFT2
-// obj.foo = 33;
-// } else {
-// // RIGHT2
-// }
-// // BRETURN
-// no_escape() // If `obj` escaped, the field value can change. (Avoid non-partial LSE.)
-// b = obj.foo;
-// return a + b;
-TEST_P(UsesOrderDependentTestGroup, RecordPredicatedReplacements1) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left1"},
- {"entry", "right1"},
- {"left1", "middle"},
- {"right1", "middle"},
- {"middle", "left2"},
- {"middle", "right2"},
- {"left2", "breturn"},
- {"right2", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(left1);
- GET_BLOCK(right1);
- GET_BLOCK(middle);
- GET_BLOCK(left2);
- GET_BLOCK(right2);
- GET_BLOCK(breturn);
- GET_BLOCK(exit);
-#undef GET_BLOCK
- EnsurePredecessorOrder(middle, {left1, right1});
- EnsurePredecessorOrder(breturn, {left2, right2});
- HInstruction* c0 = graph_->GetIntConstant(0);
- HInstruction* cnull = graph_->GetNullConstant();
- HInstruction* c11 = graph_->GetIntConstant(11);
- HInstruction* c33 = graph_->GetIntConstant(33);
- HInstruction* param1 = MakeParam(DataType::Type::kBool);
- HInstruction* param2 = MakeParam(DataType::Type::kBool);
-
- HInstruction* suspend = new (GetAllocator()) HSuspendCheck();
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* entry_write = MakeIFieldSet(new_inst, c11, MemberOffset(32));
- HInstruction* entry_if = new (GetAllocator()) HIf(param1);
- entry->AddInstruction(suspend);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(entry_write);
- entry->AddInstruction(entry_if);
- ManuallyBuildEnvFor(suspend, {});
- ManuallyBuildEnvFor(cls, {});
- ManuallyBuildEnvFor(new_inst, {});
-
- HInstruction* left1_call = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* left1_goto = new (GetAllocator()) HGoto();
- left1->AddInstruction(left1_call);
- left1->AddInstruction(left1_goto);
- ManuallyBuildEnvFor(left1_call, {});
-
- HInstruction* right1_goto = new (GetAllocator()) HGoto();
- right1->AddInstruction(right1_goto);
-
- HInstruction* middle_read = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* middle_if = new (GetAllocator()) HIf(param2);
- if (GetParam() == UsesOrder::kDefaultOrder) {
- middle->AddInstruction(middle_read);
- }
- middle->AddInstruction(middle_if);
-
- HInstanceFieldSet* left2_write = MakeIFieldSet(new_inst, c33, MemberOffset(32));
- HInstruction* left2_goto = new (GetAllocator()) HGoto();
- left2->AddInstruction(left2_write);
- left2->AddInstruction(left2_goto);
-
- HInstruction* right2_goto = new (GetAllocator()) HGoto();
- right2->AddInstruction(right2_goto);
-
- HInstruction* breturn_call = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* breturn_read = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* breturn_add =
- new (GetAllocator()) HAdd(DataType::Type::kInt32, middle_read, breturn_read);
- HInstruction* breturn_return = new (GetAllocator()) HReturn(breturn_add);
- breturn->AddInstruction(breturn_call);
- breturn->AddInstruction(breturn_read);
- breturn->AddInstruction(breturn_add);
- breturn->AddInstruction(breturn_return);
- ManuallyBuildEnvFor(breturn_call, {});
-
- if (GetParam() == UsesOrder::kReverseOrder) {
- // Insert `middle_read` in the same position as for the `kDefaultOrder` case.
- // The only difference is the order of entries in `new_inst->GetUses()` which
- // is used by `HeapReferenceData::CollectReplacements()` and defines the order
- // of instructions to process for `HeapReferenceData::PredicateInstructions()`.
- middle->InsertInstructionBefore(middle_read, middle_if);
- }
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- EXPECT_INS_RETAINED(cls);
- EXPECT_INS_REMOVED(new_inst);
- HNewInstance* replacement_new_inst = FindSingleInstruction<HNewInstance>(graph_);
- ASSERT_NE(replacement_new_inst, nullptr);
- EXPECT_INS_REMOVED(entry_write);
- std::vector<HInstanceFieldSet*> all_writes;
- std::tie(all_writes) = FindAllInstructions<HInstanceFieldSet>(graph_);
- ASSERT_EQ(2u, all_writes.size());
- ASSERT_NE(all_writes[0] == left2_write, all_writes[1] == left2_write);
- HInstanceFieldSet* replacement_write = all_writes[(all_writes[0] == left2_write) ? 1u : 0u];
- ASSERT_FALSE(replacement_write->GetIsPredicatedSet());
- ASSERT_INS_EQ(replacement_write->InputAt(0), replacement_new_inst);
- ASSERT_INS_EQ(replacement_write->InputAt(1), c11);
-
- EXPECT_INS_RETAINED(left1_call);
-
- EXPECT_INS_REMOVED(middle_read);
- HPredicatedInstanceFieldGet* replacement_middle_read =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, middle);
- ASSERT_NE(replacement_middle_read, nullptr);
- ASSERT_TRUE(replacement_middle_read->GetTarget()->IsPhi());
- ASSERT_EQ(2u, replacement_middle_read->GetTarget()->InputCount());
- ASSERT_INS_EQ(replacement_middle_read->GetTarget()->InputAt(0), replacement_new_inst);
- ASSERT_INS_EQ(replacement_middle_read->GetTarget()->InputAt(1), cnull);
- ASSERT_TRUE(replacement_middle_read->GetDefaultValue()->IsPhi());
- ASSERT_EQ(2u, replacement_middle_read->GetDefaultValue()->InputCount());
- ASSERT_INS_EQ(replacement_middle_read->GetDefaultValue()->InputAt(0), c0);
- ASSERT_INS_EQ(replacement_middle_read->GetDefaultValue()->InputAt(1), c11);
-
- EXPECT_INS_RETAINED(left2_write);
- ASSERT_TRUE(left2_write->GetIsPredicatedSet());
-
- EXPECT_INS_REMOVED(breturn_read);
- HPredicatedInstanceFieldGet* replacement_breturn_read =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- ASSERT_NE(replacement_breturn_read, nullptr);
- ASSERT_INS_EQ(replacement_breturn_read->GetTarget(), replacement_middle_read->GetTarget());
- ASSERT_TRUE(replacement_breturn_read->GetDefaultValue()->IsPhi());
- ASSERT_EQ(2u, replacement_breturn_read->GetDefaultValue()->InputCount());
- ASSERT_INS_EQ(replacement_breturn_read->GetDefaultValue()->InputAt(0), c33);
- HInstruction* other_input = replacement_breturn_read->GetDefaultValue()->InputAt(1);
- ASSERT_NE(other_input->GetBlock(), nullptr) << GetParam();
- ASSERT_INS_EQ(other_input, replacement_middle_read);
-}
-
-// Regression test for a bad DCHECK() found while trying to write a test for b/188188275.
-// // ENTRY
-// obj = new Obj();
-// obj.foo = 11;
-// if (param1) {
-// // LEFT1
-// escape(obj);
-// } else {
-// // RIGHT1
-// }
-// // MIDDLE
-// a = obj.foo;
-// if (param2) {
-// // LEFT2
-// no_escape();
-// } else {
-// // RIGHT2
-// }
-// // BRETURN
-// b = obj.foo;
-// return a + b;
-TEST_P(UsesOrderDependentTestGroup, RecordPredicatedReplacements2) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left1"},
- {"entry", "right1"},
- {"left1", "middle"},
- {"right1", "middle"},
- {"middle", "left2"},
- {"middle", "right2"},
- {"left2", "breturn"},
- {"right2", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(left1);
- GET_BLOCK(right1);
- GET_BLOCK(middle);
- GET_BLOCK(left2);
- GET_BLOCK(right2);
- GET_BLOCK(breturn);
- GET_BLOCK(exit);
-#undef GET_BLOCK
- EnsurePredecessorOrder(middle, {left1, right1});
- EnsurePredecessorOrder(breturn, {left2, right2});
- HInstruction* c0 = graph_->GetIntConstant(0);
- HInstruction* cnull = graph_->GetNullConstant();
- HInstruction* c11 = graph_->GetIntConstant(11);
- HInstruction* param1 = MakeParam(DataType::Type::kBool);
- HInstruction* param2 = MakeParam(DataType::Type::kBool);
-
- HInstruction* suspend = new (GetAllocator()) HSuspendCheck();
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* entry_write = MakeIFieldSet(new_inst, c11, MemberOffset(32));
- HInstruction* entry_if = new (GetAllocator()) HIf(param1);
- entry->AddInstruction(suspend);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(entry_write);
- entry->AddInstruction(entry_if);
- ManuallyBuildEnvFor(suspend, {});
- ManuallyBuildEnvFor(cls, {});
- ManuallyBuildEnvFor(new_inst, {});
-
- HInstruction* left1_call = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* left1_goto = new (GetAllocator()) HGoto();
- left1->AddInstruction(left1_call);
- left1->AddInstruction(left1_goto);
- ManuallyBuildEnvFor(left1_call, {});
-
- HInstruction* right1_goto = new (GetAllocator()) HGoto();
- right1->AddInstruction(right1_goto);
-
- HInstruction* middle_read = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* middle_if = new (GetAllocator()) HIf(param2);
- if (GetParam() == UsesOrder::kDefaultOrder) {
- middle->AddInstruction(middle_read);
- }
- middle->AddInstruction(middle_if);
-
- HInstruction* left2_call = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* left2_goto = new (GetAllocator()) HGoto();
- left2->AddInstruction(left2_call);
- left2->AddInstruction(left2_goto);
- ManuallyBuildEnvFor(left2_call, {});
-
- HInstruction* right2_goto = new (GetAllocator()) HGoto();
- right2->AddInstruction(right2_goto);
-
- HInstruction* breturn_read = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* breturn_add =
- new (GetAllocator()) HAdd(DataType::Type::kInt32, middle_read, breturn_read);
- HInstruction* breturn_return = new (GetAllocator()) HReturn(breturn_add);
- breturn->AddInstruction(breturn_read);
- breturn->AddInstruction(breturn_add);
- breturn->AddInstruction(breturn_return);
-
- if (GetParam() == UsesOrder::kReverseOrder) {
- // Insert `middle_read` in the same position as for the `kDefaultOrder` case.
- // The only difference is the order of entries in `new_inst->GetUses()` which
- // is used by `HeapReferenceData::CollectReplacements()` and defines the order
- // of instructions to process for `HeapReferenceData::PredicateInstructions()`.
- middle->InsertInstructionBefore(middle_read, middle_if);
- }
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- EXPECT_INS_RETAINED(cls);
- EXPECT_INS_REMOVED(new_inst);
- HNewInstance* replacement_new_inst = FindSingleInstruction<HNewInstance>(graph_);
- ASSERT_NE(replacement_new_inst, nullptr);
- EXPECT_INS_REMOVED(entry_write);
- HInstanceFieldSet* replacement_write = FindSingleInstruction<HInstanceFieldSet>(graph_);
- ASSERT_NE(replacement_write, nullptr);
- ASSERT_FALSE(replacement_write->GetIsPredicatedSet());
- ASSERT_INS_EQ(replacement_write->InputAt(0), replacement_new_inst);
- ASSERT_INS_EQ(replacement_write->InputAt(1), c11);
-
- EXPECT_INS_RETAINED(left1_call);
-
- EXPECT_INS_REMOVED(middle_read);
- HPredicatedInstanceFieldGet* replacement_middle_read =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, middle);
- ASSERT_NE(replacement_middle_read, nullptr);
- ASSERT_TRUE(replacement_middle_read->GetTarget()->IsPhi());
- ASSERT_EQ(2u, replacement_middle_read->GetTarget()->InputCount());
- ASSERT_INS_EQ(replacement_middle_read->GetTarget()->InputAt(0), replacement_new_inst);
- ASSERT_INS_EQ(replacement_middle_read->GetTarget()->InputAt(1), cnull);
- ASSERT_TRUE(replacement_middle_read->GetDefaultValue()->IsPhi());
- ASSERT_EQ(2u, replacement_middle_read->GetDefaultValue()->InputCount());
- ASSERT_INS_EQ(replacement_middle_read->GetDefaultValue()->InputAt(0), c0);
- ASSERT_INS_EQ(replacement_middle_read->GetDefaultValue()->InputAt(1), c11);
-
- EXPECT_INS_RETAINED(left2_call);
-
- EXPECT_INS_REMOVED(breturn_read);
- HPredicatedInstanceFieldGet* replacement_breturn_read =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- ASSERT_NE(replacement_breturn_read, nullptr);
- ASSERT_INS_EQ(replacement_breturn_read->GetTarget(), replacement_middle_read->GetTarget());
- ASSERT_INS_EQ(replacement_breturn_read->GetDefaultValue(), replacement_middle_read);
-}
-
-INSTANTIATE_TEST_SUITE_P(LoadStoreEliminationTest,
- UsesOrderDependentTestGroup,
- testing::Values(UsesOrder::kDefaultOrder, UsesOrder::kReverseOrder));
-
-// The parameter is the number of times we call `std::next_permutation` (from 0 to 5)
-// so that we test all 6 permutations of three items.
-class UsesOrderDependentTestGroupForThreeItems
- : public LoadStoreEliminationTestBase<CommonCompilerTestWithParam<size_t>> {};
-
-// Make sure that after we record replacements by predicated loads, we correctly
-// use that predicated load for Phi placeholders that were previously marked as
-// replaced by the now removed unpredicated load. (The fix for bug 183897743 was
-// not good enough.) Bug: 188188275
-// // ENTRY
-// obj = new Obj();
-// obj.foo = 11;
-// if (param1) {
-// // LEFT1
-// escape(obj);
-// } else {
-// // RIGHT1
-// }
-// // MIDDLE1
-// a = obj.foo;
-// if (param2) {
-// // LEFT2
-// no_escape1();
-// } else {
-// // RIGHT2
-// }
-// // MIDDLE2
-// if (param3) {
-// // LEFT3
-// x = obj.foo;
-// no_escape2();
-// } else {
-// // RIGHT3
-// x = 0;
-// }
-// // BRETURN
-// b = obj.foo;
-// return a + b + x;
-TEST_P(UsesOrderDependentTestGroupForThreeItems, RecordPredicatedReplacements3) {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope vshs(soa.Self());
- CreateGraph(&vshs);
- AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
- "exit",
- {{"entry", "left1"},
- {"entry", "right1"},
- {"left1", "middle1"},
- {"right1", "middle1"},
- {"middle1", "left2"},
- {"middle1", "right2"},
- {"left2", "middle2"},
- {"right2", "middle2"},
- {"middle2", "left3"},
- {"middle2", "right3"},
- {"left3", "breturn"},
- {"right3", "breturn"},
- {"breturn", "exit"}}));
-#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
- GET_BLOCK(entry);
- GET_BLOCK(left1);
- GET_BLOCK(right1);
- GET_BLOCK(middle1);
- GET_BLOCK(left2);
- GET_BLOCK(right2);
- GET_BLOCK(middle2);
- GET_BLOCK(left3);
- GET_BLOCK(right3);
- GET_BLOCK(breturn);
- GET_BLOCK(exit);
-#undef GET_BLOCK
- EnsurePredecessorOrder(middle1, {left1, right1});
- EnsurePredecessorOrder(middle2, {left2, right2});
- EnsurePredecessorOrder(breturn, {left3, right3});
- HInstruction* c0 = graph_->GetIntConstant(0);
- HInstruction* cnull = graph_->GetNullConstant();
- HInstruction* c11 = graph_->GetIntConstant(11);
- HInstruction* param1 = MakeParam(DataType::Type::kBool);
- HInstruction* param2 = MakeParam(DataType::Type::kBool);
- HInstruction* param3 = MakeParam(DataType::Type::kBool);
-
- HInstruction* suspend = new (GetAllocator()) HSuspendCheck();
- HInstruction* cls = MakeClassLoad();
- HInstruction* new_inst = MakeNewInstance(cls);
- HInstruction* entry_write = MakeIFieldSet(new_inst, c11, MemberOffset(32));
- HInstruction* entry_if = new (GetAllocator()) HIf(param1);
- entry->AddInstruction(suspend);
- entry->AddInstruction(cls);
- entry->AddInstruction(new_inst);
- entry->AddInstruction(entry_write);
- entry->AddInstruction(entry_if);
- ManuallyBuildEnvFor(suspend, {});
- ManuallyBuildEnvFor(cls, {});
- ManuallyBuildEnvFor(new_inst, {});
-
- HInstruction* left1_call = MakeInvoke(DataType::Type::kVoid, { new_inst });
- HInstruction* left1_goto = new (GetAllocator()) HGoto();
- left1->AddInstruction(left1_call);
- left1->AddInstruction(left1_goto);
- ManuallyBuildEnvFor(left1_call, {});
-
- HInstruction* right1_goto = new (GetAllocator()) HGoto();
- right1->AddInstruction(right1_goto);
-
- HInstruction* middle1_read = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* middle1_if = new (GetAllocator()) HIf(param2);
- // Delay inserting `middle1_read`, do that later with ordering based on `GetParam()`.
- middle1->AddInstruction(middle1_if);
-
- HInstruction* left2_call = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* left2_goto = new (GetAllocator()) HGoto();
- left2->AddInstruction(left2_call);
- left2->AddInstruction(left2_goto);
- ManuallyBuildEnvFor(left2_call, {});
-
- HInstruction* right2_goto = new (GetAllocator()) HGoto();
- right2->AddInstruction(right2_goto);
-
- HInstruction* middle2_if = new (GetAllocator()) HIf(param3);
- middle2->AddInstruction(middle2_if);
-
- HInstruction* left3_read = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* left3_call = MakeInvoke(DataType::Type::kVoid, {});
- HInstruction* left3_goto = new (GetAllocator()) HGoto();
- // Delay inserting `left3_read`, do that later with ordering based on `GetParam()`.
- left3->AddInstruction(left3_call);
- left3->AddInstruction(left3_goto);
- ManuallyBuildEnvFor(left3_call, {});
-
- HInstruction* right3_goto = new (GetAllocator()) HGoto();
- right3->AddInstruction(right3_goto);
-
- HPhi* breturn_phi = MakePhi({left3_read, c0});
- HInstruction* breturn_read = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
- HInstruction* breturn_add1 =
- new (GetAllocator()) HAdd(DataType::Type::kInt32, middle1_read, breturn_read);
- HInstruction* breturn_add2 =
- new (GetAllocator()) HAdd(DataType::Type::kInt32, breturn_add1, breturn_phi);
- HInstruction* breturn_return = new (GetAllocator()) HReturn(breturn_add2);
- breturn->AddPhi(breturn_phi);
- // Delay inserting `breturn_read`, do that later with ordering based on `GetParam()`.
- breturn->AddInstruction(breturn_add1);
- breturn->AddInstruction(breturn_add2);
- breturn->AddInstruction(breturn_return);
-
- // Insert reads in the same positions but in different insertion orders.
- // The only difference is the order of entries in `new_inst->GetUses()` which
- // is used by `HeapReferenceData::CollectReplacements()` and defines the order
- // of instructions to process for `HeapReferenceData::PredicateInstructions()`.
- std::tuple<size_t, HInstruction*, HInstruction*> read_insertions[] = {
- { 0u, middle1_read, middle1_if },
- { 1u, left3_read, left3_call },
- { 2u, breturn_read, breturn_add1 },
- };
- for (size_t i = 0, num = GetParam(); i != num; ++i) {
- std::next_permutation(read_insertions, read_insertions + std::size(read_insertions));
- }
- for (auto [order, read, cursor] : read_insertions) {
- cursor->GetBlock()->InsertInstructionBefore(read, cursor);
- }
-
- SetupExit(exit);
-
- PerformLSEWithPartial(blks);
-
- EXPECT_INS_RETAINED(cls);
- EXPECT_INS_REMOVED(new_inst);
- HNewInstance* replacement_new_inst = FindSingleInstruction<HNewInstance>(graph_);
- ASSERT_NE(replacement_new_inst, nullptr);
- EXPECT_INS_REMOVED(entry_write);
- HInstanceFieldSet* replacement_write = FindSingleInstruction<HInstanceFieldSet>(graph_);
- ASSERT_NE(replacement_write, nullptr);
- ASSERT_FALSE(replacement_write->GetIsPredicatedSet());
- ASSERT_INS_EQ(replacement_write->InputAt(0), replacement_new_inst);
- ASSERT_INS_EQ(replacement_write->InputAt(1), c11);
-
- EXPECT_INS_RETAINED(left1_call);
-
- EXPECT_INS_REMOVED(middle1_read);
- HPredicatedInstanceFieldGet* replacement_middle1_read =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, middle1);
- ASSERT_NE(replacement_middle1_read, nullptr);
- ASSERT_TRUE(replacement_middle1_read->GetTarget()->IsPhi());
- ASSERT_EQ(2u, replacement_middle1_read->GetTarget()->InputCount());
- ASSERT_INS_EQ(replacement_middle1_read->GetTarget()->InputAt(0), replacement_new_inst);
- ASSERT_INS_EQ(replacement_middle1_read->GetTarget()->InputAt(1), cnull);
- ASSERT_TRUE(replacement_middle1_read->GetDefaultValue()->IsPhi());
- ASSERT_EQ(2u, replacement_middle1_read->GetDefaultValue()->InputCount());
- ASSERT_INS_EQ(replacement_middle1_read->GetDefaultValue()->InputAt(0), c0);
- ASSERT_INS_EQ(replacement_middle1_read->GetDefaultValue()->InputAt(1), c11);
-
- EXPECT_INS_RETAINED(left2_call);
-
- EXPECT_INS_REMOVED(left3_read);
- HPredicatedInstanceFieldGet* replacement_left3_read =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, left3);
- ASSERT_NE(replacement_left3_read, nullptr);
- ASSERT_TRUE(replacement_left3_read->GetTarget()->IsPhi());
- ASSERT_INS_EQ(replacement_left3_read->GetTarget(), replacement_middle1_read->GetTarget());
- ASSERT_INS_EQ(replacement_left3_read->GetDefaultValue(), replacement_middle1_read);
- EXPECT_INS_RETAINED(left3_call);
-
- EXPECT_INS_RETAINED(breturn_phi);
- EXPECT_INS_REMOVED(breturn_read);
- HPredicatedInstanceFieldGet* replacement_breturn_read =
- FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
- ASSERT_NE(replacement_breturn_read, nullptr);
- ASSERT_INS_EQ(replacement_breturn_read->GetTarget(), replacement_middle1_read->GetTarget());
- ASSERT_TRUE(replacement_breturn_read->GetDefaultValue()->IsPhi());
- ASSERT_EQ(2u, replacement_breturn_read->GetDefaultValue()->InputCount());
- ASSERT_INS_EQ(replacement_breturn_read->GetDefaultValue()->InputAt(0), replacement_left3_read);
- ASSERT_INS_EQ(replacement_breturn_read->GetDefaultValue()->InputAt(1), replacement_middle1_read);
- EXPECT_INS_RETAINED(breturn_add1);
- ASSERT_INS_EQ(breturn_add1->InputAt(0), replacement_middle1_read);
- ASSERT_INS_EQ(breturn_add1->InputAt(1), replacement_breturn_read);
- EXPECT_INS_RETAINED(breturn_add2);
- ASSERT_INS_EQ(breturn_add2->InputAt(0), breturn_add1);
- ASSERT_INS_EQ(breturn_add2->InputAt(1), breturn_phi);
- EXPECT_INS_RETAINED(breturn_return);
-}
-
-INSTANTIATE_TEST_SUITE_P(LoadStoreEliminationTest,
- UsesOrderDependentTestGroupForThreeItems,
- testing::Values(0u, 1u, 2u, 3u, 4u, 5u));
-
} // namespace art
diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc
index 6163624a97..b3f9e835de 100644
--- a/compiler/optimizing/loop_analysis.cc
+++ b/compiler/optimizing/loop_analysis.cc
@@ -221,9 +221,6 @@ class X86_64LoopHelper : public ArchDefaultLoopHelper {
return 3;
case HInstruction::InstructionKind::kIf:
return 2;
- case HInstruction::InstructionKind::kPredicatedInstanceFieldGet:
- // test + cond-jump + IFieldGet
- return 4;
case HInstruction::InstructionKind::kInstanceFieldGet:
return 2;
case HInstruction::InstructionKind::kInstanceFieldSet:
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6ec83beabd..7d9e50e3e7 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1552,7 +1552,6 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(If, Instruction) \
M(InstanceFieldGet, Instruction) \
M(InstanceFieldSet, Instruction) \
- M(PredicatedInstanceFieldGet, Instruction) \
M(InstanceOf, Instruction) \
M(IntConstant, Constant) \
M(IntermediateAddress, Instruction) \
@@ -6369,96 +6368,6 @@ class HInstanceFieldGet final : public HExpression<1> {
const FieldInfo field_info_;
};
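-
-// A field get that only dereferences its target when the target is non-null.
-// Conceptually:
-//
-//   result = (target != null) ? target->field : default_value
-//
-// Partial LSE uses this when an allocation escapes on only some paths: on the
-// non-escaping paths `target` is the null constant and the tracked value is
-// routed in through `default_value`.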
-class HPredicatedInstanceFieldGet final : public HExpression<2> {
- public:
- HPredicatedInstanceFieldGet(HInstanceFieldGet* orig,
- HInstruction* target,
- HInstruction* default_val)
- : HExpression(kPredicatedInstanceFieldGet,
- orig->GetFieldType(),
- orig->GetSideEffects(),
- orig->GetDexPc()),
- field_info_(orig->GetFieldInfo()) {
- // NB Default-val is at 0 so we can avoid doing a move.
- SetRawInputAt(1, target);
- SetRawInputAt(0, default_val);
- }
-
- HPredicatedInstanceFieldGet(HInstruction* value,
- ArtField* field,
- HInstruction* default_value,
- DataType::Type field_type,
- MemberOffset field_offset,
- bool is_volatile,
- uint32_t field_idx,
- uint16_t declaring_class_def_index,
- const DexFile& dex_file,
- uint32_t dex_pc)
- : HExpression(kPredicatedInstanceFieldGet,
- field_type,
- SideEffects::FieldReadOfType(field_type, is_volatile),
- dex_pc),
- field_info_(field,
- field_offset,
- field_type,
- is_volatile,
- field_idx,
- declaring_class_def_index,
- dex_file) {
- SetRawInputAt(1, value);
- SetRawInputAt(0, default_value);
- }
-
- bool IsClonable() const override {
- return true;
- }
- bool CanBeMoved() const override {
- return !IsVolatile();
- }
-
- HInstruction* GetDefaultValue() const {
- return InputAt(0);
- }
- HInstruction* GetTarget() const {
- return InputAt(1);
- }
-
- bool InstructionDataEquals(const HInstruction* other) const override {
- const HPredicatedInstanceFieldGet* other_get = other->AsPredicatedInstanceFieldGet();
- return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue() &&
- GetDefaultValue() == other_get->GetDefaultValue();
- }
-
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
- return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
- }
-
- size_t ComputeHashCode() const override {
- return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
- }
-
- bool IsFieldAccess() const override { return true; }
- const FieldInfo& GetFieldInfo() const override { return field_info_; }
- MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
- DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
- bool IsVolatile() const { return field_info_.IsVolatile(); }
-
- void SetType(DataType::Type new_type) {
- DCHECK(DataType::IsIntegralType(GetType()));
- DCHECK(DataType::IsIntegralType(new_type));
- DCHECK_EQ(DataType::Size(GetType()), DataType::Size(new_type));
- SetPackedField<TypeField>(new_type);
- }
-
- DECLARE_INSTRUCTION(PredicatedInstanceFieldGet);
-
- protected:
- DEFAULT_COPY_CONSTRUCTOR(PredicatedInstanceFieldGet);
-
- private:
- const FieldInfo field_info_;
-};
-
enum class WriteBarrierKind {
// Emit the write barrier, with a runtime optimization which checks if the value
// being set is null.
@@ -6503,7 +6412,6 @@ class HInstanceFieldSet final : public HExpression<2> {
declaring_class_def_index,
dex_file) {
SetPackedFlag<kFlagValueCanBeNull>(true);
- SetPackedFlag<kFlagIsPredicatedSet>(false);
SetPackedField<WriteBarrierKindField>(WriteBarrierKind::kEmitWithNullCheck);
SetRawInputAt(0, object);
SetRawInputAt(1, value);
@@ -6523,8 +6431,6 @@ class HInstanceFieldSet final : public HExpression<2> {
HInstruction* GetValue() const { return InputAt(1); }
bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
- bool GetIsPredicatedSet() const { return GetPackedFlag<kFlagIsPredicatedSet>(); }
- void SetIsPredicatedSet(bool value = true) { SetPackedFlag<kFlagIsPredicatedSet>(value); }
WriteBarrierKind GetWriteBarrierKind() { return GetPackedField<WriteBarrierKindField>(); }
void SetWriteBarrierKind(WriteBarrierKind kind) {
DCHECK(kind != WriteBarrierKind::kEmitWithNullCheck)
@@ -6539,8 +6445,7 @@ class HInstanceFieldSet final : public HExpression<2> {
private:
static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits;
- static constexpr size_t kFlagIsPredicatedSet = kFlagValueCanBeNull + 1;
- static constexpr size_t kWriteBarrierKind = kFlagIsPredicatedSet + 1;
+ static constexpr size_t kWriteBarrierKind = kFlagValueCanBeNull + 1;
static constexpr size_t kWriteBarrierKindSize =
MinimumBitsToStore(static_cast<size_t>(WriteBarrierKind::kLast));
static constexpr size_t kNumberOfInstanceFieldSetPackedBits =
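The packed-bits arithmetic at the end of this hunk is the one subtle edit: with kFlagIsPredicatedSet gone, the write-barrier-kind field shifts down one bit, and both the old and new layouts stay self-consistent because every offset is derived from its predecessor. A standalone sketch of that flag-packing idiom; the bit widths below are assumptions for illustration, not ART's actual constants:

#include <cstddef>
#include <cstdint>

// Illustrative flag packing in the style of HInstanceFieldSet's packed bits.
constexpr size_t kGenericPackedBits = 4;                       // assumed baseline
constexpr size_t kFlagValueCanBeNull = kGenericPackedBits;     // 1 bit
constexpr size_t kWriteBarrierKind = kFlagValueCanBeNull + 1;  // next free bit
constexpr size_t kWriteBarrierKindSize = 2;                    // assumed: kinds fit in 2 bits

constexpr uint32_t SetField(uint32_t word, size_t shift, size_t size, uint32_t value) {
  const uint32_t mask = ((1u << size) - 1u) << shift;
  return (word & ~mask) | ((value << shift) & mask);
}

constexpr uint32_t GetField(uint32_t word, size_t shift, size_t size) {
  return (word >> shift) & ((1u << size) - 1u);
}

static_assert(GetField(SetField(0u, kWriteBarrierKind, kWriteBarrierKindSize, 2u),
                       kWriteBarrierKind, kWriteBarrierKindSize) == 2u,
              "write-barrier kind round-trips through the packed word");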
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 5d32ea2fbd..4549af3cbf 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -131,8 +131,6 @@ enum class MethodCompilationStat {
kPartialLSEPossible,
kPartialStoreRemoved,
kPartialAllocationMoved,
- kPredicatedLoadAdded,
- kPredicatedStoreAdded,
kDevirtualized,
kLastStat
};
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 3a5cceed9a..6f44d45ed4 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -63,7 +63,6 @@ class ReferenceTypePropagation::RTPVisitor final : public HGraphDelegateVisitor
void VisitLoadException(HLoadException* instr) override;
void VisitNewArray(HNewArray* instr) override;
void VisitParameterValue(HParameterValue* instr) override;
- void VisitPredicatedInstanceFieldGet(HPredicatedInstanceFieldGet* instr) override;
void VisitInstanceFieldGet(HInstanceFieldGet* instr) override;
void VisitStaticFieldGet(HStaticFieldGet* instr) override;
void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) override;
@@ -266,7 +265,7 @@ static void BoundTypeForClassCheck(HInstruction* check) {
}
HInstruction* field_get = (load_class == input_one) ? input_two : input_one;
- if (!field_get->IsInstanceFieldGet() && !field_get->IsPredicatedInstanceFieldGet()) {
+ if (!field_get->IsInstanceFieldGet()) {
return;
}
HInstruction* receiver = field_get->InputAt(0);
@@ -587,11 +586,6 @@ void ReferenceTypePropagation::RTPVisitor::UpdateFieldAccessTypeInfo(HInstructio
SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
}

-void ReferenceTypePropagation::RTPVisitor::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* instr) {
- UpdateFieldAccessTypeInfo(instr, instr->GetFieldInfo());
-}
-
void ReferenceTypePropagation::RTPVisitor::VisitInstanceFieldGet(HInstanceFieldGet* instr) {
UpdateFieldAccessTypeInfo(instr, instr->GetFieldInfo());
}
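The reference_type_propagation.cc edits are mechanical because RTPVisitor is a delegate visitor: one Visit* override per node kind, each field-get override forwarding to UpdateFieldAccessTypeInfo, so retiring the node kind retires exactly one override. A toy sketch of that double-dispatch shape (stand-in names, not ART's classes):

// Toy double-dispatch in the shape of HGraphDelegateVisitor.
struct FieldGet;

struct Visitor {
  virtual ~Visitor() = default;
  virtual void VisitFieldGet(FieldGet* node) = 0;  // one override per node kind
};

struct FieldGet {
  void Accept(Visitor* v) { v->VisitFieldGet(this); }
};

struct TypeInfoVisitor : Visitor {
  // Mirrors VisitInstanceFieldGet forwarding to UpdateFieldAccessTypeInfo.
  void VisitFieldGet(FieldGet* node) override { UpdateTypeInfo(node); }
  void UpdateTypeInfo(FieldGet* /*node*/) { /* set class as type info, inexact */ }
};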
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 1cdc98a8be..4c68844dbb 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -108,7 +108,6 @@ static bool IsArrayAccess(const HInstruction* instruction) {
static bool IsInstanceFieldAccess(const HInstruction* instruction) {
return instruction->IsInstanceFieldGet() ||
instruction->IsInstanceFieldSet() ||
- instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsUnresolvedInstanceFieldGet() ||
instruction->IsUnresolvedInstanceFieldSet();
}
@@ -123,7 +122,6 @@ static bool IsStaticFieldAccess(const HInstruction* instruction) {
static bool IsResolvedFieldAccess(const HInstruction* instruction) {
return instruction->IsInstanceFieldGet() ||
instruction->IsInstanceFieldSet() ||
- instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsStaticFieldGet() ||
instruction->IsStaticFieldSet();
}
@@ -149,9 +147,7 @@ size_t SideEffectDependencyAnalysis::MemoryDependencyAnalysis::FieldAccessHeapLo
DCHECK(GetFieldInfo(instr) != nullptr);
DCHECK(heap_location_collector_ != nullptr);
- HInstruction* ref = instr->IsPredicatedInstanceFieldGet()
- ? instr->AsPredicatedInstanceFieldGet()->GetTarget()
- : instr->InputAt(0);
+ HInstruction* ref = instr->InputAt(0);
size_t heap_loc = heap_location_collector_->GetFieldHeapLocation(ref, GetFieldInfo(instr));
// This field access should already have been analyzed and added to the HeapLocationCollector.
DCHECK(heap_loc != HeapLocationCollector::kHeapLocationNotFound);
@@ -554,7 +550,7 @@ void HScheduler::Schedule(HGraph* graph) {
// should run the analysis or not.
const HeapLocationCollector* heap_location_collector = nullptr;
ScopedArenaAllocator allocator(graph->GetArenaStack());
- LoadStoreAnalysis lsa(graph, /*stats=*/nullptr, &allocator, LoadStoreAnalysisType::kBasic);
+ LoadStoreAnalysis lsa(graph, /*stats=*/nullptr, &allocator);
if (!only_optimize_loop_blocks_ || graph->HasLoops()) {
lsa.Run();
heap_location_collector = &lsa.GetHeapLocationCollector();
@@ -734,8 +730,6 @@ bool HScheduler::IsSchedulable(const HInstruction* instruction) const {
instruction->IsCurrentMethod() ||
instruction->IsDivZeroCheck() ||
(instruction->IsInstanceFieldGet() && !instruction->AsInstanceFieldGet()->IsVolatile()) ||
- (instruction->IsPredicatedInstanceFieldGet() &&
- !instruction->AsPredicatedInstanceFieldGet()->IsVolatile()) ||
(instruction->IsInstanceFieldSet() && !instruction->AsInstanceFieldSet()->IsVolatile()) ||
instruction->IsInstanceOf() ||
instruction->IsInvokeInterface() ||
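The FieldAccessHeapLocation hunk above shows the main way the predicated node taxed analyses: its receiver lived at input 1 (input 0 held the default value), so every "input 0 is the receiver" assumption needed a branch. With the node gone, the receiver is uniformly InputAt(0). A self-contained before/after sketch, with a toy Insn type standing in for HInstruction:

#include <cstddef>
#include <vector>

// Toy stand-in for HInstruction: just enough to show the input-index split.
struct Insn {
  bool is_predicated_get = false;
  std::vector<Insn*> inputs;
  Insn* InputAt(size_t i) const { return inputs[i]; }
};

// Before this change: analyses branched on the node kind, because a
// predicated get kept its receiver at input 1 (input 0 held the default).
Insn* ReceiverBefore(const Insn& get) {
  return get.is_predicated_get ? get.InputAt(1)   // GetTarget()
                               : get.InputAt(0);
}

// After: every field access keeps its receiver at input 0.
Insn* ReceiverAfter(const Insn& get) { return get.InputAt(0); }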
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index ba6e109832..cafb0f5da6 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -853,11 +853,6 @@ void SchedulingLatencyVisitorARM::VisitDiv(HDiv* instruction) {
}
}

-void SchedulingLatencyVisitorARM::VisitPredicatedInstanceFieldGet(
- HPredicatedInstanceFieldGet* instruction) {
- HandleFieldGetLatencies(instruction, instruction->GetFieldInfo());
-}
-
void SchedulingLatencyVisitorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGetLatencies(instruction, instruction->GetFieldInfo());
}
@@ -918,9 +913,7 @@ void SchedulingLatencyVisitorARM::VisitRem(HRem* instruction) {
void SchedulingLatencyVisitorARM::HandleFieldGetLatencies(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsPredicatedInstanceFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
DCHECK(codegen_ != nullptr);
bool is_volatile = field_info.IsVolatile();
DataType::Type field_type = field_info.GetFieldType();
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index cedc12a2be..cf00fa12a3 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -77,7 +77,6 @@ class SchedulingLatencyVisitorARM final : public SchedulingLatencyVisitor {
M(Condition, unused) \
M(Compare, unused) \
M(BoundsCheck, unused) \
- M(PredicatedInstanceFieldGet, unused) \
M(InstanceFieldGet, unused) \
M(InstanceFieldSet, unused) \
M(InstanceOf, unused) \
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 165bfe3d94..c2b1fd6f7c 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -274,8 +274,7 @@ class SchedulerTest : public CommonCompilerTest, public OptimizingUnitTestHelper
entry->AddInstruction(instr);
}

-    HeapLocationCollector heap_location_collector(
- graph_, GetScopedAllocator(), LoadStoreAnalysisType::kBasic);
+ HeapLocationCollector heap_location_collector(graph_, GetScopedAllocator());
heap_location_collector.VisitBasicBlock(entry);
heap_location_collector.BuildAliasingMatrix();
TestSchedulingGraph scheduling_graph(GetScopedAllocator(), &heap_location_collector);
diff --git a/test/530-checker-instance-of-simplifier/jasmin/Main.j b/test/530-checker-instance-of-simplifier/jasmin/Main.j
index 83cb4fa5c6..9af04a25ec 100644
--- a/test/530-checker-instance-of-simplifier/jasmin/Main.j
+++ b/test/530-checker-instance-of-simplifier/jasmin/Main.j
@@ -32,7 +32,6 @@
;; CHECK-START: int Main.$noinline$test(boolean) instruction_simplifier$before_codegen (after)
;; CHECK-NOT: InstanceFieldSet
;; CHECK-NOT: InstanceFieldGet
-;; CHECK-NOT: PredicatedInstanceFieldGet
; public static int $noinline$test(boolean escape) {
; Foo f = new Foo();