Revert^4 "Partial Load Store Elimination"
This reverts commit 791df7a161ecfa28eb69862a4bc285282463b960.
This unreverts commit fc1ce4e8be0d977e3d41699f5ec746d68f63c024.
This unreverts commit b8686ce4c93eba7192ed7ef89e7ffd9f3aa6cd07.
We incorrectly failed to include PredicatedInstanceFieldGet in two
conditions in code_generator.cc, both inside DCHECKs that validate
read-barrier slow paths. This caused tests to fail under the
read-barrier-table-lookup configuration.
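
For context, HPredicatedInstanceFieldGet carries a default value at
input 0 and the target object at input 1: the field is loaded only when
the target is non-null, otherwise the default value is kept. A minimal
illustrative C++ sketch of these semantics (not ART code; Obj and the
function name are made up):

  #include <cstdint>

  struct Obj { int32_t field; };  // Stand-in for a managed object.

  // Mirrors the emitted code below: a Cbz/testl on the target branches
  // over the load, leaving the default value (register-allocated as
  // SameAsFirstInput) in the output register.
  int32_t PredicatedInstanceFieldGet(const Obj* target, int32_t default_value) {
    if (target == nullptr) {
      return default_value;
    }
    return target->field;
  }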
Reason for revert: Fixed 2 incorrect checks
Bug: 67037140
Test: ./art/test/testrunner/run_build_test_target.py -j70 art-gtest-read-barrier-table-lookup
Change-Id: I32b01b29fb32077fb5074e7c77a0226bd1fcaab4
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 68120e2..8333b32 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1667,6 +1667,7 @@
(kEmitCompilerReadBarrier &&
!kUseBakerReadBarrier &&
(instruction->IsInstanceFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsStaticFieldGet() ||
instruction->IsArrayGet() ||
instruction->IsLoadClass() ||
@@ -1677,7 +1678,8 @@
<< "instruction->DebugName()=" << instruction->DebugName()
<< " instruction->GetSideEffects().ToString()="
<< instruction->GetSideEffects().ToString()
- << " slow_path->GetDescription()=" << slow_path->GetDescription();
+ << " slow_path->GetDescription()=" << slow_path->GetDescription() << std::endl
+ << "Instruction and args: " << instruction->DumpWithArgs();
}
} else {
// The GC side effect is not required for the instruction. But the instruction might still have
@@ -1702,6 +1704,7 @@
// PC-related information.
DCHECK(kUseBakerReadBarrier);
DCHECK(instruction->IsInstanceFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsStaticFieldGet() ||
instruction->IsArrayGet() ||
instruction->IsArraySet() ||
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a9f03b0..b945be2 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -16,6 +16,8 @@
#include "code_generator_arm64.h"
+#include "aarch64/assembler-aarch64.h"
+#include "aarch64/registers-aarch64.h"
#include "arch/arm64/asm_support_arm64.h"
#include "arch/arm64/instruction_set_features_arm64.h"
#include "arch/arm64/jni_frame_arm64.h"
@@ -40,6 +42,7 @@
#include "mirror/class-inl.h"
#include "mirror/var_handle.h"
#include "offsets.h"
+#include "optimizing/common_arm64.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/assembler.h"
@@ -645,6 +648,7 @@
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg()));
DCHECK(instruction_->IsInstanceFieldGet() ||
+ instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
@@ -2002,7 +2006,11 @@
void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet());
+
+ bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
@@ -2021,29 +2029,45 @@
locations->AddTemp(FixedTempLocation());
}
}
- locations->SetInAt(0, Location::RequiresRegister());
+ // Input for object receiver.
+ locations->SetInAt(is_predicated ? 1 : 0, Location::RequiresRegister());
if (DataType::IsFloatingPointType(instruction->GetType())) {
- locations->SetOut(Location::RequiresFpuRegister());
+ if (is_predicated) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ } else {
+ locations->SetOut(Location::RequiresFpuRegister());
+ }
} else {
- // The output overlaps for an object field get when read barriers
- // are enabled: we do not want the load to overwrite the object's
- // location, as we need it to emit the read barrier.
- locations->SetOut(
- Location::RequiresRegister(),
- object_field_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
+ if (is_predicated) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ } else {
+ // The output overlaps for an object field get when read barriers
+ // are enabled: we do not want the load to overwrite the object's
+ // location, as we need it to emit the read barrier.
+ locations->SetOut(Location::RequiresRegister(),
+ object_field_get_with_read_barrier ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
+ }
}
}
void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet());
+ bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations = instruction->GetLocations();
- Location base_loc = locations->InAt(0);
+ uint32_t receiver_input = is_predicated ? 1 : 0;
+ Location base_loc = locations->InAt(receiver_input);
Location out = locations->Out();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType()));
DataType::Type load_type = instruction->GetType();
- MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset());
+ MemOperand field =
+ HeapOperand(InputRegisterAt(instruction, receiver_input), field_info.GetFieldOffset());
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier &&
load_type == DataType::Type::kReference) {
@@ -2105,12 +2129,19 @@
const FieldInfo& field_info,
bool value_can_be_null) {
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
+ bool is_predicated =
+ instruction->IsInstanceFieldSet() && instruction->AsInstanceFieldSet()->GetIsPredicatedSet();
Register obj = InputRegisterAt(instruction, 0);
CPURegister value = InputCPURegisterOrZeroRegAt(instruction, 1);
CPURegister source = value;
Offset offset = field_info.GetFieldOffset();
DataType::Type field_type = field_info.GetFieldType();
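+ // A predicated set stores nothing (and skips the write barrier) when the
+ // target object is null.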
+ std::optional<vixl::aarch64::Label> pred_is_null;
+ if (is_predicated) {
+ pred_is_null.emplace();
+ __ Cbz(obj, &*pred_is_null);
+ }
{
// We use a block to end the scratch scope before the write barrier, thus
@@ -2139,6 +2170,10 @@
if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
codegen_->MarkGCCard(obj, Register(value), value_can_be_null);
}
+
+ if (is_predicated) {
+ __ Bind(&*pred_is_null);
+ }
}
void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
@@ -3794,10 +3829,23 @@
__ Nop();
}
+void LocationsBuilderARM64::VisitPredicatedInstanceFieldGet(
+ HPredicatedInstanceFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
+void InstructionCodeGeneratorARM64::VisitPredicatedInstanceFieldGet(
+ HPredicatedInstanceFieldGet* instruction) {
+ vixl::aarch64::Label finish;
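+ // Input 1 is the target object; if it is null, skip the load and keep the
+ // default value (input 0), which already occupies the output register.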
+ __ Cbz(InputRegisterAt(instruction, 1), &finish);
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+ __ Bind(&finish);
+}
+
void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index d7d09af..18709f8 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -761,6 +761,7 @@
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out.GetCode()));
DCHECK(instruction_->IsInstanceFieldGet() ||
+ instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
@@ -5733,7 +5734,10 @@
LocationSummary* locations = instruction->GetLocations();
vixl32::Register base = InputRegisterAt(instruction, 0);
Location value = locations->InAt(1);
+ std::optional<vixl::aarch32::Label> pred_is_null;
+ bool is_predicated =
+ instruction->IsInstanceFieldSet() && instruction->AsInstanceFieldSet()->GetIsPredicatedSet();
bool is_volatile = field_info.IsVolatile();
bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
DataType::Type field_type = field_info.GetFieldType();
@@ -5741,6 +5745,11 @@
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
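+ // A predicated set stores nothing when the target object is null.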
+ if (is_predicated) {
+ pred_is_null.emplace();
+ __ CompareAndBranchIfZero(base, &*pred_is_null, /* is_far_target= */ false);
+ }
+
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
}
@@ -5844,14 +5853,21 @@
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
+
+ if (is_predicated) {
+ __ Bind(&*pred_is_null);
+ }
}
void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet());
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference);
+ bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_field_get_with_read_barrier
@@ -5860,7 +5876,8 @@
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- locations->SetInAt(0, Location::RequiresRegister());
+ // Input for object receiver.
+ locations->SetInAt(is_predicated ? 1 : 0, Location::RequiresRegister());
bool volatile_for_double = field_info.IsVolatile()
&& (field_info.GetFieldType() == DataType::Type::kFloat64)
@@ -5875,10 +5892,20 @@
object_field_get_with_read_barrier;
if (DataType::IsFloatingPointType(instruction->GetType())) {
- locations->SetOut(Location::RequiresFpuRegister());
+ if (is_predicated) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ } else {
+ locations->SetOut(Location::RequiresFpuRegister());
+ }
} else {
- locations->SetOut(Location::RequiresRegister(),
- (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
+ if (is_predicated) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ } else {
+ locations->SetOut(Location::RequiresRegister(),
+ (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
+ }
}
if (volatile_for_double) {
// ARM encoding have some additional constraints for ldrexd/strexd:
@@ -5979,10 +6006,13 @@
void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet());
LocationSummary* locations = instruction->GetLocations();
- vixl32::Register base = InputRegisterAt(instruction, 0);
+ uint32_t receiver_input = instruction->IsPredicatedInstanceFieldGet() ? 1 : 0;
+ vixl32::Register base = InputRegisterAt(instruction, receiver_input);
Location out = locations->Out();
bool is_volatile = field_info.IsVolatile();
bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
@@ -6029,7 +6059,8 @@
// If read barriers are enabled, emit read barriers other than
// Baker's using a slow path (and also unpoison the loaded
// reference, if heap poisoning is enabled).
- codegen_->MaybeGenerateReadBarrierSlow(instruction, out, out, locations->InAt(0), offset);
+ codegen_->MaybeGenerateReadBarrierSlow(
+ instruction, out, out, locations->InAt(receiver_input), offset);
}
break;
}
@@ -6100,6 +6131,19 @@
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
+void LocationsBuilderARMVIXL::VisitPredicatedInstanceFieldGet(
+ HPredicatedInstanceFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitPredicatedInstanceFieldGet(
+ HPredicatedInstanceFieldGet* instruction) {
+ vixl::aarch32::Label finish;
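+ // Input 1 is the target object; if it is null, skip the load and keep the
+ // default value (input 0), which already occupies the output register.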
+ __ CompareAndBranchIfZero(InputRegisterAt(instruction, 1), &finish, /* is_far_target= */ false);
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+ __ Bind(&finish);
+}
+
void InstructionCodeGeneratorARMVIXL::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f6c0270..4fc29fc 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -489,6 +489,7 @@
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
DCHECK(instruction_->IsInstanceFieldGet() ||
+ instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsArraySet() ||
@@ -749,6 +750,7 @@
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
DCHECK(instruction_->IsInstanceFieldGet() ||
+ instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
@@ -5642,10 +5644,13 @@
}
void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet());
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
kEmitCompilerReadBarrier
@@ -5654,21 +5659,30 @@
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- locations->SetInAt(0, Location::RequiresRegister());
-
+ // Input for object receiver.
+ locations->SetInAt(is_predicated ? 1 : 0, Location::RequiresRegister());
+ if (is_predicated) {
+ if (DataType::IsFloatingPointType(instruction->GetType())) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+ }
if (DataType::IsFloatingPointType(instruction->GetType())) {
- locations->SetOut(Location::RequiresFpuRegister());
+ locations->SetOut(is_predicated ? Location::SameAsFirstInput()
+ : Location::RequiresFpuRegister());
} else {
// The output overlaps in case of long: we don't want the low move
// to overwrite the object's location. Likewise, in the case of
// an object field get with read barriers enabled, we do not want
// the move to overwrite the object's location, as we need it to emit
// the read barrier.
- locations->SetOut(
- Location::RequiresRegister(),
- (object_field_get_with_read_barrier || instruction->GetType() == DataType::Type::kInt64) ?
- Location::kOutputOverlap :
- Location::kNoOutputOverlap);
+ locations->SetOut(is_predicated ? Location::SameAsFirstInput() : Location::RequiresRegister(),
+ (object_field_get_with_read_barrier ||
+ instruction->GetType() == DataType::Type::kInt64 ||
+ is_predicated)
+ ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
}
if (field_info.IsVolatile() && (field_info.GetFieldType() == DataType::Type::kInt64)) {
@@ -5682,10 +5696,12 @@
void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet());
LocationSummary* locations = instruction->GetLocations();
- Location base_loc = locations->InAt(0);
+ Location base_loc = locations->InAt(instruction->IsPredicatedInstanceFieldGet() ? 1 : 0);
Register base = base_loc.AsRegister<Register>();
Location out = locations->Out();
bool is_volatile = field_info.IsVolatile();
@@ -5979,9 +5995,17 @@
bool is_volatile = field_info.IsVolatile();
DataType::Type field_type = field_info.GetFieldType();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+ bool is_predicated =
+ instruction->IsInstanceFieldSet() && instruction->AsInstanceFieldSet()->GetIsPredicatedSet();
Address field_addr(base, offset);
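+ // A predicated set performs no store when the receiver is null.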
+ NearLabel pred_is_null;
+ if (is_predicated) {
+ __ testl(base, base);
+ __ j(kEqual, &pred_is_null);
+ }
+
HandleFieldSet(instruction,
/* value_index= */ 1,
field_type,
@@ -5989,6 +6013,10 @@
base,
is_volatile,
value_can_be_null);
+
+ if (is_predicated) {
+ __ Bind(&pred_is_null);
+ }
}
void LocationsBuilderX86::VisitStaticFieldGet(HStaticFieldGet* instruction) {
@@ -6015,10 +6043,25 @@
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
+void LocationsBuilderX86::VisitPredicatedInstanceFieldGet(
+ HPredicatedInstanceFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
void LocationsBuilderX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
+void InstructionCodeGeneratorX86::VisitPredicatedInstanceFieldGet(
+ HPredicatedInstanceFieldGet* instruction) {
+ NearLabel finish;
+ LocationSummary* locations = instruction->GetLocations();
+ Register recv = locations->InAt(1).AsRegister<Register>();
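+ // If the target object (input 1) is null, skip the load; the output
+ // already holds the default value (input 0).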
+ __ testl(recv, recv);
+ __ j(kZero, &finish);
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+ __ Bind(&finish);
+}
+
void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index d79c2e4..d54484c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -39,6 +39,7 @@
#include "utils/assembler.h"
#include "utils/stack_checks.h"
#include "utils/x86_64/assembler_x86_64.h"
+#include "utils/x86_64/constants_x86_64.h"
#include "utils/x86_64/managed_register_x86_64.h"
namespace art {
@@ -500,6 +501,7 @@
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
DCHECK(instruction_->IsInstanceFieldGet() ||
+ instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsArraySet() ||
@@ -761,6 +763,7 @@
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out.AsRegister())) << out_;
DCHECK(instruction_->IsInstanceFieldGet() ||
+ instruction_->IsPredicatedInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
@@ -4856,10 +4859,13 @@
}
void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) {
- DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet());
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_field_get_with_read_barrier
@@ -4868,25 +4874,38 @@
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- locations->SetInAt(0, Location::RequiresRegister());
+ // Input for object receiver.
+ locations->SetInAt(is_predicated ? 1 : 0, Location::RequiresRegister());
+ if (is_predicated) {
+ if (DataType::IsFloatingPointType(instruction->GetType())) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+ }
if (DataType::IsFloatingPointType(instruction->GetType())) {
- locations->SetOut(Location::RequiresFpuRegister());
+ locations->SetOut(is_predicated ? Location::SameAsFirstInput()
+ : Location::RequiresFpuRegister());
} else {
- // The output overlaps for an object field get when read barriers
- // are enabled: we do not want the move to overwrite the object's
- // location, as we need it to emit the read barrier.
- locations->SetOut(
- Location::RequiresRegister(),
- object_field_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
+ // The output overlaps for an object field get when read barriers are
+ // enabled: we do not want the move to overwrite the object's location, as
+ // we need it to emit the read barrier. For predicated instructions we can
+ // always overlap since the output is SameAsFirstInput and input 0 holds
+ // the default value.
+ locations->SetOut(is_predicated ? Location::SameAsFirstInput() : Location::RequiresRegister(),
+ object_field_get_with_read_barrier || is_predicated
+ ? Location::kOutputOverlap
+ : Location::kNoOutputOverlap);
}
}
void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet());
LocationSummary* locations = instruction->GetLocations();
- Location base_loc = locations->InAt(0);
+ Location base_loc = locations->InAt(instruction->IsPredicatedInstanceFieldGet() ? 1 : 0);
CpuRegister base = base_loc.AsRegister<CpuRegister>();
Location out = locations->Out();
bool is_volatile = field_info.IsVolatile();
@@ -5032,6 +5051,8 @@
bool is_volatile = field_info.IsVolatile();
DataType::Type field_type = field_info.GetFieldType();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
+ bool is_predicated =
+ instruction->IsInstanceFieldSet() && instruction->AsInstanceFieldSet()->GetIsPredicatedSet();
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
@@ -5039,6 +5060,12 @@
bool maybe_record_implicit_null_check_done = false;
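+ // A predicated set performs no store when the receiver is null.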
+ NearLabel pred_is_null;
+ if (is_predicated) {
+ __ testl(base, base);
+ __ j(kZero, &pred_is_null);
+ }
+
switch (field_type) {
case DataType::Type::kBool:
case DataType::Type::kUint8:
@@ -5145,6 +5172,10 @@
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
+
+ if (is_predicated) {
+ __ Bind(&pred_is_null);
+ }
}
void LocationsBuilderX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
@@ -5155,10 +5186,26 @@
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
+void LocationsBuilderX86_64::VisitPredicatedInstanceFieldGet(
+ HPredicatedInstanceFieldGet* instruction) {
+ HandleFieldGet(instruction);
+}
+
void LocationsBuilderX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction);
}
+void InstructionCodeGeneratorX86_64::VisitPredicatedInstanceFieldGet(
+ HPredicatedInstanceFieldGet* instruction) {
+ NearLabel finish;
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister target = locations->InAt(1).AsRegister<CpuRegister>();
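+ // If the target object (input 1) is null, skip the load; the output
+ // already holds the default value (input 0).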
+ __ testl(target, target);
+ __ j(kZero, &finish);
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+ __ Bind(&finish);
+}
+
void InstructionCodeGeneratorX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index f946e50..49acab6 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -107,6 +107,7 @@
// hard to test, as LSE removes them.
if (instruction->IsStaticFieldGet() ||
instruction->IsInstanceFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsArrayGet()) {
return false;
}
diff --git a/compiler/optimizing/execution_subgraph.h b/compiler/optimizing/execution_subgraph.h
index dac938e..7fabbae 100644
--- a/compiler/optimizing/execution_subgraph.h
+++ b/compiler/optimizing/execution_subgraph.h
@@ -27,6 +27,7 @@
#include "base/bit_vector-inl.h"
#include "base/globals.h"
#include "base/iteration_range.h"
+#include "base/mutex.h"
#include "base/scoped_arena_allocator.h"
#include "base/scoped_arena_containers.h"
#include "base/stl_util.h"
@@ -35,6 +36,18 @@
namespace art {
+// Helper for transforming blocks to block_ids.
+class BlockToBlockIdTransformer {
+ public:
+ BlockToBlockIdTransformer(BlockToBlockIdTransformer&&) = default;
+ BlockToBlockIdTransformer(const BlockToBlockIdTransformer&) = default;
+ BlockToBlockIdTransformer() {}
+
+ inline uint32_t operator()(const HBasicBlock* b) const {
+ return b->GetBlockId();
+ }
+};
+
// Helper for transforming block ids to blocks.
class BlockIdToBlockTransformer {
public:
@@ -61,6 +74,20 @@
const HGraph* const graph_;
};
+class BlockIdFilterThunk {
+ public:
+ explicit BlockIdFilterThunk(const BitVector& i) : inner_(i) {}
+ BlockIdFilterThunk(BlockIdFilterThunk&& other) noexcept = default;
+ BlockIdFilterThunk(const BlockIdFilterThunk&) = default;
+
+ bool operator()(const HBasicBlock* b) const {
+ return inner_.IsBitSet(b->GetBlockId());
+ }
+
+ private:
+ const BitVector& inner_;
+};
+
// A representation of a particular section of the graph. The graph is split
// into an excluded and included area and is used to track escapes.
//
@@ -80,10 +107,18 @@
// cohort-exit block to reach any cohort-entry block. This means we can use the
// boundary between the cohort and the rest of the graph to insert
// materialization blocks for partial LSE.
+//
+// TODO We really should expand this to take into account where the object
+// allocation takes place directly. Currently we always act as though it were
+// allocated in the entry block. This is a massively simplifying assumption but
+// means we can't partially remove objects that are repeatedly allocated in a
+// loop.
class ExecutionSubgraph : public ArenaObject<kArenaAllocLSA> {
public:
using BitVecBlockRange =
IterationRange<TransformIterator<BitVector::IndexIterator, BlockIdToBlockTransformer>>;
+ using FilteredBitVecBlockRange = IterationRange<
+ FilterIterator<ArenaVector<HBasicBlock*>::const_iterator, BlockIdFilterThunk>>;
// A set of connected blocks which are removed from the
// ExecutionSubgraph. See above comment for explanation.
@@ -110,6 +145,15 @@
return BlockIterRange(entry_blocks_);
}
+ FilteredBitVecBlockRange EntryBlocksReversePostOrder() const {
+ return Filter(MakeIterationRange(graph_->GetReversePostOrder()),
+ BlockIdFilterThunk(entry_blocks_));
+ }
+
+ bool IsEntryBlock(const HBasicBlock* blk) const {
+ return entry_blocks_.IsBitSet(blk->GetBlockId());
+ }
+
// Blocks that have successors outside of the cohort. The successors of
// these blocks will need to have PHI's to restore state.
BitVecBlockRange ExitBlocks() const {
diff --git a/compiler/optimizing/execution_subgraph_test.cc b/compiler/optimizing/execution_subgraph_test.cc
index 1fc00d9..98e642f 100644
--- a/compiler/optimizing/execution_subgraph_test.cc
+++ b/compiler/optimizing/execution_subgraph_test.cc
@@ -425,6 +425,150 @@
ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
}
+// ┌───────┐ ┌──────────────┐
+// │ right │ ◀── │ entry │
+// └───────┘ └──────────────┘
+// │ │
+// │ │
+// ▼ ▼
+// ┌────┐ ┌───────┐ ┌──────────────┐
+// │ l2 │ ──▶ │ exit │ ┌─ │ l1 │ ◀┐
+// └────┘ └───────┘ │ └──────────────┘ │
+// ▲ │ │ │
+// └───────────────────┘ │ │
+// ▼ │
+// ┌──────────────┐ │ ┌──────────────┐
+// ┌─ │ l1loop │ │ │ l1loop_right │ ◀┐
+// │ └──────────────┘ │ └──────────────┘ │
+// │ │ │ │ │
+// │ │ │ │ │
+// │ ▼ │ │ │
+// │ ┌−−−−−−−−−−−−−−−−−−┐ │ │ │
+// │ ╎ removed ╎ │ │ │
+// │ ╎ ╎ │ │ │
+// │ ╎ ┌──────────────┐ ╎ │ │ │
+// │ ╎ │ l1loop_left │ ╎ │ │ │
+// │ ╎ └──────────────┘ ╎ │ │ │
+// │ ╎ ╎ │ │ │
+// │ └−−−−−−−−−−−−−−−−−−┘ │ │ │
+// │ │ │ │ │
+// │ │ │ │ │
+// │ ▼ │ │ │
+// │ ┌──────────────┐ │ │ │
+// │ │ l1loop_merge │ ─┘ │ │
+// │ └──────────────┘ │ │
+// │ ▲ │ │
+// │ └──────────────────────┘ │
+// │ │
+// │ │
+// └─────────────────────────────────────────────┘
+
+TEST_F(ExecutionSubgraphTest, PropagationLoop4) {
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "l1"},
+ {"l1", "l2"},
+ {"l1", "l1loop"},
+ {"l1loop", "l1loop_left"},
+ {"l1loop", "l1loop_right"},
+ {"l1loop_left", "l1loop_merge"},
+ {"l1loop_right", "l1loop_merge"},
+ {"l1loop_merge", "l1"},
+ {"l2", "exit"},
+ {"entry", "right"},
+ {"right", "exit"}}));
+ ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
+ ExecutionSubgraph esg(graph_, /*analysis_possible=*/true, GetScopedAllocator());
+ esg.RemoveBlock(blks.Get("l1loop_left"));
+ esg.Finalize();
+ ASSERT_TRUE(esg.IsValid());
+ ASSERT_TRUE(IsValidSubgraph(esg));
+ std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
+ esg.ReachableBlocks().end());
+
+ ASSERT_EQ(contents.size(), 3u);
+
+ // Not present, no path through. If we got to l1loop then we would have to
+ // merge back through l1 and l2, so they are excluded too.
+ ASSERT_TRUE(contents.find(blks.Get("l1loop")) == contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("l1")) == contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("l1loop_left")) == contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("l1loop_right")) == contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("l1loop_merge")) == contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("l2")) == contents.end());
+
+ // present, path through.
+ ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
+}
+
+// +------------------------------------------------------+
+// | |
+// | +--------------+ +-------------+ |
+// | | right | <-- | entry | |
+// | +--------------+ +-------------+ |
+// | | | |
+// | | | |
+// | v v |
+// | +--------------+ +--------------------+ +----+
+// +> | exit | +> | l1 | --> | l2 |
+// +--------------+ | +--------------------+ +----+
+// | | ^
+// +---------------+ | |
+// | v |
+// +--------------+ +-------------+ |
+// | l1loop_right | <-- | l1loop | |
+// +--------------+ +-------------+ |
+// | |
+// | |
+// v |
+// + - - - - - - - - + |
+// ' removed ' |
+// ' ' |
+// ' +-------------+ ' |
+// ' | l1loop_left | ' -+
+// ' +-------------+ '
+// ' '
+// + - - - - - - - - +
+TEST_F(ExecutionSubgraphTest, PropagationLoop5) {
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "l1"},
+ {"l1", "l2"},
+ {"l1", "l1loop"},
+ {"l1loop", "l1loop_left"},
+ {"l1loop", "l1loop_right"},
+ {"l1loop_left", "l1"},
+ {"l1loop_right", "l1"},
+ {"l2", "exit"},
+ {"entry", "right"},
+ {"right", "exit"}}));
+ ASSERT_TRUE(ExecutionSubgraph::CanAnalyse(graph_));
+ ExecutionSubgraph esg(graph_, /*analysis_possible=*/true, GetScopedAllocator());
+ esg.RemoveBlock(blks.Get("l1loop_left"));
+ esg.Finalize();
+ ASSERT_TRUE(esg.IsValid());
+ ASSERT_TRUE(IsValidSubgraph(esg));
+ std::unordered_set<const HBasicBlock*> contents(esg.ReachableBlocks().begin(),
+ esg.ReachableBlocks().end());
+
+ ASSERT_EQ(contents.size(), 3u);
+
+ // Not present, no path through. If we got to l1loop then we would have to
+ // merge back through l1 and l2, so they are excluded too.
+ ASSERT_TRUE(contents.find(blks.Get("l1loop")) == contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("l1")) == contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("l1loop_left")) == contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("l1loop_right")) == contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("l2")) == contents.end());
+
+ // present, path through.
+ ASSERT_TRUE(contents.find(blks.Get("right")) != contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("entry")) != contents.end());
+ ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
+}
+
TEST_F(ExecutionSubgraphTest, Invalid) {
AdjacencyListGraph blks(SetupFromAdjacencyList(
"entry",
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index da34af2..5a264b7 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -19,6 +19,7 @@
#include <dlfcn.h>
#include <cctype>
+#include <ios>
#include <sstream>
#include "android-base/stringprintf.h"
@@ -529,6 +530,13 @@
StartAttributeStream("invoke_type") << "InvokePolymorphic";
}
+ void VisitPredicatedInstanceFieldGet(HPredicatedInstanceFieldGet* iget) override {
+ StartAttributeStream("field_name") <<
+ iget->GetFieldInfo().GetDexFile().PrettyField(iget->GetFieldInfo().GetFieldIndex(),
+ /* with type */ false);
+ StartAttributeStream("field_type") << iget->GetFieldType();
+ }
+
void VisitInstanceFieldGet(HInstanceFieldGet* iget) override {
StartAttributeStream("field_name") <<
iget->GetFieldInfo().GetDexFile().PrettyField(iget->GetFieldInfo().GetFieldIndex(),
@@ -541,6 +549,7 @@
iset->GetFieldInfo().GetDexFile().PrettyField(iset->GetFieldInfo().GetFieldIndex(),
/* with type */ false);
StartAttributeStream("field_type") << iset->GetFieldType();
+ StartAttributeStream("predicated") << std::boolalpha << iset->GetIsPredicatedSet();
}
void VisitStaticFieldGet(HStaticFieldGet* sget) override {
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 8886f14..7137617 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -23,6 +23,7 @@
#include "escape.h"
#include "intrinsics.h"
#include "mirror/class-inl.h"
+#include "optimizing/nodes.h"
#include "scoped_thread_state_change-inl.h"
#include "sharpening.h"
#include "string_builder_append.h"
@@ -109,6 +110,7 @@
void VisitInvoke(HInvoke* invoke) override;
void VisitDeoptimize(HDeoptimize* deoptimize) override;
void VisitVecMul(HVecMul* instruction) override;
+ void VisitPredicatedInstanceFieldGet(HPredicatedInstanceFieldGet* instruction) override;
bool CanEnsureNotNullAt(HInstruction* instr, HInstruction* at) const;
@@ -915,6 +917,42 @@
return nullptr;
}
+// TODO This should really be done by LSE itself since there is significantly
+// more information available there.
+void InstructionSimplifierVisitor::VisitPredicatedInstanceFieldGet(
+ HPredicatedInstanceFieldGet* pred_get) {
+ HInstruction* target = pred_get->GetTarget();
+ HInstruction* default_val = pred_get->GetDefaultValue();
+ // TODO Technically we could end up with a case where the target isn't a phi
+ // (allowing us to eliminate the instruction and replace it with either an
+ // InstanceFieldGet or the default) but due to the ordering of compilation
+ // passes this can't happen in ART.
+ if (!target->IsPhi() || !default_val->IsPhi() || default_val->GetBlock() != target->GetBlock()) {
+ // Already reduced the target or the phi selection will differ between the
+ // target and default.
+ return;
+ }
+ DCHECK_EQ(default_val->InputCount(), target->InputCount());
+ // Both phis are in the same block; if every possibly-null target input
+ // selects the same default value, we can bypass the default_val phi.
+ HInstruction* single_value = nullptr;
+ auto inputs = target->GetInputs();
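+ // Only target inputs that may be null can select the default value at
+ // runtime, so only their corresponding default inputs need to agree.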
+ for (auto [input, idx] : ZipCount(MakeIterationRange(inputs))) {
+ if (input->CanBeNull()) {
+ if (single_value == nullptr) {
+ single_value = default_val->InputAt(idx);
+ } else if (single_value != default_val->InputAt(idx) &&
+ !single_value->Equals(default_val->InputAt(idx))) {
+ // Multiple values, can't combine.
+ return;
+ }
+ }
+ }
+ // single_value may still be null if no target input can be null.
+ if (single_value != nullptr && single_value->StrictlyDominates(pred_get)) {
+ // Combine all the maybe null values into one.
+ pred_get->ReplaceInput(single_value, 0);
+ }
+}
+
void InstructionSimplifierVisitor::VisitSelect(HSelect* select) {
HInstruction* replace_with = nullptr;
HInstruction* condition = select->GetCondition();
@@ -1098,6 +1136,9 @@
if (maybe_get->IsInstanceFieldGet()) {
maybe_get->AsInstanceFieldGet()->SetType(new_type);
return true;
+ } else if (maybe_get->IsPredicatedInstanceFieldGet()) {
+ maybe_get->AsPredicatedInstanceFieldGet()->SetType(new_type);
+ return true;
} else if (maybe_get->IsStaticFieldGet()) {
maybe_get->AsStaticFieldGet()->SetType(new_type);
return true;
diff --git a/compiler/optimizing/load_store_analysis.cc b/compiler/optimizing/load_store_analysis.cc
index 3daa647..38ed98a 100644
--- a/compiler/optimizing/load_store_analysis.cc
+++ b/compiler/optimizing/load_store_analysis.cc
@@ -16,6 +16,9 @@
#include "load_store_analysis.h"
+#include "base/scoped_arena_allocator.h"
+#include "optimizing/escape.h"
+
namespace art {
// A cap for the number of heap locations to prevent pathological time/space consumption.
@@ -100,14 +103,11 @@
allocator_, graph->GetBlocks().size(), false, kArenaAllocLSA);
for (const HUseListNode<HInstruction*>& use : reference_->GetUses()) {
const HInstruction* user = use.GetUser();
- const bool possible_exclusion =
- !additional_exclusions.IsBitSet(user->GetBlock()->GetBlockId()) &&
- subgraph_.ContainsBlock(user->GetBlock());
- const bool is_written_to =
+ if (!additional_exclusions.IsBitSet(user->GetBlock()->GetBlockId()) &&
+ subgraph_.ContainsBlock(user->GetBlock()) &&
(user->IsUnresolvedInstanceFieldSet() || user->IsUnresolvedStaticFieldSet() ||
user->IsInstanceFieldSet() || user->IsStaticFieldSet() || user->IsArraySet()) &&
- (reference_ == user->InputAt(0));
- if (possible_exclusion && is_written_to &&
+ (reference_ == user->InputAt(0)) &&
std::any_of(subgraph_.UnreachableBlocks().begin(),
subgraph_.UnreachableBlocks().end(),
[&](const HBasicBlock* excluded) -> bool {
@@ -148,6 +148,37 @@
}
}
+void ReferenceInfo::CollectPartialEscapes(HGraph* graph) {
+ ScopedArenaAllocator saa(graph->GetArenaStack());
+ ArenaBitVector seen_instructions(&saa, graph->GetCurrentInstructionId(), false, kArenaAllocLSA);
+ // Get regular escapes.
+ ScopedArenaVector<HInstruction*> additional_escape_vectors(saa.Adapter(kArenaAllocLSA));
+ LambdaEscapeVisitor scan_instructions([&](HInstruction* escape) -> bool {
+ HandleEscape(escape);
+ // LSE can't track heap-locations through Phi and Select instructions so we
+ // need to assume all escapes from these are escapes for the base reference.
+ if ((escape->IsPhi() || escape->IsSelect()) && !seen_instructions.IsBitSet(escape->GetId())) {
+ seen_instructions.SetBit(escape->GetId());
+ additional_escape_vectors.push_back(escape);
+ }
+ return true;
+ });
+ additional_escape_vectors.push_back(reference_);
+ while (!additional_escape_vectors.empty()) {
+ HInstruction* ref = additional_escape_vectors.back();
+ additional_escape_vectors.pop_back();
+ DCHECK(ref == reference_ || ref->IsPhi() || ref->IsSelect()) << *ref;
+ VisitEscapes(ref, scan_instructions);
+ }
+
+ // Mark irreducible loop headers as escaping since they cannot be tracked through.
+ for (HBasicBlock* blk : graph->GetActiveBlocks()) {
+ if (blk->IsLoopHeader() && blk->GetLoopInformation()->IsIrreducible()) {
+ HandleEscape(blk);
+ }
+ }
+}
+
void HeapLocationCollector::DumpReferenceStats(OptimizingCompilerStats* stats) {
if (stats == nullptr) {
return;
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 5d2d841..e815727 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -30,6 +30,12 @@
namespace art {
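+// How much analysis to perform. kBasic collects heap locations without
+// escape subgraphs; kNoPredicatedInstructions computes partial-escape
+// subgraphs but treats writes to partially escaped values as escapes, so
+// no predicated instructions are needed; kFull enables the complete
+// analysis used to materialize predicated loads and stores.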
+enum class LoadStoreAnalysisType {
+ kBasic,
+ kNoPredicatedInstructions,
+ kFull,
+};
+
// A ReferenceInfo contains additional info about a reference such as
// whether it's a singleton, returned, etc.
class ReferenceInfo : public DeletableArenaObject<kArenaAllocLSA> {
@@ -37,22 +43,23 @@
ReferenceInfo(HInstruction* reference,
ScopedArenaAllocator* allocator,
size_t pos,
- bool for_partial_elimination)
+ LoadStoreAnalysisType elimination_type)
: reference_(reference),
position_(pos),
is_singleton_(true),
is_singleton_and_not_returned_(true),
is_singleton_and_not_deopt_visible_(true),
allocator_(allocator),
- subgraph_(reference->GetBlock()->GetGraph(), for_partial_elimination, allocator_) {
+ subgraph_(reference->GetBlock()->GetGraph(),
+ elimination_type != LoadStoreAnalysisType::kBasic,
+ allocator_) {
// TODO We can do this in one pass.
// TODO NewArray is possible but will need to get a handle on how to deal with the dynamic loads
// for now just ignore it.
- bool can_be_partial =
- for_partial_elimination && (/* reference_->IsNewArray() || */ reference_->IsNewInstance());
- LambdaEscapeVisitor func([&](HInstruction* inst) { return HandleEscape(inst); });
+ bool can_be_partial = elimination_type != LoadStoreAnalysisType::kBasic &&
+ (/* reference_->IsNewArray() || */ reference_->IsNewInstance());
if (can_be_partial) {
- VisitEscapes(reference_, func);
+ CollectPartialEscapes(reference_->GetBlock()->GetGraph());
}
CalculateEscape(reference_,
nullptr,
@@ -60,10 +67,12 @@
&is_singleton_and_not_returned_,
&is_singleton_and_not_deopt_visible_);
if (can_be_partial) {
- // This is to mark writes to partially escaped values as also part of the escaped subset.
- // TODO We can avoid this if we have a 'ConditionalWrite' instruction. Will require testing
- // to see if the additional branches are worth it.
- PrunePartialEscapeWrites();
+ if (elimination_type == LoadStoreAnalysisType::kNoPredicatedInstructions) {
+ // This is to mark writes to partially escaped values as also part of the escaped subset.
+ // TODO We can avoid this if we have a 'ConditionalWrite' instruction. Will require testing
+ // to see if the additional branches are worth it.
+ PrunePartialEscapeWrites();
+ }
subgraph_.Finalize();
} else {
subgraph_.Invalidate();
@@ -112,9 +121,12 @@
}
private:
- bool HandleEscape(HInstruction* escape) {
- subgraph_.RemoveBlock(escape->GetBlock());
- return true;
+ void CollectPartialEscapes(HGraph* graph);
+ void HandleEscape(HBasicBlock* escape) {
+ subgraph_.RemoveBlock(escape);
+ }
+ void HandleEscape(HInstruction* escape) {
+ HandleEscape(escape->GetBlock());
}
// Make sure we mark any writes/potential writes to heap-locations within partially
@@ -229,7 +241,7 @@
HeapLocationCollector(HGraph* graph,
ScopedArenaAllocator* allocator,
- bool for_partial_elimination)
+ LoadStoreAnalysisType lse_type)
: HGraphVisitor(graph),
allocator_(allocator),
ref_info_array_(allocator->Adapter(kArenaAllocLSA)),
@@ -238,7 +250,7 @@
has_heap_stores_(false),
has_volatile_(false),
has_monitor_operations_(false),
- for_partial_elimination_(for_partial_elimination) {
+ lse_type_(lse_type) {
aliasing_matrix_.ClearAllBits();
}
@@ -252,6 +264,10 @@
ref_info_array_.clear();
}
+ size_t GetNumberOfReferenceInfos() const {
+ return ref_info_array_.size();
+ }
+
size_t GetNumberOfHeapLocations() const {
return heap_locations_.size();
}
@@ -260,6 +276,11 @@
return heap_locations_[index];
}
+ size_t GetHeapLocationIndex(const HeapLocation* hl) const {
+ auto res = std::find(heap_locations_.cbegin(), heap_locations_.cend(), hl);
+ return std::distance(heap_locations_.cbegin(), res);
+ }
+
HInstruction* HuntForOriginalReference(HInstruction* ref) const {
// An original reference can be transformed by instructions like:
// i0 NewArray
@@ -480,8 +501,7 @@
ReferenceInfo* ref_info = FindReferenceInfoOf(instruction);
if (ref_info == nullptr) {
size_t pos = ref_info_array_.size();
- ref_info =
- new (allocator_) ReferenceInfo(instruction, allocator_, pos, for_partial_elimination_);
+ ref_info = new (allocator_) ReferenceInfo(instruction, allocator_, pos, lse_type_);
ref_info_array_.push_back(ref_info);
}
return ref_info;
@@ -539,6 +559,10 @@
HeapLocation::kDeclaringClassDefIndexForArrays);
}
+ void VisitPredicatedInstanceFieldGet(HPredicatedInstanceFieldGet* instruction) override {
+ VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
+ CreateReferenceInfoForReferenceType(instruction);
+ }
void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override {
VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
CreateReferenceInfoForReferenceType(instruction);
@@ -618,7 +642,7 @@
// alias analysis and won't be as effective.
bool has_volatile_; // If there are volatile field accesses.
bool has_monitor_operations_; // If there are monitor operations.
- bool for_partial_elimination_;
+ LoadStoreAnalysisType lse_type_;
DISALLOW_COPY_AND_ASSIGN(HeapLocationCollector);
};
@@ -630,13 +654,13 @@
explicit LoadStoreAnalysis(HGraph* graph,
OptimizingCompilerStats* stats,
ScopedArenaAllocator* local_allocator,
- bool for_elimination = true)
+ LoadStoreAnalysisType lse_type)
: graph_(graph),
stats_(stats),
heap_location_collector_(
graph,
local_allocator,
- /*for_partial_elimination=*/for_elimination && ExecutionSubgraph::CanAnalyse(graph_)) {}
+ ExecutionSubgraph::CanAnalyse(graph_) ? lse_type : LoadStoreAnalysisType::kBasic) {}
const HeapLocationCollector& GetHeapLocationCollector() const {
return heap_location_collector_;
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index a5b628c..fd15802 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -100,8 +100,7 @@
// Test HeapLocationCollector initialization.
// Should be no heap locations, no operations on the heap.
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- HeapLocationCollector heap_location_collector(
- graph_, &allocator, /*for_partial_elimination=*/true);
+ HeapLocationCollector heap_location_collector(graph_, &allocator, LoadStoreAnalysisType::kFull);
ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 0U);
ASSERT_FALSE(heap_location_collector.HasHeapStores());
@@ -198,8 +197,7 @@
// Test HeapLocationCollector initialization.
// Should be no heap locations, no operations on the heap.
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- HeapLocationCollector heap_location_collector(
- graph_, &allocator, /*for_partial_elimination=*/true);
+ HeapLocationCollector heap_location_collector(graph_, &allocator, LoadStoreAnalysisType::kFull);
ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 0U);
ASSERT_FALSE(heap_location_collector.HasHeapStores());
@@ -279,7 +277,7 @@
entry->AddInstruction(arr_set8); // array[i-(-1)] = c0
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, /*for_elimination=*/false);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kBasic);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
@@ -446,7 +444,7 @@
entry->AddInstruction(vstore_i_add6_vlen2);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, /*for_elimination=*/false);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kBasic);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
@@ -605,7 +603,7 @@
entry->AddInstruction(arr_set_8);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, /*for_elimination=*/false);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kBasic);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
@@ -695,8 +693,7 @@
entry->AddInstruction(array_get4);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- HeapLocationCollector heap_location_collector(
- graph_, &allocator, /*for_partial_elimination=*/true);
+ HeapLocationCollector heap_location_collector(graph_, &allocator, LoadStoreAnalysisType::kFull);
heap_location_collector.VisitBasicBlock(entry);
// Test that the HeapLocationCollector should be able to tell
@@ -916,7 +913,7 @@
exit->AddInstruction(read_final);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, /*for_elimination=*/true);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
@@ -1023,7 +1020,7 @@
exit->AddInstruction(read_final);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, /*for_elimination=*/true);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
@@ -1144,7 +1141,7 @@
exit->AddInstruction(read_final);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, /*for_elimination=*/true);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
@@ -1165,6 +1162,119 @@
ASSERT_TRUE(contents.find(blks.Get("exit")) != contents.end());
}
+// Before we had predicated-set we needed to be able to remove the store as
+// well. This test makes sure that still works.
+// // ENTRY
+// obj = new Obj();
+// if (parameter_value) {
+// // LEFT
+// call_func(obj);
+// } else {
+// // RIGHT
+// obj.f1 = 0;
+// }
+// // EXIT
+// // call_func prevents the elimination of this store.
+// obj.f2 = 0;
+TEST_F(LoadStoreAnalysisTest, TotalEscapeAdjacentNoPredicated) {
+ AdjacencyListGraph blks(SetupFromAdjacencyList(
+ "entry",
+ "exit",
+ {{"entry", "left"}, {"entry", "right"}, {"left", "exit"}, {"right", "exit"}}));
+ HBasicBlock* entry = blks.Get("entry");
+ HBasicBlock* left = blks.Get("left");
+ HBasicBlock* right = blks.Get("right");
+ HBasicBlock* exit = blks.Get("exit");
+
+ HInstruction* bool_value = new (GetAllocator())
+ HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* c0 = graph_->GetIntConstant(0);
+ HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
+ dex::TypeIndex(10),
+ graph_->GetDexFile(),
+ ScopedNullHandle<mirror::Class>(),
+ false,
+ 0,
+ false);
+ HInstruction* new_inst =
+ new (GetAllocator()) HNewInstance(cls,
+ 0,
+ dex::TypeIndex(10),
+ graph_->GetDexFile(),
+ false,
+ QuickEntrypointEnum::kQuickAllocObjectInitialized);
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(if_inst);
+
+ HInstruction* call_left = new (GetAllocator())
+ HInvokeStaticOrDirect(GetAllocator(),
+ 1,
+ DataType::Type::kVoid,
+ 0,
+ {nullptr, 0},
+ nullptr,
+ {},
+ InvokeType::kStatic,
+ {nullptr, 0},
+ HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ call_left->AsInvoke()->SetRawInputAt(0, new_inst);
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+
+ HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
+ c0,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(32),
+ false,
+ 0,
+ 0,
+ graph_->GetDexFile(),
+ 0);
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right);
+ right->AddInstruction(goto_right);
+
+ HInstruction* write_final = new (GetAllocator()) HInstanceFieldSet(new_inst,
+ c0,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(16),
+ false,
+ 0,
+ 0,
+ graph_->GetDexFile(),
+ 0);
+ exit->AddInstruction(write_final);
+
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
+ graph_->ClearDominanceInformation();
+ graph_->BuildDominatorTree();
+ LoadStoreAnalysis lsa(
+ graph_, nullptr, &allocator, LoadStoreAnalysisType::kNoPredicatedInstructions);
+ lsa.Run();
+
+ const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
+ ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
+ const ExecutionSubgraph* esg = info->GetNoEscapeSubgraph();
+
+ EXPECT_FALSE(esg->IsValid()) << esg->GetExcludedCohorts();
+ EXPECT_FALSE(IsValidSubgraph(esg));
+ std::unordered_set<const HBasicBlock*> contents(esg->ReachableBlocks().begin(),
+ esg->ReachableBlocks().end());
+
+ EXPECT_EQ(contents.size(), 0u);
+ EXPECT_TRUE(contents.find(blks.Get("left")) == contents.end());
+ EXPECT_TRUE(contents.find(blks.Get("right")) == contents.end());
+ EXPECT_TRUE(contents.find(blks.Get("entry")) == contents.end());
+ EXPECT_TRUE(contents.find(blks.Get("exit")) == contents.end());
+}
+
+// With predicated-set we can (partially) remove the store as well.
// // ENTRY
// obj = new Obj();
// if (parameter_value) {
@@ -1253,23 +1363,25 @@
exit->AddInstruction(write_final);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, /*for_elimination=*/true);
+ graph_->ClearDominanceInformation();
+ graph_->BuildDominatorTree();
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
const ExecutionSubgraph* esg = info->GetNoEscapeSubgraph();
- ASSERT_FALSE(esg->IsValid()) << esg->GetExcludedCohorts();
- ASSERT_FALSE(IsValidSubgraph(esg));
+ EXPECT_TRUE(esg->IsValid()) << esg->GetExcludedCohorts();
+ EXPECT_TRUE(IsValidSubgraph(esg));
std::unordered_set<const HBasicBlock*> contents(esg->ReachableBlocks().begin(),
esg->ReachableBlocks().end());
- ASSERT_EQ(contents.size(), 0u);
- ASSERT_TRUE(contents.find(blks.Get("left")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("right")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("entry")) == contents.end());
- ASSERT_TRUE(contents.find(blks.Get("exit")) == contents.end());
+ EXPECT_EQ(contents.size(), 3u);
+ EXPECT_TRUE(contents.find(blks.Get("left")) == contents.end());
+ EXPECT_FALSE(contents.find(blks.Get("right")) == contents.end());
+ EXPECT_FALSE(contents.find(blks.Get("entry")) == contents.end());
+ EXPECT_FALSE(contents.find(blks.Get("exit")) == contents.end());
}
// // ENTRY
@@ -1372,7 +1484,7 @@
exit->AddInstruction(read_final);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, /*for_elimination=*/true);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
@@ -1437,7 +1549,7 @@
exit->AddInstruction(return_final);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, /*for_elimination=*/true);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
@@ -1618,7 +1730,7 @@
exit->AddInstruction(read_final);
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, nullptr, &allocator, /*for_elimination=*/true);
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
@@ -1633,4 +1745,172 @@
ASSERT_EQ(contents.size(), 0u);
}
+// // ENTRY
+// Obj new_inst = new Obj();
+// new_inst.foo = 12;
+// Obj obj;
+// Obj out;
+// if (param1) {
+// // LEFT_START
+// if (param2) {
+// // LEFT_LEFT
+// obj = new_inst;
+// } else {
+// // LEFT_RIGHT
+// obj = obj_param;
+// }
+// // LEFT_MERGE
+// // technically the phi is enough to cause an escape but might as well be
+// // thorough.
+// // obj = phi[new_inst, param]
+// escape(obj);
+// out = obj;
+// } else {
+// // RIGHT
+// out = obj_param;
+// }
+// // EXIT
+// // Can't do anything with this since we don't have good tracking for the heap-locations
+// // out = phi[param, phi[new_inst, param]]
+// return out.foo
+TEST_F(LoadStoreAnalysisTest, PartialPhiPropagation1) {
+ CreateGraph();
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "left_left"},
+ {"left", "left_right"},
+ {"left_left", "left_merge"},
+ {"left_right", "left_merge"},
+ {"left_merge", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+ GET_BLOCK(left_left);
+ GET_BLOCK(left_right);
+ GET_BLOCK(left_merge);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {left_merge, right});
+ EnsurePredecessorOrder(left_merge, {left_left, left_right});
+ HInstruction* param1 = new (GetAllocator())
+ HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* param2 = new (GetAllocator())
+ HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 2, DataType::Type::kBool);
+ HInstruction* obj_param = new (GetAllocator())
+ HParameterValue(graph_->GetDexFile(), dex::TypeIndex(10), 3, DataType::Type::kReference);
+ HInstruction* c12 = graph_->GetIntConstant(12);
+ HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
+ dex::TypeIndex(10),
+ graph_->GetDexFile(),
+ ScopedNullHandle<mirror::Class>(),
+ false,
+ 0,
+ false);
+ HInstruction* new_inst =
+ new (GetAllocator()) HNewInstance(cls,
+ 0,
+ dex::TypeIndex(10),
+ graph_->GetDexFile(),
+ false,
+ QuickEntrypointEnum::kQuickAllocObjectInitialized);
+ HInstruction* store = new (GetAllocator()) HInstanceFieldSet(new_inst,
+ c12,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(32),
+ false,
+ 0,
+ 0,
+ graph_->GetDexFile(),
+ 0);
+ HInstruction* if_param1 = new (GetAllocator()) HIf(param1);
+ entry->AddInstruction(param1);
+ entry->AddInstruction(param2);
+ entry->AddInstruction(obj_param);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(store);
+ entry->AddInstruction(if_param1);
+ ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
+ ManuallyBuildEnvFor(cls, &current_locals);
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* if_left = new (GetAllocator()) HIf(param2);
+ left->AddInstruction(if_left);
+
+ HInstruction* goto_left_left = new (GetAllocator()) HGoto();
+ left_left->AddInstruction(goto_left_left);
+
+ HInstruction* goto_left_right = new (GetAllocator()) HGoto();
+ left_right->AddInstruction(goto_left_right);
+
+ HPhi* left_phi =
+ new (GetAllocator()) HPhi(GetAllocator(), kNoRegNumber, 2, DataType::Type::kReference);
+ HInstruction* call_left = new (GetAllocator())
+ HInvokeStaticOrDirect(GetAllocator(),
+ 1,
+ DataType::Type::kVoid,
+ 0,
+ {nullptr, 0},
+ nullptr,
+ {},
+ InvokeType::kStatic,
+ {nullptr, 0},
+ HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* goto_left_merge = new (GetAllocator()) HGoto();
+ left_phi->SetRawInputAt(0, obj_param);
+ left_phi->SetRawInputAt(1, new_inst);
+ call_left->AsInvoke()->SetRawInputAt(0, left_phi);
+ left_merge->AddPhi(left_phi);
+ left_merge->AddInstruction(call_left);
+ left_merge->AddInstruction(goto_left_merge);
+ left_phi->SetCanBeNull(true);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(goto_right);
+
+ HPhi* return_phi =
+ new (GetAllocator()) HPhi(GetAllocator(), kNoRegNumber, 2, DataType::Type::kReference);
+ HInstruction* read_exit = new (GetAllocator()) HInstanceFieldGet(return_phi,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(32),
+ false,
+ 0,
+ 0,
+ graph_->GetDexFile(),
+ 0);
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_exit);
+ return_phi->SetRawInputAt(0, left_phi);
+ return_phi->SetRawInputAt(1, obj_param);
+ breturn->AddPhi(return_phi);
+ breturn->AddInstruction(read_exit);
+ breturn->AddInstruction(return_exit);
+
+ HInstruction* exit_instruction = new (GetAllocator()) HExit();
+ exit->AddInstruction(exit_instruction);
+
+ graph_->ClearDominanceInformation();
+ graph_->BuildDominatorTree();
+
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
+ LoadStoreAnalysis lsa(graph_, nullptr, &allocator, LoadStoreAnalysisType::kFull);
+ lsa.Run();
+
+ const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
+ ReferenceInfo* info = heap_location_collector.FindReferenceInfoOf(new_inst);
+ const ExecutionSubgraph* esg = info->GetNoEscapeSubgraph();
+ std::unordered_set<const HBasicBlock*> contents(esg->ReachableBlocks().begin(),
+ esg->ReachableBlocks().end());
+
+ ASSERT_EQ(contents.size(), 0u);
+ ASSERT_FALSE(esg->IsValid());
+}
} // namespace art
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 2e0f2b1..17ce694 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -27,16 +27,23 @@
#include "base/bit_vector-inl.h"
#include "base/bit_vector.h"
#include "base/globals.h"
+#include "base/indenter.h"
#include "base/iteration_range.h"
#include "base/scoped_arena_allocator.h"
#include "base/scoped_arena_containers.h"
+#include "base/transform_iterator.h"
#include "escape.h"
#include "execution_subgraph.h"
+#include "handle.h"
#include "load_store_analysis.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
#include "nodes.h"
+#include "optimizing/execution_subgraph.h"
#include "optimizing_compiler_stats.h"
#include "reference_type_propagation.h"
#include "side_effects_analysis.h"
+#include "stack_map.h"
/**
* The general algorithm of load-store elimination (LSE).
@@ -57,6 +64,9 @@
* - In phase 4, we commit the changes, replacing loads marked for elimination
* in previous processing and removing stores not marked for keeping. We also
* remove allocations that are no longer needed.
+ * - In phase 5, we move allocations which only escape along some execution
+ * paths closer to their escape points and fix up non-escaping paths with
+ * their actual values, creating PHIs when needed.
*
* 1. Walk over blocks and their instructions.
*
@@ -82,7 +92,9 @@
* to maintain the validity of all heap locations during the optimization
* phase, we only record substitutes at this phase and the real elimination
* is delayed till the end of LSE. Loads that require a loop Phi placeholder
- * replacement are recorded for processing later.
+ * replacement are recorded for processing later. We also keep track of the
+ * heap-value at the start of the load so that later partial-LSE can
+ * predicate the load.
* - If the instruction is a store, it updates the heap value for the heap
* location with the stored value and records the store itself so that we can
* mark it for keeping if the value becomes observable. Heap values are
@@ -228,7 +240,80 @@
* The time complexity of this phase is
* O(instructions + instruction_uses) .
*
- * FIXME: The time complexity described above assumes that the
+ * 5. Partial LSE
+ *
+ * Move allocations closer to their escapes and remove/predicate loads and
+ * stores as required.
+ *
+ * Partial singletons are objects which only escape from the function or have
+ * multiple names along certain execution paths. In cases where we recognize
+ * these partial singletons we can move the allocation and initialization
+ * closer to the actual escape(s). We can then perform a simplified version of
+ * LSE step 2 to determine the unescaped value of any reads performed after the
+ * object may have escaped. These values are used to replace the reads with
+ * 'predicated-read' instructions where the value is only read if the object
+ * has actually escaped. We use the existence of the object itself as the
+ * marker of whether escape has occurred.
+ *
+ * There are several steps in this sub-pass
+ *
+ * 5.1 Group references
+ *
+ * Since all heap-locations for a single reference escape at the same time, we
+ * need to group the heap-locations by reference and process them together.
+ *
+ * O(heap_locations).
+ *
+ * FIXME: The time complexity above assumes we can bucket the heap-locations in
+ * O(1), which is not true since we just perform a linear scan of the heap-ref
+ * list. Since there are generally only a small number of heap-references which
+ * are partial-singletons, this is fine and has lower real overhead than a hash
+ * map.
+ *
+ * 5.2 Generate materializations
+ *
+ * Once we have the references we add new 'materialization blocks' on the edges
+ * where escape becomes inevitable. This information is calculated by the
+ * execution-subgraphs created during load-store-analysis. We create new
+ * 'materializations' in these blocks and initialize them with the value of
+ * each heap-location, ignoring side effects (since the object hasn't escaped
+ * yet). In the worst case this has the same time-complexity as step 3 since
+ * we may need to materialize phis.
+ *
+ * O(heap_locations^2 * materialization_edges)
+ *
+ * 5.3 Propagate materializations
+ *
+ * Since we use the materialization as the marker for escape, we need to
+ * propagate it throughout the graph. Since the subgraph analysis considers any
+ * lifetime that escapes a loop (and hence would require a loop-phi) to be
+ * escaping at the loop-header, we do not need to create any loop-phis to do
+ * this.
+ *
+ * O(edges)
+ *
+ * NB: Currently the subgraph analysis considers all objects to have their
+ * lifetimes start at the entry block. This simplifies the analysis enormously
+ * but means that we cannot distinguish between an escape in a loop where the
+ * lifetime does not escape the loop (in which case this pass could optimize)
+ * and one where it does escape the loop (in which case the whole loop is
+ * escaping). This is a shortcoming that would be good to fix at some point.
+ *
+ * 5.4 Propagate partial values
+ *
+ * We need to replace loads and stores to the partial reference with predicated
+ * ones that have default non-escaping values. Again this is the same as step 3.
+ *
+ * O(heap_locations^2 * edges)
+ *
+ * 5.5 Final fixup
+ *
+ * Now all we need to do is replace uses of the old reference with the
+ * appropriate materialization and remove the now-dead original instructions.
+ *
+ * O(instructions + uses)
+ *
+ * FIXME: The time complexities described above assume that the
* HeapLocationCollector finds a heap location for an instruction in O(1)
* time but it is currently O(heap_locations); this can be fixed by adding
* a hash map to the HeapLocationCollector.
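
To make phase 5 concrete, the overall transformation looks roughly like the
following sketch, written in the same Java-like pseudocode the tests use (the
names and the constant are illustrative, not taken from any particular test):

// // Before partial LSE:
// Obj obj = new Obj();
// obj.foo = 12;
// if (param) {
//   escape(obj);  // the only path on which obj escapes
// }
// return obj.foo;
//
// // After partial LSE (conceptually):
// Obj mat = null;  // materialization; null marks "not yet escaped"
// if (param) {
//   mat = new Obj();  // allocation moved onto the escaping edge (5.2)
//   mat.foo = 12;
//   escape(mat);
// }
// // Predicated read (5.4): load only if the object actually escaped,
// // otherwise use the tracked partial value.
// return (mat != null) ? mat.foo : 12;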
@@ -236,11 +321,18 @@
namespace art {
+#define LSE_VLOG \
+ if (::art::LoadStoreElimination::kVerboseLoggingMode && VLOG_IS_ON(compiler)) LOG(INFO)
+
+class PartialLoadStoreEliminationHelper;
+class HeapRefHolder;
+
// Use HGraphDelegateVisitor for which all VisitInvokeXXX() delegate to VisitInvoke().
class LSEVisitor final : private HGraphDelegateVisitor {
public:
LSEVisitor(HGraph* graph,
const HeapLocationCollector& heap_location_collector,
+ bool perform_partial_lse,
OptimizingCompilerStats* stats);
void Run();
@@ -278,6 +370,45 @@
uint32_t heap_location_;
};
+ struct Marker {};
+
+ class Value;
+
+ class PriorValueHolder {
+ public:
+ constexpr explicit PriorValueHolder(Value prior);
+
+ constexpr bool IsInstruction() const {
+ return std::holds_alternative<HInstruction*>(value_);
+ }
+ constexpr bool IsPhi() const {
+ return std::holds_alternative<PhiPlaceholder>(value_);
+ }
+ constexpr bool IsDefault() const {
+ return std::holds_alternative<Marker>(value_);
+ }
+ constexpr PhiPlaceholder GetPhiPlaceholder() const {
+ DCHECK(IsPhi());
+ return std::get<PhiPlaceholder>(value_);
+ }
+ constexpr HInstruction* GetInstruction() const {
+ DCHECK(IsInstruction());
+ return std::get<HInstruction*>(value_);
+ }
+
+ Value ToValue() const;
+ void Dump(std::ostream& oss) const;
+
+ constexpr bool Equals(PriorValueHolder other) const {
+ return value_ == other.value_;
+ }
+
+ private:
+ std::variant<Marker, HInstruction*, PhiPlaceholder> value_;
+ };
+
+ friend constexpr bool operator==(const Marker&, const Marker&);
+ friend constexpr bool operator==(const PriorValueHolder& p1, const PriorValueHolder& p2);
friend constexpr bool operator==(const PhiPlaceholder& p1, const PhiPlaceholder& p2);
friend std::ostream& operator<<(std::ostream& oss, const PhiPlaceholder& p2);
@@ -310,6 +441,14 @@
return Value(ValuelessType::kPureUnknown);
}
+ static constexpr Value PartialUnknown(Value old_value) {
+ if (old_value.IsInvalid() || old_value.IsPureUnknown()) {
+ return PureUnknown();
+ } else {
+ return Value(PriorValueHolder(old_value));
+ }
+ }
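+ // Sketch of the intended use (shown in the visitor changes below): when a
+ // call or aliasing store may clobber a location we wrap the prior value
+ // instead of discarding it,
+ //   heap_values[i].value = Value::PartialUnknown(heap_values[i].value);
+ // and partial LSE later recovers it via GetPriorValue().ToValue() when
+ // computing the non-escaped value of a predicated load.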
+
static constexpr Value MergedUnknown(PhiPlaceholder phi_placeholder) {
return Value(MergedUnknownMarker{phi_placeholder});
}
@@ -346,6 +485,10 @@
GetValuelessType() == ValuelessType::kInvalid;
}
+ bool IsPartialUnknown() const {
+ return std::holds_alternative<PriorValueHolder>(value_);
+ }
+
bool IsMergedUnknown() const {
return std::holds_alternative<MergedUnknownMarker>(value_);
}
@@ -356,7 +499,7 @@
}
bool IsUnknown() const {
- return IsPureUnknown() || IsMergedUnknown();
+ return IsPureUnknown() || IsMergedUnknown() || IsPartialUnknown();
}
bool IsDefault() const {
@@ -381,10 +524,15 @@
}
HInstruction* GetInstruction() const {
- DCHECK(IsInstruction());
+ DCHECK(IsInstruction()) << *this;
return std::get<HInstruction*>(value_);
}
+ PriorValueHolder GetPriorValue() const {
+ DCHECK(IsPartialUnknown());
+ return std::get<PriorValueHolder>(value_);
+ }
+
PhiPlaceholder GetPhiPlaceholder() const {
DCHECK(NeedsPhi() || IsMergedUnknown());
if (NeedsNonLoopPhi()) {
@@ -402,7 +550,7 @@
}
HBasicBlock* GetMergeBlock(const HGraph* graph) const {
- DCHECK(IsMergedUnknown()) << this;
+ DCHECK(IsMergedUnknown()) << *this;
return graph->GetBlocks()[GetMergeBlockId()];
}
@@ -411,6 +559,8 @@
return GetPhiPlaceholder().GetHeapLocation();
}
+ constexpr bool ExactEquals(Value other) const;
+
constexpr bool Equals(Value other) const;
constexpr bool Equals(HInstruction* instruction) const {
@@ -427,7 +577,8 @@
HInstruction*,
MergedUnknownMarker,
NeedsNonLoopPhiMarker,
- NeedsLoopPhiMarker>;
+ NeedsLoopPhiMarker,
+ PriorValueHolder>;
constexpr ValuelessType GetValuelessType() const {
return std::get<ValuelessType>(value_);
}
@@ -493,7 +644,9 @@
}
Value Replacement(Value value) const {
- DCHECK(value.NeedsPhi());
+ DCHECK(value.NeedsPhi() ||
+ (current_phase_ == Phase::kPartialElimination && value.IsMergedUnknown()))
+ << value << " phase: " << current_phase_;
Value replacement = phi_placeholder_replacements_[PhiPlaceholderIndex(value)];
DCHECK(replacement.IsUnknown() || replacement.IsInstruction());
DCHECK(replacement.IsUnknown() ||
@@ -502,6 +655,16 @@
}
Value ReplacementOrValue(Value value) const {
+ if (current_phase_ == Phase::kPartialElimination) {
+ if (value.IsPartialUnknown()) {
+ value = value.GetPriorValue().ToValue();
+ }
+ if (value.IsMergedUnknown()) {
+ return phi_placeholder_replacements_[PhiPlaceholderIndex(value)].IsValid()
+ ? Replacement(value)
+ : Value::ForLoopPhiPlaceholder(value.GetPhiPlaceholder());
+ }
+ }
if (value.NeedsPhi() && phi_placeholder_replacements_[PhiPlaceholderIndex(value)].IsValid()) {
return Replacement(value);
} else {
@@ -598,6 +761,7 @@
static bool IsLoad(HInstruction* instruction) {
// Unresolved load is not treated as a load.
return instruction->IsInstanceFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsStaticFieldGet() ||
instruction->IsVecLoad() ||
instruction->IsArrayGet();
@@ -623,7 +787,7 @@
// Keep the store referenced by the instruction, or all stores that feed a Phi placeholder.
// This is necessary if the stored heap value can be observed.
void KeepStores(Value value) {
- if (value.IsPureUnknown()) {
+ if (value.IsPureUnknown() || value.IsPartialUnknown()) {
return;
}
if (value.IsMergedUnknown()) {
@@ -743,12 +907,16 @@
void VisitGetLocation(HInstruction* instruction, size_t idx);
void VisitSetLocation(HInstruction* instruction, size_t idx, HInstruction* value);
+ void RecordFieldInfo(const FieldInfo* info, size_t heap_loc) {
+ field_infos_[heap_loc] = info;
+ }
void VisitBasicBlock(HBasicBlock* block) override;
enum class Phase {
kLoadElimination,
- kStoreElimination
+ kStoreElimination,
+ kPartialElimination,
};
bool TryReplacingLoopPhiPlaceholderWithDefault(
@@ -765,8 +933,10 @@
bool can_use_default_or_phi);
bool MaterializeLoopPhis(const ScopedArenaVector<size_t>& phi_placeholder_indexes,
DataType::Type type);
+ bool MaterializeLoopPhis(ArrayRef<const size_t> phi_placeholder_indexes, DataType::Type type);
bool MaterializeLoopPhis(const ArenaBitVector& phi_placeholders_to_materialize,
DataType::Type type);
+ bool FullyMaterializePhi(PhiPlaceholder phi_placeholder, DataType::Type type);
std::optional<PhiPlaceholder> TryToMaterializeLoopPhis(PhiPlaceholder phi_placeholder,
HInstruction* load);
void ProcessLoopPhiWithUnknownInput(PhiPlaceholder loop_phi_with_unknown_input);
@@ -776,6 +946,22 @@
void UpdateValueRecordForStoreElimination(/*inout*/ValueRecord* value_record);
void FindOldValueForPhiPlaceholder(PhiPlaceholder phi_placeholder, DataType::Type type);
void FindStoresWritingOldValues();
+ void FinishFullLSE();
+ void PrepareForPartialPhiComputation();
+ // Create materialization block and materialization object for the given predecessor of entry.
+ HInstruction* SetupPartialMaterialization(PartialLoadStoreEliminationHelper& helper,
+ HeapRefHolder&& holder,
+ size_t pred_idx,
+ HBasicBlock* blk);
+ // Returns the value that would be read by the 'read' instruction on
+ // 'orig_new_inst' if 'orig_new_inst' has not escaped.
+ HInstruction* GetPartialValueAt(HNewInstance* orig_new_inst, HInstruction* read);
+ void MovePartialEscapes();
+
+ void VisitPredicatedInstanceFieldGet(HPredicatedInstanceFieldGet* instruction) override {
+ LOG(FATAL) << "Visited instruction " << instruction->DumpWithoutArgs()
+ << " but LSE should be the only source of predicated-ifield-gets!";
+ }
void VisitInstanceFieldGet(HInstanceFieldGet* instruction) override {
HInstruction* object = instruction->InputAt(0);
@@ -914,7 +1100,7 @@
}
if (side_effects.DoesAnyWrite()) {
// The value may be clobbered.
- heap_values[i].value = Value::PureUnknown();
+ heap_values[i].value = Value::PartialUnknown(heap_values[i].value);
}
}
}
@@ -1010,6 +1196,12 @@
}
}
+ bool ShouldPerformPartialLSE() const {
+ return perform_partial_lse_ && !GetGraph()->IsCompilingOsr();
+ }
+
+ bool perform_partial_lse_;
+
const HeapLocationCollector& heap_location_collector_;
// Use local allocator for allocating memory.
@@ -1035,6 +1227,12 @@
// in the end. These are indexed by the load's id.
ScopedArenaVector<HInstruction*> substitute_instructions_for_loads_;
+ // Value at the start of the given instruction for instructions which directly
+ // read from a heap-location (i.e. FieldGet). The mapping to heap-location is
+ // implicit through the fact that each instruction can only directly refer to
+ // a single heap-location.
+ ScopedArenaHashMap<HInstruction*, Value> intermediate_values_;
+
// Record stores to keep in a bit vector indexed by instruction ID.
ArenaBitVector kept_stores_;
// When we need to keep all stores that feed a Phi placeholder, we just record the
@@ -1063,23 +1261,70 @@
ScopedArenaVector<HInstruction*> singleton_new_instances_;
+ // The field infos for each heap location (if relevant).
+ ScopedArenaVector<const FieldInfo*> field_infos_;
+
Phase current_phase_;
+ friend class PartialLoadStoreEliminationHelper;
+ friend struct ScopedRestoreHeapValues;
+
friend std::ostream& operator<<(std::ostream& os, const Value& v);
- friend std::ostream& operator<<(std::ostream& os, const Phase& phase);
+ friend std::ostream& operator<<(std::ostream& os, const PriorValueHolder& v);
+ friend std::ostream& operator<<(std::ostream& oss, const LSEVisitor::Phase& phase);
DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
};
+std::ostream& operator<<(std::ostream& oss, const LSEVisitor::PriorValueHolder& p) {
+ p.Dump(oss);
+ return oss;
+}
+
std::ostream& operator<<(std::ostream& oss, const LSEVisitor::Phase& phase) {
switch (phase) {
case LSEVisitor::Phase::kLoadElimination:
return oss << "kLoadElimination";
case LSEVisitor::Phase::kStoreElimination:
return oss << "kStoreElimination";
+ case LSEVisitor::Phase::kPartialElimination:
+ return oss << "kPartialElimination";
}
}
+void LSEVisitor::PriorValueHolder::Dump(std::ostream& oss) const {
+ if (IsDefault()) {
+ oss << "Default";
+ } else if (IsPhi()) {
+ oss << "Phi: " << GetPhiPlaceholder();
+ } else {
+ oss << "Instruction: " << *GetInstruction();
+ }
+}
+
+constexpr LSEVisitor::PriorValueHolder::PriorValueHolder(Value val)
+ : value_(Marker{}) {
+ DCHECK(!val.IsInvalid() && !val.IsPureUnknown());
+ if (val.IsPartialUnknown()) {
+ value_ = val.GetPriorValue().value_;
+ } else if (val.IsMergedUnknown() || val.NeedsPhi()) {
+ value_ = val.GetPhiPlaceholder();
+ } else if (val.IsInstruction()) {
+ value_ = val.GetInstruction();
+ } else {
+ DCHECK(val.IsDefault());
+ }
+}
+
+constexpr bool operator==(const LSEVisitor::Marker&, const LSEVisitor::Marker&) {
+ return true;
+}
+
+constexpr bool operator==(const LSEVisitor::PriorValueHolder& p1,
+ const LSEVisitor::PriorValueHolder& p2) {
+ return p1.Equals(p2);
+}
+
constexpr bool operator==(const LSEVisitor::PhiPlaceholder& p1,
const LSEVisitor::PhiPlaceholder& p2) {
return p1.Equals(p2);
@@ -1105,6 +1350,20 @@
return oss;
}
+LSEVisitor::Value LSEVisitor::PriorValueHolder::ToValue() const {
+ if (IsDefault()) {
+ return Value::Default();
+ } else if (IsPhi()) {
+ return Value::ForLoopPhiPlaceholder(GetPhiPlaceholder());
+ } else {
+ return Value::ForInstruction(GetInstruction());
+ }
+}
+
+constexpr bool LSEVisitor::Value::ExactEquals(LSEVisitor::Value other) const {
+ return value_ == other.value_;
+}
+
constexpr bool LSEVisitor::Value::Equals(LSEVisitor::Value other) const {
// Only valid values can be compared.
DCHECK(IsValid());
@@ -1129,6 +1388,8 @@
case ValuelessType::kInvalid:
return os << "Invalid";
}
+ } else if (IsPartialUnknown()) {
+ return os << "PartialUnknown[" << GetPriorValue() << "]";
} else if (IsInstruction()) {
return os << "Instruction[id: " << GetInstruction()->GetId()
<< ", block: " << GetInstruction()->GetBlock()->GetBlockId() << "]";
@@ -1151,8 +1412,10 @@
LSEVisitor::LSEVisitor(HGraph* graph,
const HeapLocationCollector& heap_location_collector,
+ bool perform_partial_lse,
OptimizingCompilerStats* stats)
: HGraphDelegateVisitor(graph, stats),
+ perform_partial_lse_(perform_partial_lse),
heap_location_collector_(heap_location_collector),
allocator_(graph->GetArenaStack()),
num_phi_placeholders_(GetGraph()->GetBlocks().size() *
@@ -1166,13 +1429,14 @@
substitute_instructions_for_loads_(graph->GetCurrentInstructionId(),
nullptr,
allocator_.Adapter(kArenaAllocLSE)),
+ intermediate_values_(allocator_.Adapter(kArenaAllocLSE)),
kept_stores_(&allocator_,
- /*start_bits=*/ graph->GetCurrentInstructionId(),
- /*expandable=*/ false,
+ /*start_bits=*/graph->GetCurrentInstructionId(),
+ /*expandable=*/false,
kArenaAllocLSE),
phi_placeholders_to_search_for_kept_stores_(&allocator_,
num_phi_placeholders_,
- /*expandable=*/ false,
+ /*expandable=*/false,
kArenaAllocLSE),
loads_requiring_loop_phi_(allocator_.Adapter(kArenaAllocLSE)),
store_records_(allocator_.Adapter(kArenaAllocLSE)),
@@ -1180,10 +1444,12 @@
Value::Invalid(),
allocator_.Adapter(kArenaAllocLSE)),
kept_merged_unknowns_(&allocator_,
- /*start_bits=*/ num_phi_placeholders_,
- /*expandable=*/ false,
+ /*start_bits=*/num_phi_placeholders_,
+ /*expandable=*/false,
kArenaAllocLSE),
singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)),
+ field_infos_(heap_location_collector_.GetNumberOfHeapLocations(),
+ allocator_.Adapter(kArenaAllocLSE)),
current_phase_(Phase::kLoadElimination) {
// Clear bit vectors.
phi_placeholders_to_search_for_kept_stores_.ClearAllBits();
@@ -1249,9 +1515,13 @@
// Don't eliminate loads in irreducible loops.
if (block->GetLoopInformation()->IsIrreducible()) {
heap_values.resize(num_heap_locations,
- {/*value=*/Value::PureUnknown(), /*stored_by=*/Value::PureUnknown()});
+ {/*value=*/Value::Invalid(), /*stored_by=*/Value::PureUnknown()});
// Also keep the stores before the loop header, including in blocks that were not visited yet.
+ bool is_osr = GetGraph()->IsCompilingOsr();
for (size_t idx = 0u; idx != num_heap_locations; ++idx) {
+ heap_values[idx].value =
+ is_osr ? Value::PureUnknown()
+ : Value::MergedUnknown(GetPhiPlaceholder(block->GetBlockId(), idx));
KeepStores(Value::ForLoopPhiPlaceholder(GetPhiPlaceholder(block->GetBlockId(), idx)));
}
return;
@@ -1410,9 +1680,10 @@
phi_inputs.clear();
for (HBasicBlock* predecessor : current_block->GetPredecessors()) {
Value pred_value = ReplacementOrValue(heap_values_for_[predecessor->GetBlockId()][idx].value);
- DCHECK(!pred_value.IsUnknown())
- << "block " << current_block->GetBlockId() << " pred: " << predecessor->GetBlockId();
- if (pred_value.NeedsNonLoopPhi()) {
+ DCHECK(!pred_value.IsPureUnknown()) << pred_value << " block " << current_block->GetBlockId()
+ << " pred: " << predecessor->GetBlockId();
+ if (pred_value.NeedsNonLoopPhi() ||
+ (current_phase_ == Phase::kPartialElimination && pred_value.IsMergedUnknown())) {
// We need to process the Phi placeholder first.
work_queue.push_back(pred_value.GetPhiPlaceholder());
} else if (pred_value.IsDefault()) {
@@ -1439,7 +1710,17 @@
uint32_t block_id = instruction->GetBlock()->GetBlockId();
ScopedArenaVector<ValueRecord>& heap_values = heap_values_for_[block_id];
ValueRecord& record = heap_values[idx];
+ if (instruction->IsFieldAccess()) {
+ RecordFieldInfo(&instruction->GetFieldInfo(), idx);
+ }
DCHECK(record.value.IsUnknown() || record.value.Equals(ReplacementOrValue(record.value)));
+ // If we are unknown, we either come from somewhere untracked or we can reconstruct the partial
+ // value.
+ DCHECK(!record.value.IsPureUnknown() ||
+ heap_location_collector_.GetHeapLocation(idx)->GetReferenceInfo() == nullptr ||
+ !heap_location_collector_.GetHeapLocation(idx)->GetReferenceInfo()->IsPartialSingleton())
+ << "In " << GetGraph()->PrettyMethod() << ": " << record.value << " for " << *instruction;
+ intermediate_values_.insert({instruction, record.value});
loads_and_stores_.push_back({ instruction, idx });
if ((record.value.IsDefault() || record.value.NeedsNonLoopPhi()) &&
!IsDefaultOrPhiAllowedForLoad(instruction)) {
@@ -1475,6 +1756,9 @@
void LSEVisitor::VisitSetLocation(HInstruction* instruction, size_t idx, HInstruction* value) {
DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
DCHECK(!IsStore(value)) << value->DebugName();
+ if (instruction->IsFieldAccess()) {
+ RecordFieldInfo(&instruction->GetFieldInfo(), idx);
+ }
// value may already have a substitute.
value = FindSubstitute(value);
HBasicBlock* block = instruction->GetBlock();
@@ -1533,7 +1817,7 @@
// Kill heap locations that may alias and keep previous stores to these locations.
KeepStores(heap_values[i].stored_by);
heap_values[i].stored_by = Value::PureUnknown();
- heap_values[i].value = Value::PureUnknown();
+ heap_values[i].value = Value::PartialUnknown(heap_values[i].value);
}
}
@@ -1753,6 +2037,8 @@
if (!phi_placeholders_to_materialize->IsBitSet(PhiPlaceholderIndex(value))) {
phi_placeholders_to_materialize->SetBit(PhiPlaceholderIndex(value));
work_queue.push_back(value.GetPhiPlaceholder());
+ LSE_VLOG << "For materialization of " << phi_placeholder
+ << " we need to materialize " << value;
}
}
}
@@ -1764,6 +2050,11 @@
bool LSEVisitor::MaterializeLoopPhis(const ScopedArenaVector<size_t>& phi_placeholder_indexes,
DataType::Type type) {
+ return MaterializeLoopPhis(ArrayRef<const size_t>(phi_placeholder_indexes), type);
+}
+
+bool LSEVisitor::MaterializeLoopPhis(ArrayRef<const size_t> phi_placeholder_indexes,
+ DataType::Type type) {
// Materialize all predecessors that do not need a loop Phi and determine if all inputs
// other than loop Phis are the same.
const ArenaVector<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
@@ -1775,8 +2066,11 @@
size_t idx = phi_placeholder.GetHeapLocation();
for (HBasicBlock* predecessor : block->GetPredecessors()) {
Value value = ReplacementOrValue(heap_values_for_[predecessor->GetBlockId()][idx].value);
- if (value.NeedsNonLoopPhi()) {
- DCHECK(current_phase_ == Phase::kLoadElimination);
+ if (value.NeedsNonLoopPhi() ||
+ (current_phase_ == Phase::kPartialElimination && value.IsMergedUnknown())) {
+ DCHECK(current_phase_ == Phase::kLoadElimination ||
+ current_phase_ == Phase::kPartialElimination)
+ << current_phase_;
MaterializeNonLoopPhis(value.GetPhiPlaceholder(), type);
value = Replacement(value);
}
@@ -2001,6 +2295,15 @@
return true;
}
+bool LSEVisitor::FullyMaterializePhi(PhiPlaceholder phi_placeholder, DataType::Type type) {
+ ScopedArenaAllocator saa(GetGraph()->GetArenaStack());
+ ArenaBitVector abv(&saa, num_phi_placeholders_, false, ArenaAllocKind::kArenaAllocLSE);
+ auto res =
+ FindLoopPhisToMaterialize(phi_placeholder, &abv, type, /* can_use_default_or_phi=*/true);
+ CHECK(!res.has_value()) << *res;
+ return MaterializeLoopPhis(abv, type);
+}
+
std::optional<LSEVisitor::PhiPlaceholder> LSEVisitor::TryToMaterializeLoopPhis(
PhiPlaceholder phi_placeholder, HInstruction* load) {
DCHECK(phi_placeholder_replacements_[PhiPlaceholderIndex(phi_placeholder)].IsInvalid());
@@ -2144,7 +2447,7 @@
// propagated as a value to this load) and store the load as the new heap value.
found_unreplaceable_load = true;
KeepStores(record.value);
- record.value = Value::PureUnknown();
+ record.value = Value::MergedUnknown(record.value.GetPhiPlaceholder());
local_heap_values[idx] = Value::ForInstruction(load_or_store);
} else if (local_heap_values[idx].NeedsLoopPhi()) {
// The load may still be replaced with a Phi later.
@@ -2386,7 +2689,57 @@
!success);
}
+struct ScopedRestoreHeapValues {
+ public:
+ ScopedRestoreHeapValues(ArenaStack* alloc,
+ size_t num_heap_locs,
+ ScopedArenaVector<ScopedArenaVector<LSEVisitor::ValueRecord>>& to_restore)
+ : alloc_(alloc),
+ updated_values_(alloc_.Adapter(kArenaAllocLSE)),
+ to_restore_(to_restore) {
+ updated_values_.reserve(num_heap_locs * to_restore_.size());
+ }
+
+ ~ScopedRestoreHeapValues() {
+ for (const auto& rec : updated_values_) {
+ to_restore_[rec.blk_id][rec.heap_loc].value = rec.val_;
+ }
+ }
+
+ template<typename Func>
+ void ForEachRecord(Func func) {
+ for (size_t blk_id : Range(to_restore_.size())) {
+ for (size_t heap_loc : Range(to_restore_[blk_id].size())) {
+ LSEVisitor::ValueRecord* vr = &to_restore_[blk_id][heap_loc];
+ LSEVisitor::Value initial = vr->value;
+ func(vr);
+ if (!vr->value.ExactEquals(initial)) {
+ updated_values_.push_back({blk_id, heap_loc, initial});
+ }
+ }
+ }
+ }
+
+ private:
+ struct UpdateRecord {
+ size_t blk_id;
+ size_t heap_loc;
+ LSEVisitor::Value val_;
+ };
+ ScopedArenaAllocator alloc_;
+ ScopedArenaVector<UpdateRecord> updated_values_;
+ ScopedArenaVector<ScopedArenaVector<LSEVisitor::ValueRecord>>& to_restore_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedRestoreHeapValues);
+};
+
void LSEVisitor::FindStoresWritingOldValues() {
+ // Partial LSE relies on knowing the real heap-values, not the
+ // store-replacement versions so we need to restore the map after removing
+ // stores.
+ ScopedRestoreHeapValues heap_vals(allocator_.GetArenaStack(),
+ heap_location_collector_.GetNumberOfHeapLocations(),
+ heap_values_for_);
// The Phi placeholder replacements have so far been used for eliminating loads,
// tracking values that would be stored if all stores were kept. As we want to
// compare actual old values after removing unmarked stores, prune the Phi
@@ -2401,10 +2754,14 @@
}
// Update heap values at end of blocks.
- for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
- for (ValueRecord& value_record : heap_values_for_[block->GetBlockId()]) {
- UpdateValueRecordForStoreElimination(&value_record);
- }
+ heap_vals.ForEachRecord([&](ValueRecord* rec) {
+ UpdateValueRecordForStoreElimination(rec);
+ });
+
+ if (kIsDebugBuild) {
+ heap_vals.ForEachRecord([](ValueRecord* rec) {
+ DCHECK(!rec->value.NeedsNonLoopPhi()) << rec->value;
+ });
}
// Use local allocator to reduce peak memory usage.
@@ -2458,7 +2815,903 @@
FindStoresWritingOldValues();
// 4. Replace loads and remove unnecessary stores and singleton allocations.
+ FinishFullLSE();
+ // 5. Move partial escapes down and fixup with PHIs.
+ current_phase_ = Phase::kPartialElimination;
+ MovePartialEscapes();
+}
+
+// Clear unknown loop-phi results. Here we'll be able to use partial-unknowns, so we need to
+// retry all of them with more information about where they come from.
+void LSEVisitor::PrepareForPartialPhiComputation() {
+ std::replace_if(
+ phi_placeholder_replacements_.begin(),
+ phi_placeholder_replacements_.end(),
+ [](const Value& val) { return val.IsPureUnknown(); },
+ Value::Invalid());
+}
+
+class PartialLoadStoreEliminationHelper {
+ public:
+ PartialLoadStoreEliminationHelper(LSEVisitor* lse, ScopedArenaAllocator* alloc)
+ : lse_(lse),
+ alloc_(alloc),
+ new_ref_phis_(alloc_->Adapter(kArenaAllocLSE)),
+ heap_refs_(alloc_->Adapter(kArenaAllocLSE)),
+ max_preds_per_block_((*std::max_element(GetGraph()->GetActiveBlocks().begin(),
+ GetGraph()->GetActiveBlocks().end(),
+ [](HBasicBlock* a, HBasicBlock* b) {
+ return a->GetNumberOfPredecessors() <
+ b->GetNumberOfPredecessors();
+ }))
+ ->GetNumberOfPredecessors()),
+ materialization_blocks_(GetGraph()->GetBlocks().size() * max_preds_per_block_,
+ nullptr,
+ alloc_->Adapter(kArenaAllocLSE)),
+ first_materialization_block_id_(GetGraph()->GetBlocks().size()) {
+ heap_refs_.reserve(lse_->heap_location_collector_.GetNumberOfReferenceInfos());
+ new_ref_phis_.reserve(lse_->heap_location_collector_.GetNumberOfReferenceInfos() *
+ GetGraph()->GetBlocks().size());
+ CollectInterestingHeapRefs();
+ }
+
+ ~PartialLoadStoreEliminationHelper() {
+ if (heap_refs_.empty()) {
+ return;
+ }
+ ReferenceTypePropagation rtp_fixup(GetGraph(),
+ Handle<mirror::ClassLoader>(),
+ Handle<mirror::DexCache>(),
+ /* is_first_run= */ false);
+ rtp_fixup.Visit(ArrayRef<HInstruction* const>(new_ref_phis_));
+ GetGraph()->ClearLoopInformation();
+ GetGraph()->ClearDominanceInformation();
+ GetGraph()->ClearReachabilityInformation();
+ GetGraph()->BuildDominatorTree();
+ GetGraph()->ComputeReachabilityInformation();
+ }
+
+ class IdxToHeapLoc {
+ public:
+ explicit IdxToHeapLoc(const HeapLocationCollector* hlc) : collector_(hlc) {}
+ HeapLocation* operator()(size_t idx) const {
+ return collector_->GetHeapLocation(idx);
+ }
+
+ private:
+ const HeapLocationCollector* collector_;
+ };
+
+ class HeapReferenceData {
+ public:
+ using LocIterator = IterationRange<TransformIterator<BitVector::IndexIterator, IdxToHeapLoc>>;
+ HeapReferenceData(PartialLoadStoreEliminationHelper* helper,
+ HNewInstance* new_inst,
+ const ExecutionSubgraph* subgraph,
+ ScopedArenaAllocator* alloc)
+ : new_instance_(new_inst),
+ helper_(helper),
+ heap_locs_(alloc,
+ helper->lse_->heap_location_collector_.GetNumberOfHeapLocations(),
+ /* expandable= */ false,
+ kArenaAllocLSE),
+ materializations_(
+ // We generally won't need to create too many materialization blocks and we can expand
+ // this as needed so just start off with 2x.
+ 2 * helper->lse_->GetGraph()->GetBlocks().size(),
+ nullptr,
+ alloc->Adapter(kArenaAllocLSE)),
+ collector_(helper->lse_->heap_location_collector_),
+ subgraph_(subgraph) {}
+
+ LocIterator IterateLocations() {
+ auto idxs = heap_locs_.Indexes();
+ return MakeTransformRange(idxs, IdxToHeapLoc(&collector_));
+ }
+
+ void AddHeapLocation(size_t idx) {
+ heap_locs_.SetBit(idx);
+ }
+
+ const ExecutionSubgraph* GetNoEscapeSubgraph() const {
+ return subgraph_;
+ }
+
+ bool IsPostEscape(HBasicBlock* blk) {
+ return std::any_of(
+ subgraph_->GetExcludedCohorts().cbegin(),
+ subgraph_->GetExcludedCohorts().cend(),
+ [&](const ExecutionSubgraph::ExcludedCohort& ec) { return ec.PrecedesBlock(blk); });
+ }
+
+ bool InEscapeCohort(HBasicBlock* blk) {
+ return std::any_of(
+ subgraph_->GetExcludedCohorts().cbegin(),
+ subgraph_->GetExcludedCohorts().cend(),
+ [&](const ExecutionSubgraph::ExcludedCohort& ec) { return ec.ContainsBlock(blk); });
+ }
+
+ bool BeforeAllEscapes(HBasicBlock* b) {
+ return std::none_of(subgraph_->GetExcludedCohorts().cbegin(),
+ subgraph_->GetExcludedCohorts().cend(),
+ [&](const ExecutionSubgraph::ExcludedCohort& ec) {
+ return ec.PrecedesBlock(b) || ec.ContainsBlock(b);
+ });
+ }
+
+ HNewInstance* OriginalNewInstance() const {
+ return new_instance_;
+ }
+
+ // Collect and replace all uses. We need to perform this twice since we will
+ // generate PHIs and additional uses as we create the default-values for
+ // pred-gets. These values might be other references that are also being
+ // partially eliminated. By running just the replacement part again we are
+ // able to avoid having to keep another whole in-progress partial map
+ // around. Since we will have already handled all the other uses in the
+ // first pass, the second one will be quite fast.
+ void FixupUses(bool first_pass) {
+ ScopedArenaAllocator saa(GetGraph()->GetArenaStack());
+ // Replace uses with materialized values.
+ ScopedArenaVector<InstructionUse<HInstruction>> to_replace(saa.Adapter(kArenaAllocLSE));
+ ScopedArenaVector<HInstruction*> to_remove(saa.Adapter(kArenaAllocLSE));
+ // Do we need to add a constructor-fence.
+ ScopedArenaVector<InstructionUse<HConstructorFence>> constructor_fences(
+ saa.Adapter(kArenaAllocLSE));
+ ScopedArenaVector<InstructionUse<HInstruction>> to_predicate(saa.Adapter(kArenaAllocLSE));
+
+ CollectReplacements(to_replace, to_remove, constructor_fences, to_predicate);
+
+ if (!first_pass) {
+ // If another partial creates new references they can only be in Phis or pred-get defaults
+ // so they must be in the to_replace group.
+ DCHECK(to_predicate.empty());
+ DCHECK(constructor_fences.empty());
+ DCHECK(to_remove.empty());
+ }
+
+ ReplaceInput(to_replace);
+ RemoveInput(to_remove);
+ CreateConstructorFences(constructor_fences);
+ PredicateInstructions(to_predicate);
+
+ CHECK(OriginalNewInstance()->GetUses().empty())
+ << OriginalNewInstance()->GetUses() << ", " << OriginalNewInstance()->GetEnvUses();
+ }
+
+ void AddMaterialization(HBasicBlock* blk, HInstruction* ins) {
+ if (blk->GetBlockId() >= materializations_.size()) {
+ // Make sure the materialization array is large enough; try to avoid
+ // re-sizing too many times by giving extra space.
+ materializations_.resize(blk->GetBlockId() * 2, nullptr);
+ }
+ DCHECK(materializations_[blk->GetBlockId()] == nullptr)
+ << "Already have a materialization in block " << blk->GetBlockId() << ": "
+ << *materializations_[blk->GetBlockId()] << " when trying to set materialization to "
+ << *ins;
+ materializations_[blk->GetBlockId()] = ins;
+ LSE_VLOG << "In block " << blk->GetBlockId() << " materialization is " << *ins;
+ helper_->NotifyNewMaterialization(ins);
+ }
+
+ bool HasMaterialization(HBasicBlock* blk) const {
+ return blk->GetBlockId() < materializations_.size() &&
+ materializations_[blk->GetBlockId()] != nullptr;
+ }
+
+ HInstruction* GetMaterialization(HBasicBlock* blk) const {
+ if (materializations_.size() <= blk->GetBlockId() ||
+ materializations_[blk->GetBlockId()] == nullptr) {
+ // This must be a materialization block added after the partial LSE of
+ // the current reference finished. Since every edge can only have at
+ // most one materialization block added to it, we can just check the
+ // block's predecessor.
+ DCHECK(helper_->IsMaterializationBlock(blk));
+ blk = helper_->FindDominatingNonMaterializationBlock(blk);
+ DCHECK(!helper_->IsMaterializationBlock(blk));
+ }
+ DCHECK_GT(materializations_.size(), blk->GetBlockId());
+ DCHECK(materializations_[blk->GetBlockId()] != nullptr);
+ return materializations_[blk->GetBlockId()];
+ }
+
+ void GenerateMaterializationValueFromPredecessors(HBasicBlock* blk) {
+ DCHECK(std::none_of(GetNoEscapeSubgraph()->GetExcludedCohorts().begin(),
+ GetNoEscapeSubgraph()->GetExcludedCohorts().end(),
+ [&](const ExecutionSubgraph::ExcludedCohort& cohort) {
+ return cohort.IsEntryBlock(blk);
+ }));
+ DCHECK(!HasMaterialization(blk));
+ if (blk->IsExitBlock()) {
+ return;
+ } else if (blk->IsLoopHeader()) {
+ // See comment in execution_subgraph.h. Currently we act as though every
+ // allocation for partial elimination takes place in the entry block.
+ // This simplifies the analysis by making it so any escape cohort
+ // expands to contain any loops it is a part of. This is something that
+ // we should rectify at some point. In either case however we can still
+ // special case the loop-header since (1) currently the loop can't have
+ // any merges between different cohort entries since the pre-header will
+ // be the earliest place entry can happen and (2) even if the analysis
+ // is improved to consider lifetime of the object WRT loops any values
+ // which would require loop-phis would have to make the whole loop
+ // escape anyway.
+ // This all means we can always use value from the pre-header when the
+ // block is the loop-header and we didn't already create a
+ // materialization block. (NB when we do improve the analysis we will
+ // need to modify the materialization creation code to deal with this
+ // correctly.)
+ HInstruction* pre_header_val =
+ GetMaterialization(blk->GetLoopInformation()->GetPreHeader());
+ AddMaterialization(blk, pre_header_val);
+ return;
+ }
+ ScopedArenaAllocator saa(GetGraph()->GetArenaStack());
+ ScopedArenaVector<HInstruction*> pred_vals(saa.Adapter(kArenaAllocLSE));
+ pred_vals.reserve(blk->GetNumberOfPredecessors());
+ for (HBasicBlock* pred : blk->GetPredecessors()) {
+ DCHECK(HasMaterialization(pred));
+ pred_vals.push_back(GetMaterialization(pred));
+ }
+ GenerateMaterializationValueFromPredecessorsDirect(blk, pred_vals);
+ }
+
+ void GenerateMaterializationValueFromPredecessorsForEntry(
+ HBasicBlock* entry, const ScopedArenaVector<HInstruction*>& pred_vals) {
+ DCHECK(std::any_of(GetNoEscapeSubgraph()->GetExcludedCohorts().begin(),
+ GetNoEscapeSubgraph()->GetExcludedCohorts().end(),
+ [&](const ExecutionSubgraph::ExcludedCohort& cohort) {
+ return cohort.IsEntryBlock(entry);
+ }));
+ GenerateMaterializationValueFromPredecessorsDirect(entry, pred_vals);
+ }
+
+ private:
+ template <typename InstructionType>
+ struct InstructionUse {
+ InstructionType* instruction_;
+ size_t index_;
+ };
+
+ void ReplaceInput(const ScopedArenaVector<InstructionUse<HInstruction>>& to_replace) {
+ for (auto& [ins, idx] : to_replace) {
+ HInstruction* merged_inst = GetMaterialization(ins->GetBlock());
+ if (ins->IsPhi() && merged_inst->IsPhi() && ins->GetBlock() == merged_inst->GetBlock()) {
+ // For Phis we just pass through the appropriate inputs.
+ ins->ReplaceInput(merged_inst->InputAt(idx), idx);
+ } else {
+ ins->ReplaceInput(merged_inst, idx);
+ }
+ }
+ }
+
+ void RemoveInput(const ScopedArenaVector<HInstruction*>& to_remove) {
+ for (HInstruction* ins : to_remove) {
+ if (ins->GetBlock() == nullptr) {
+ // Already dealt with.
+ continue;
+ }
+ DCHECK(BeforeAllEscapes(ins->GetBlock())) << *ins;
+ if (ins->IsInstanceFieldGet() || ins->IsInstanceFieldSet()) {
+ ins->GetBlock()->RemoveInstruction(ins);
+ } else {
+ // Can only be obj == other, obj != other, obj == obj (!?), or obj != obj (!?).
+ // Since PHIs are escapes as far as LSE is concerned and we are before
+ // any escapes, these are the only 4 options.
+ DCHECK(ins->IsEqual() || ins->IsNotEqual()) << *ins;
+ HInstruction* replacement;
+ if (UNLIKELY(ins->InputAt(0) == ins->InputAt(1))) {
+ replacement = ins->IsEqual() ? GetGraph()->GetIntConstant(1)
+ : GetGraph()->GetIntConstant(0);
+ } else {
+ replacement = ins->IsEqual() ? GetGraph()->GetIntConstant(0)
+ : GetGraph()->GetIntConstant(1);
+ }
+ ins->ReplaceWith(replacement);
+ ins->GetBlock()->RemoveInstruction(ins);
+ }
+ }
+ }
+
+ void CreateConstructorFences(
+ const ScopedArenaVector<InstructionUse<HConstructorFence>>& constructor_fences) {
+ if (!constructor_fences.empty()) {
+ uint32_t pc = constructor_fences.front().instruction_->GetDexPc();
+ for (auto& [cf, idx] : constructor_fences) {
+ if (cf->GetInputs().size() == 1) {
+ cf->GetBlock()->RemoveInstruction(cf);
+ } else {
+ cf->RemoveInputAt(idx);
+ }
+ }
+ for (const ExecutionSubgraph::ExcludedCohort& ec :
+ GetNoEscapeSubgraph()->GetExcludedCohorts()) {
+ for (HBasicBlock* blk : ec.EntryBlocks()) {
+ for (HBasicBlock* materializer :
+ Filter(MakeIterationRange(blk->GetPredecessors()),
+ [&](HBasicBlock* blk) { return helper_->IsMaterializationBlock(blk); })) {
+ HInstruction* new_cf = new (GetGraph()->GetAllocator()) HConstructorFence(
+ GetMaterialization(materializer), pc, GetGraph()->GetAllocator());
+ materializer->InsertInstructionBefore(new_cf, materializer->GetLastInstruction());
+ }
+ }
+ }
+ }
+ }
+
+ void PredicateInstructions(
+ const ScopedArenaVector<InstructionUse<HInstruction>>& to_predicate) {
+ for (auto& [ins, idx] : to_predicate) {
+ if (UNLIKELY(ins->GetBlock() == nullptr)) {
+ // Already handled due to obj == obj;
+ continue;
+ } else if (ins->IsInstanceFieldGet()) {
+ // IFieldGet[obj] => PredicatedIFieldGet[PartialValue, obj]
+ HInstruction* new_fget = new (GetGraph()->GetAllocator()) HPredicatedInstanceFieldGet(
+ ins->AsInstanceFieldGet(),
+ GetMaterialization(ins->GetBlock()),
+ helper_->lse_->GetPartialValueAt(OriginalNewInstance(), ins));
+ MaybeRecordStat(helper_->lse_->stats_, MethodCompilationStat::kPredicatedLoadAdded);
+ ins->GetBlock()->InsertInstructionBefore(new_fget, ins);
+ if (ins->GetType() == DataType::Type::kReference) {
+ // Reference info is the same
+ new_fget->SetReferenceTypeInfo(ins->GetReferenceTypeInfo());
+ }
+ ins->ReplaceWith(new_fget);
+ ins->ReplaceEnvUsesDominatedBy(ins, new_fget);
+ CHECK(ins->GetEnvUses().empty() && ins->GetUses().empty())
+ << "Instruction: " << *ins << " uses: " << ins->GetUses()
+ << ", env: " << ins->GetEnvUses();
+ ins->GetBlock()->RemoveInstruction(ins);
+ } else if (ins->IsInstanceFieldSet()) {
+ // Any predicated sets shouldn't require movement.
+ ins->AsInstanceFieldSet()->SetIsPredicatedSet();
+ MaybeRecordStat(helper_->lse_->stats_, MethodCompilationStat::kPredicatedStoreAdded);
+ HInstruction* merged_inst = GetMaterialization(ins->GetBlock());
+ ins->ReplaceInput(merged_inst, idx);
+ } else {
+ // Comparisons need to be split into two.
+ DCHECK(ins->IsEqual() || ins->IsNotEqual()) << "bad instruction " << *ins;
+ bool this_is_first = idx == 0;
+ if (ins->InputAt(0) == ins->InputAt(1)) {
+ // This is a obj == obj or obj != obj.
+ // No idea why anyone would do this but whatever.
+ ins->ReplaceWith(GetGraph()->GetIntConstant(ins->IsEqual() ? 1 : 0));
+ ins->GetBlock()->RemoveInstruction(ins);
+ continue;
+ } else {
+ HInstruction* is_escaped = new (GetGraph()->GetAllocator())
+ HNotEqual(GetMaterialization(ins->GetBlock()), GetGraph()->GetNullConstant());
+ HInstruction* combine_inst =
+ ins->IsEqual() ? static_cast<HInstruction*>(new (GetGraph()->GetAllocator()) HAnd(
+ DataType::Type::kBool, is_escaped, ins))
+ : static_cast<HInstruction*>(new (GetGraph()->GetAllocator()) HOr(
+ DataType::Type::kBool, is_escaped, ins));
+ ins->ReplaceInput(GetMaterialization(ins->GetBlock()), this_is_first ? 0 : 1);
+ ins->GetBlock()->InsertInstructionBefore(is_escaped, ins);
+ ins->GetBlock()->InsertInstructionAfter(combine_inst, ins);
+ ins->ReplaceWith(combine_inst);
+ combine_inst->ReplaceInput(ins, 1);
+ }
+ }
+ }
+ }
+
+ // Figure out all the instructions we need to
+ // fixup/replace/remove/duplicate. Since this requires an iteration of an
+ // intrusive linked list we want to do it only once and collect all the data
+ // here.
+ void CollectReplacements(
+ ScopedArenaVector<InstructionUse<HInstruction>>& to_replace,
+ ScopedArenaVector<HInstruction*>& to_remove,
+ ScopedArenaVector<InstructionUse<HConstructorFence>>& constructor_fences,
+ ScopedArenaVector<InstructionUse<HInstruction>>& to_predicate) {
+ size_t size = new_instance_->GetUses().SizeSlow();
+ to_replace.reserve(size);
+ to_remove.reserve(size);
+ constructor_fences.reserve(size);
+ to_predicate.reserve(size);
+ for (auto& use : new_instance_->GetUses()) {
+ HBasicBlock* blk =
+ helper_->FindDominatingNonMaterializationBlock(use.GetUser()->GetBlock());
+ if (InEscapeCohort(blk)) {
+ LSE_VLOG << "Replacing " << *new_instance_ << " use in " << *use.GetUser() << " with "
+ << *GetMaterialization(blk);
+ to_replace.push_back({use.GetUser(), use.GetIndex()});
+ } else if (IsPostEscape(blk)) {
+ LSE_VLOG << "User " << *use.GetUser() << " after escapes!";
+ // The fields + cmp are normal uses. Phi can only be here if it was
+ // generated by full LSE so whatever store+load that created the phi
+ // is the escape.
+ if (use.GetUser()->IsPhi()) {
+ to_replace.push_back({use.GetUser(), use.GetIndex()});
+ } else {
+ DCHECK(use.GetUser()->IsFieldAccess() ||
+ use.GetUser()->IsEqual() ||
+ use.GetUser()->IsNotEqual())
+ << *use.GetUser() << "@" << use.GetIndex();
+ to_predicate.push_back({use.GetUser(), use.GetIndex()});
+ }
+ } else if (use.GetUser()->IsConstructorFence()) {
+ LSE_VLOG << "User " << *use.GetUser() << " being moved to materialization!";
+ constructor_fences.push_back({use.GetUser()->AsConstructorFence(), use.GetIndex()});
+ } else {
+ LSE_VLOG << "User " << *use.GetUser() << " not contained in cohort!";
+ to_remove.push_back(use.GetUser());
+ }
+ }
+ DCHECK_EQ(
+ to_replace.size() + to_remove.size() + constructor_fences.size() + to_predicate.size(),
+ size);
+ }
+
+ void GenerateMaterializationValueFromPredecessorsDirect(
+ HBasicBlock* blk, const ScopedArenaVector<HInstruction*>& pred_vals) {
+ DCHECK(!pred_vals.empty());
+ bool all_equal = std::all_of(pred_vals.begin() + 1, pred_vals.end(), [&](HInstruction* val) {
+ return val == pred_vals.front();
+ });
+ if (LIKELY(all_equal)) {
+ AddMaterialization(blk, pred_vals.front());
+ } else {
+ // Make a PHI for the predecessors.
+ HPhi* phi = new (GetGraph()->GetAllocator()) HPhi(
+ GetGraph()->GetAllocator(), kNoRegNumber, pred_vals.size(), DataType::Type::kReference);
+ for (const auto& [ins, off] : ZipCount(MakeIterationRange(pred_vals))) {
+ phi->SetRawInputAt(off, ins);
+ }
+ blk->AddPhi(phi);
+ AddMaterialization(blk, phi);
+ }
+ }
+
+ HGraph* GetGraph() const {
+ return helper_->GetGraph();
+ }
+
+ HNewInstance* new_instance_;
+ PartialLoadStoreEliminationHelper* helper_;
+ ArenaBitVector heap_locs_;
+ ScopedArenaVector<HInstruction*> materializations_;
+ const HeapLocationCollector& collector_;
+ const ExecutionSubgraph* subgraph_;
+ };
+
+ ArrayRef<HeapReferenceData> GetHeapRefs() {
+ return ArrayRef<HeapReferenceData>(heap_refs_);
+ }
+
+ bool IsMaterializationBlock(HBasicBlock* blk) const {
+ return blk->GetBlockId() >= first_materialization_block_id_;
+ }
+
+ HBasicBlock* GetOrCreateMaterializationBlock(HBasicBlock* entry, size_t pred_num) {
+ size_t idx = GetMaterializationBlockIndex(entry, pred_num);
+ HBasicBlock* blk = materialization_blocks_[idx];
+ if (blk == nullptr) {
+ blk = new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph());
+ GetGraph()->AddBlock(blk);
+ LSE_VLOG << "creating materialization block " << blk->GetBlockId() << " on edge "
+ << entry->GetPredecessors()[pred_num]->GetBlockId() << "->" << entry->GetBlockId();
+ blk->AddInstruction(new (GetGraph()->GetAllocator()) HGoto());
+ materialization_blocks_[idx] = blk;
+ }
+ return blk;
+ }
+
+ HBasicBlock* GetMaterializationBlock(HBasicBlock* entry, size_t pred_num) {
+ HBasicBlock* out = materialization_blocks_[GetMaterializationBlockIndex(entry, pred_num)];
+ DCHECK(out != nullptr) << "No materialization block for edge " << entry->GetBlockId() << "->"
+ << entry->GetPredecessors()[pred_num]->GetBlockId();
+ return out;
+ }
+
+ IterationRange<ArenaVector<HBasicBlock*>::const_iterator> IterateMaterializationBlocks() {
+ return MakeIterationRange(GetGraph()->GetBlocks().begin() + first_materialization_block_id_,
+ GetGraph()->GetBlocks().end());
+ }
+
+ void FixupPartialObjectUsers() {
+ for (PartialLoadStoreEliminationHelper::HeapReferenceData& ref_data : GetHeapRefs()) {
+ // Use the materialized instances to replace original instance
+ ref_data.FixupUses(/*first_pass=*/true);
+ CHECK(ref_data.OriginalNewInstance()->GetUses().empty())
+ << ref_data.OriginalNewInstance()->GetUses() << ", "
+ << ref_data.OriginalNewInstance()->GetEnvUses();
+ }
+ // This can cause new uses to be created due to the creation of phis/pred-get defaults.
+ for (PartialLoadStoreEliminationHelper::HeapReferenceData& ref_data : GetHeapRefs()) {
+ // Only need to handle new phis/pred-get defaults. DCHECK that's all we find.
+ ref_data.FixupUses(/*first_pass=*/false);
+ CHECK(ref_data.OriginalNewInstance()->GetUses().empty())
+ << ref_data.OriginalNewInstance()->GetUses() << ", "
+ << ref_data.OriginalNewInstance()->GetEnvUses();
+ }
+ }
+
+ // Finds the first block that either is, or dominates, the given block and is
+ // not a materialization block.
+ HBasicBlock* FindDominatingNonMaterializationBlock(HBasicBlock* blk) {
+ if (LIKELY(!IsMaterializationBlock(blk))) {
+ // Not a materialization block so itself.
+ return blk;
+ } else if (blk->GetNumberOfPredecessors() != 0) {
+ // We're far enough along that the materialization blocks have been
+ // inserted into the graph so no need to go searching.
+ return blk->GetSinglePredecessor();
+ }
+ // Search through the materialization blocks to find where it will be
+ // inserted.
+ for (auto [mat, idx] : ZipCount(MakeIterationRange(materialization_blocks_))) {
+ if (mat == blk) {
+ size_t cur_pred_idx = idx % max_preds_per_block_;
+ HBasicBlock* entry = GetGraph()->GetBlocks()[idx / max_preds_per_block_];
+ return entry->GetPredecessors()[cur_pred_idx];
+ }
+ }
+ LOG(FATAL) << "Unable to find materialization block position for " << blk->GetBlockId() << "!";
+ return nullptr;
+ }
+
+ void InsertMaterializationBlocks() {
+ for (auto [mat, idx] : ZipCount(MakeIterationRange(materialization_blocks_))) {
+ if (mat == nullptr) {
+ continue;
+ }
+ size_t cur_pred_idx = idx % max_preds_per_block_;
+ HBasicBlock* entry = GetGraph()->GetBlocks()[idx / max_preds_per_block_];
+ HBasicBlock* pred = entry->GetPredecessors()[cur_pred_idx];
+ mat->InsertBetween(pred, entry);
+ LSE_VLOG << "Adding materialization block " << mat->GetBlockId() << " on edge "
+ << pred->GetBlockId() << "->" << entry->GetBlockId();
+ }
+ }
+
+ // Replace any env-uses remaining of the partial singletons with the
+ // appropriate phis and remove the instructions.
+ void RemoveReplacedInstructions() {
+ for (HeapReferenceData& ref_data : GetHeapRefs()) {
+ CHECK(ref_data.OriginalNewInstance()->GetUses().empty())
+ << ref_data.OriginalNewInstance()->GetUses() << ", "
+ << ref_data.OriginalNewInstance()->GetEnvUses()
+ << " inst is: " << ref_data.OriginalNewInstance();
+ const auto& env_uses = ref_data.OriginalNewInstance()->GetEnvUses();
+ while (!env_uses.empty()) {
+ const HUseListNode<HEnvironment*>& use = env_uses.front();
+ HInstruction* merged_inst =
+ ref_data.GetMaterialization(use.GetUser()->GetHolder()->GetBlock());
+ LSE_VLOG << "Replacing env use of " << *use.GetUser()->GetHolder() << "@" << use.GetIndex()
+ << " with " << *merged_inst;
+ use.GetUser()->ReplaceInput(merged_inst, use.GetIndex());
+ }
+ ref_data.OriginalNewInstance()->GetBlock()->RemoveInstruction(ref_data.OriginalNewInstance());
+ }
+ }
+
+ // We need to make sure any allocations dominate their environment uses.
+ // Technically we could probably remove the env-uses and be fine but this is easy.
+ void ReorderMaterializationsForEnvDominance() {
+ for (HBasicBlock* blk : IterateMaterializationBlocks()) {
+ ScopedArenaAllocator alloc(alloc_->GetArenaStack());
+ ArenaBitVector still_unsorted(
+ &alloc, GetGraph()->GetCurrentInstructionId(), false, kArenaAllocLSE);
+ // This is guaranteed to be very short (since we will abandon LSE if there
+ // are >= kMaxNumberOfHeapLocations (32) heap locations so that is the
+ // absolute maximum size this list can be) so doing a selection sort is
+ // fine. This avoids the need to do a complicated recursive check to
+ // ensure transitivity for std::sort.
+ ScopedArenaVector<HNewInstance*> materializations(alloc.Adapter(kArenaAllocLSE));
+ materializations.reserve(GetHeapRefs().size());
+ for (HInstruction* ins :
+ MakeSTLInstructionIteratorRange(HInstructionIterator(blk->GetInstructions()))) {
+ if (ins->IsNewInstance()) {
+ materializations.push_back(ins->AsNewInstance());
+ still_unsorted.SetBit(ins->GetId());
+ }
+ }
+ using Iter = ScopedArenaVector<HNewInstance*>::iterator;
+ Iter unsorted_start = materializations.begin();
+ Iter unsorted_end = materializations.end();
+ // Selection sort. Required since the only check we can easily perform is
+ // an is-before-all-unsorted check.
+ while (unsorted_start != unsorted_end) {
+ bool found_instruction = false;
+ for (Iter candidate = unsorted_start; candidate != unsorted_end; ++candidate) {
+ HNewInstance* ni = *candidate;
+ if (std::none_of(ni->GetAllEnvironments().cbegin(),
+ ni->GetAllEnvironments().cend(),
+ [&](const HEnvironment* env) {
+ return std::any_of(
+ env->GetEnvInputs().cbegin(),
+ env->GetEnvInputs().cend(),
+ [&](const HInstruction* env_element) {
+ return env_element != nullptr &&
+ still_unsorted.IsBitSet(env_element->GetId());
+ });
+ })) {
+ still_unsorted.ClearBit(ni->GetId());
+ std::swap(*unsorted_start, *candidate);
+ ++unsorted_start;
+ found_instruction = true;
+ break;
+ }
+ }
+ CHECK(found_instruction) << "Unable to select next materialization instruction."
+ << " Environments have a dependency loop!";
+ }
+      // Reverse so that, as we prepend them, we end up with the correct order.
+ auto reverse_iter = MakeIterationRange(materializations.rbegin(), materializations.rend());
+ for (HNewInstance* ins : reverse_iter) {
+ if (blk->GetFirstInstruction() != ins) {
+          // Skip the checks, since they verify that the move is safe w.r.t.
+          // ins->CanBeMoved(), which is false for HNewInstance.
+ ins->MoveBefore(blk->GetFirstInstruction(), /*do_checks=*/false);
+ }
+ }
+ }
+ }
+
+ private:
+ void CollectInterestingHeapRefs() {
+ // Get all the partials we need to move around.
+ for (size_t i = 0; i < lse_->heap_location_collector_.GetNumberOfHeapLocations(); ++i) {
+ ReferenceInfo* ri = lse_->heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
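+      // Skip references that have already been removed from the graph and
+      // those whose allocation site is not part of the no-escape subgraph.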
+ if (ri->IsPartialSingleton() &&
+ ri->GetReference()->GetBlock() != nullptr &&
+ ri->GetNoEscapeSubgraph()->ContainsBlock(ri->GetReference()->GetBlock())) {
+ RecordHeapRefField(ri->GetReference()->AsNewInstance(), i);
+ }
+ }
+ }
+
+ void RecordHeapRefField(HNewInstance* ni, size_t loc) {
+ DCHECK(ni != nullptr);
+ // This is likely to be very short so just do a linear search.
+ auto it = std::find_if(heap_refs_.begin(), heap_refs_.end(), [&](HeapReferenceData& data) {
+ return data.OriginalNewInstance() == ni;
+ });
+ HeapReferenceData& cur_ref =
+ (it == heap_refs_.end())
+ ? heap_refs_.emplace_back(this,
+ ni,
+ lse_->heap_location_collector_.GetHeapLocation(loc)
+ ->GetReferenceInfo()
+ ->GetNoEscapeSubgraph(),
+ alloc_)
+ : *it;
+ cur_ref.AddHeapLocation(loc);
+ }
+
+ void NotifyNewMaterialization(HInstruction* ins) {
+ if (ins->IsPhi()) {
+ new_ref_phis_.push_back(ins->AsPhi());
+ }
+ }
+
+ size_t GetMaterializationBlockIndex(HBasicBlock* blk, size_t pred_num) const {
+ DCHECK_LT(blk->GetBlockId(), first_materialization_block_id_)
+ << "block is a materialization block!";
+ DCHECK_LT(pred_num, max_preds_per_block_);
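+    // Illustrative example: with max_preds_per_block_ == 2, the edge from
+    // predecessor 1 into block 3 maps to slot 3 * 2 + 1 == 7.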
+ return blk->GetBlockId() * max_preds_per_block_ + pred_num;
+ }
+
+ HGraph* GetGraph() const {
+ return lse_->GetGraph();
+ }
+
+ LSEVisitor* lse_;
+ ScopedArenaAllocator* alloc_;
+ ScopedArenaVector<HInstruction*> new_ref_phis_;
+ ScopedArenaVector<HeapReferenceData> heap_refs_;
+ size_t max_preds_per_block_;
+  // An array of (# of non-materialization blocks) * max_preds_per_block
+  // entries, arranged in block-id major order. Since there can be at most one
+  // materialization block on each edge, this is the maximum possible number
+  // of materialization blocks.
+ ScopedArenaVector<HBasicBlock*> materialization_blocks_;
+ size_t first_materialization_block_id_;
+
+ friend void LSEVisitor::MovePartialEscapes();
+ friend class HeapReferenceData;
+};
+
+// Work around C++ type-checking annoyances with not being able to forward-declare inner types.
+class HeapRefHolder
+ : public std::reference_wrapper<PartialLoadStoreEliminationHelper::HeapReferenceData> {};
+
+HInstruction* LSEVisitor::SetupPartialMaterialization(PartialLoadStoreEliminationHelper& helper,
+ HeapRefHolder&& holder,
+ size_t pred_idx,
+ HBasicBlock* entry) {
+ PartialLoadStoreEliminationHelper::HeapReferenceData& ref_data = holder.get();
+ HBasicBlock* old_pred = entry->GetPredecessors()[pred_idx];
+ HInstruction* new_inst = ref_data.OriginalNewInstance();
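+  // If the original allocation does not dominate this entry block the object
+  // cannot exist yet on this path, so null is the correct initial
+  // materialization.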
+ if (UNLIKELY(!new_inst->GetBlock()->Dominates(entry))) {
+ LSE_VLOG << "Initial materialization in non-dominating block " << entry->GetBlockId()
+ << " is null!";
+ return GetGraph()->GetNullConstant();
+ }
+ HBasicBlock* bb = helper.GetOrCreateMaterializationBlock(entry, pred_idx);
+ CHECK(bb != nullptr) << "entry " << entry->GetBlockId() << " -> " << old_pred->GetBlockId();
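+  // Clone the original allocation and mark the copy as a partial
+  // materialization.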
+ HNewInstance* repl_create = new_inst->Clone(GetGraph()->GetAllocator())->AsNewInstance();
+ repl_create->SetPartialMaterialization();
+ bb->InsertInstructionBefore(repl_create, bb->GetLastInstruction());
+ repl_create->CopyEnvironmentFrom(new_inst->GetEnvironment());
+ MaybeRecordStat(stats_, MethodCompilationStat::kPartialAllocationMoved);
+ LSE_VLOG << "In blk " << bb->GetBlockId() << " initial materialization is " << *repl_create;
+ ref_data.AddMaterialization(bb, repl_create);
+ const FieldInfo* info = nullptr;
+ for (const HeapLocation* loc : ref_data.IterateLocations()) {
+ size_t loc_off = heap_location_collector_.GetHeapLocationIndex(loc);
+ info = field_infos_[loc_off];
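+    // Partial singletons only have field locations; an array location would
+    // carry an index.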
+ DCHECK(loc->GetIndex() == nullptr);
+ Value value = ReplacementOrValue(heap_values_for_[old_pred->GetBlockId()][loc_off].value);
+ if (value.NeedsLoopPhi()) {
+ Value repl = phi_placeholder_replacements_[PhiPlaceholderIndex(value.GetPhiPlaceholder())];
+ DCHECK(!repl.IsUnknown());
+ DCHECK(repl.IsDefault() || repl.IsInvalid() || repl.IsInstruction())
+ << repl << " from " << value << " pred is " << old_pred->GetBlockId();
+ if (!repl.IsInvalid()) {
+ value = repl;
+ } else {
+ FullyMaterializePhi(value.GetPhiPlaceholder(), info->GetFieldType());
+ value = phi_placeholder_replacements_[PhiPlaceholderIndex(value.GetPhiPlaceholder())];
+ }
+ } else if (value.NeedsNonLoopPhi()) {
+ Value repl = phi_placeholder_replacements_[PhiPlaceholderIndex(value.GetPhiPlaceholder())];
+ DCHECK(repl.IsDefault() || repl.IsInvalid() || repl.IsInstruction())
+ << repl << " from " << value << " pred is " << old_pred->GetBlockId();
+ if (!repl.IsInvalid()) {
+ value = repl;
+ } else {
+ MaterializeNonLoopPhis(value.GetPhiPlaceholder(), info->GetFieldType());
+ value = phi_placeholder_replacements_[PhiPlaceholderIndex(value.GetPhiPlaceholder())];
+ }
+ }
+ DCHECK(value.IsDefault() || value.IsInstruction())
+ << GetGraph()->PrettyMethod() << ": " << value;
+
+ if (!value.IsDefault() &&
+ // shadow$_klass_ doesn't need to be manually initialized.
+ MemberOffset(loc->GetOffset()) != mirror::Object::ClassOffset()) {
+ CHECK(info != nullptr);
+ HInstruction* set_value =
+ new (GetGraph()->GetAllocator()) HInstanceFieldSet(repl_create,
+ value.GetInstruction(),
+ field_infos_[loc_off]->GetField(),
+ loc->GetType(),
+ MemberOffset(loc->GetOffset()),
+ false,
+ field_infos_[loc_off]->GetFieldIndex(),
+ loc->GetDeclaringClassDefIndex(),
+ field_infos_[loc_off]->GetDexFile(),
+ 0u);
+ bb->InsertInstructionAfter(set_value, repl_create);
+ LSE_VLOG << "Adding " << *set_value << " for materialization setup!";
+ }
+ }
+ return repl_create;
+}
+
+HInstruction* LSEVisitor::GetPartialValueAt(HNewInstance* orig_new_inst, HInstruction* read) {
+ size_t loc = heap_location_collector_.GetFieldHeapLocation(orig_new_inst, &read->GetFieldInfo());
+ Value pred = ReplacementOrValue(intermediate_values_.find(read)->second);
+ LSE_VLOG << "using " << pred << " as default value for " << *read;
+ if (pred.IsInstruction()) {
+ return pred.GetInstruction();
+ } else if (pred.IsMergedUnknown() || pred.NeedsPhi()) {
+ FullyMaterializePhi(pred.GetPhiPlaceholder(),
+ heap_location_collector_.GetHeapLocation(loc)->GetType());
+ HInstruction* res = Replacement(pred).GetInstruction();
+ LSE_VLOG << pred << " materialized to " << res->DumpWithArgs();
+ return res;
+ }
+ LOG(FATAL) << "Unable to find unescaped value at " << read->DumpWithArgs()
+ << "! This should be impossible!";
+ UNREACHABLE();
+}
+
+void LSEVisitor::MovePartialEscapes() {
+ if (!ShouldPerformPartialLSE()) {
+ return;
+ }
+
+ ScopedArenaAllocator saa(allocator_.GetArenaStack());
+ PartialLoadStoreEliminationHelper helper(this, &saa);
+
+  // For PHIs we now have more information (we know the object hasn't
+  // escaped), so clear the old phi-replacements where we weren't able to
+  // find the value.
+ PrepareForPartialPhiComputation();
+
+ for (PartialLoadStoreEliminationHelper::HeapReferenceData& ref_data : helper.GetHeapRefs()) {
+ LSE_VLOG << "Creating materializations for " << *ref_data.OriginalNewInstance();
+ // Setup entry and exit blocks.
+ for (const auto& excluded_cohort : ref_data.GetNoEscapeSubgraph()->GetExcludedCohorts()) {
+ // Setup materialization blocks.
+ for (HBasicBlock* entry : excluded_cohort.EntryBlocksReversePostOrder()) {
+ // Setup entries.
+        // TODO: Assuming we correctly break critical edges, every entry block
+        // must have only a single predecessor, so we could just put all this
+        // stuff in there. OTOH the simplifier can do that for us and this is
+        // simpler to implement - giving a clean separation between the
+        // original graph and the materialization blocks - so for now we might
+        // as well have these new blocks.
+ ScopedArenaAllocator pred_alloc(saa.GetArenaStack());
+ ScopedArenaVector<HInstruction*> pred_vals(pred_alloc.Adapter(kArenaAllocLSE));
+ pred_vals.reserve(entry->GetNumberOfPredecessors());
+ for (const auto& [pred, pred_idx] :
+ ZipCount(MakeIterationRange(entry->GetPredecessors()))) {
+ DCHECK(!helper.IsMaterializationBlock(pred));
+ if (excluded_cohort.IsEntryBlock(pred)) {
+ pred_vals.push_back(ref_data.GetMaterialization(pred));
+ continue;
+ } else {
+ pred_vals.push_back(SetupPartialMaterialization(helper, {ref_data}, pred_idx, entry));
+ }
+ }
+ ref_data.GenerateMaterializationValueFromPredecessorsForEntry(entry, pred_vals);
+ }
+
+ // Setup exit block heap-values for later phi-generation.
+ for (HBasicBlock* exit : excluded_cohort.ExitBlocks()) {
+        // Mark every exit of the cohorts as having a value so we can easily
+        // materialize the PHIs.
+        // TODO: By setting this we can easily use the normal
+        // MaterializeLoopPhis (via FullyMaterializePhis) to generate the
+        // default values for predicated-gets. This has the unfortunate side
+        // effect of creating somewhat more phis than are really needed (in
+        // some cases). We should eventually learn to lower these PHIs to just
+        // the non-escaping value where that is possible. Currently this is
+        // done to some extent in instruction_simplifier, but we have more
+        // information here to do the right thing.
+ for (const HeapLocation* loc : ref_data.IterateLocations()) {
+ size_t loc_off = heap_location_collector_.GetHeapLocationIndex(loc);
+ // This Value::Default() is only used to fill in PHIs used as the
+ // default value for PredicatedInstanceFieldGets. The actual value
+ // stored there is meaningless since the Predicated-iget will use the
+ // actual field value instead on these paths.
+ heap_values_for_[exit->GetBlockId()][loc_off].value = Value::Default();
+ }
+ }
+ }
+
+    // String the materialization through the graph.
+    // Visit RPO to PHI the materialized object through the cohort.
+ for (HBasicBlock* blk : GetGraph()->GetReversePostOrder()) {
+ // NB This doesn't include materialization blocks.
+ DCHECK(!helper.IsMaterializationBlock(blk))
+ << "Materialization blocks should not be in RPO yet.";
+ if (ref_data.HasMaterialization(blk)) {
+ continue;
+ } else if (ref_data.BeforeAllEscapes(blk)) {
+ ref_data.AddMaterialization(blk, GetGraph()->GetNullConstant());
+ continue;
+ } else {
+ ref_data.GenerateMaterializationValueFromPredecessors(blk);
+ }
+ }
+ }
+
+ // Once we've generated all the materializations we can update the users.
+ helper.FixupPartialObjectUsers();
+
+  // Actually put the materialization blocks into the graph.
+ helper.InsertMaterializationBlocks();
+
+ // Get rid of the original instructions.
+ helper.RemoveReplacedInstructions();
+
+ // Ensure everything is ordered correctly in the materialization blocks. This
+ // involves moving every NewInstance to the top and ordering them so that any
+ // required env-uses are correctly ordered.
+ helper.ReorderMaterializationsForEnvDominance();
+}
+
+void LSEVisitor::FinishFullLSE() {
// Remove recorded load instructions that should be eliminated.
for (const LoadStoreRecord& record : loads_and_stores_) {
size_t id = dchecked_integral_cast<size_t>(record.load_or_store->GetId());
@@ -2505,7 +3758,7 @@
}
}
-bool LoadStoreElimination::Run() {
+bool LoadStoreElimination::Run(bool enable_partial_lse) {
if (graph_->IsDebuggable() || graph_->HasTryCatch()) {
// Debugger may set heap values or trigger deoptimization of callers.
// Try/catch support not implemented yet.
@@ -2519,7 +3772,11 @@
// O(1) though.
graph_->ComputeReachabilityInformation();
ScopedArenaAllocator allocator(graph_->GetArenaStack());
- LoadStoreAnalysis lsa(graph_, stats_, &allocator, /*for_elimination=*/true);
+ LoadStoreAnalysis lsa(graph_,
+ stats_,
+ &allocator,
+ enable_partial_lse ? LoadStoreAnalysisType::kFull
+ : LoadStoreAnalysisType::kNoPredicatedInstructions);
lsa.Run();
const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
if (heap_location_collector.GetNumberOfHeapLocations() == 0) {
@@ -2527,9 +3784,11 @@
return false;
}
- LSEVisitor lse_visitor(graph_, heap_location_collector, stats_);
+ LSEVisitor lse_visitor(graph_, heap_location_collector, enable_partial_lse, stats_);
lse_visitor.Run();
return true;
}
+#undef LSE_VLOG
+
} // namespace art
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
index 60c547c..e73ef5e 100644
--- a/compiler/optimizing/load_store_elimination.h
+++ b/compiler/optimizing/load_store_elimination.h
@@ -25,12 +25,24 @@
class LoadStoreElimination : public HOptimization {
public:
+  // Whether or not we should attempt partial load-store elimination, which
+  // requires additional blocks and predicated instructions.
+ static constexpr bool kEnablePartialLSE = true;
+
+ // Controls whether to enable VLOG(compiler) logs explaining the transforms taking place.
+ static constexpr bool kVerboseLoggingMode = false;
+
LoadStoreElimination(HGraph* graph,
OptimizingCompilerStats* stats,
const char* name = kLoadStoreEliminationPassName)
: HOptimization(graph, name, stats) {}
- bool Run() override;
+ bool Run() override {
+ return Run(kEnablePartialLSE);
+ }
+
+ // Exposed for testing.
+ bool Run(bool enable_partial_lse);
static constexpr const char* kLoadStoreEliminationPassName = "load_store_elimination";
diff --git a/compiler/optimizing/load_store_elimination_test.cc b/compiler/optimizing/load_store_elimination_test.cc
index 9904192..9b000a1 100644
--- a/compiler/optimizing/load_store_elimination_test.cc
+++ b/compiler/optimizing/load_store_elimination_test.cc
@@ -14,38 +14,99 @@
* limitations under the License.
*/
-#include <tuple>
+#include "load_store_elimination.h"
+#include <initializer_list>
+#include <memory>
+#include <tuple>
+#include <variant>
+
+#include "base/iteration_range.h"
#include "compilation_kind.h"
+#include "dex/dex_file_types.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gtest/gtest.h"
#include "handle_scope.h"
#include "load_store_analysis.h"
-#include "load_store_elimination.h"
#include "nodes.h"
+#include "optimizing/data_type.h"
+#include "optimizing/instruction_simplifier.h"
+#include "optimizing/optimizing_compiler_stats.h"
#include "optimizing_unit_test.h"
-
-#include "gtest/gtest.h"
+#include "scoped_thread_state_change.h"
namespace art {
-class LoadStoreEliminationTest : public OptimizingUnitTest {
+struct InstructionDumper {
public:
- AdjacencyListGraph SetupFromAdjacencyList(
- const std::string_view entry_name,
- const std::string_view exit_name,
- const std::vector<AdjacencyListGraph::Edge>& adj) {
+ HInstruction* ins_;
+};
+
+bool operator==(const InstructionDumper& a, const InstructionDumper& b) {
+ return a.ins_ == b.ins_;
+}
+bool operator!=(const InstructionDumper& a, const InstructionDumper& b) {
+ return !(a == b);
+}
+
+std::ostream& operator<<(std::ostream& os, const InstructionDumper& id) {
+ if (id.ins_ == nullptr) {
+ return os << "NULL";
+ } else {
+ return os << "(" << id.ins_ << "): " << id.ins_->DumpWithArgs();
+ }
+}
+
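+// gtest helpers that, on failure, dump the offending instruction (via
+// InstructionDumper) rather than just its pointer value, e.g.
+//   EXPECT_INS_EQ(return_val->InputAt(0), cls);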
+#define CHECK_SUBROUTINE_FAILURE() \
+ do { \
+ if (HasFatalFailure()) { \
+ return; \
+ } \
+ } while (false)
+
+#define EXPECT_INS_EQ(a, b) EXPECT_EQ(InstructionDumper{a}, InstructionDumper{b})
+#define EXPECT_INS_REMOVED(a) EXPECT_TRUE(IsRemoved(a)) << "Not removed: " << (InstructionDumper{a})
+#define EXPECT_INS_RETAINED(a) EXPECT_FALSE(IsRemoved(a)) << "Removed: " << (InstructionDumper{a})
+#define ASSERT_INS_EQ(a, b) ASSERT_EQ(InstructionDumper{a}, InstructionDumper{b})
+#define ASSERT_INS_REMOVED(a) ASSERT_TRUE(IsRemoved(a)) << "Not removed: " << (InstructionDumper{a})
+#define ASSERT_INS_RETAINED(a) ASSERT_FALSE(IsRemoved(a)) << "Removed: " << (InstructionDumper{a})
+
+template <typename SuperTest>
+class LoadStoreEliminationTestBase : public SuperTest, public OptimizingUnitTestHelper {
+ public:
+ void SetUp() override {
+ SuperTest::SetUp();
+ gLogVerbosity.compiler = true;
+ }
+
+ void TearDown() override {
+ SuperTest::TearDown();
+ gLogVerbosity.compiler = false;
+ }
+
+ AdjacencyListGraph SetupFromAdjacencyList(const std::string_view entry_name,
+ const std::string_view exit_name,
+ const std::vector<AdjacencyListGraph::Edge>& adj) {
return AdjacencyListGraph(graph_, GetAllocator(), entry_name, exit_name, adj);
}
- void PerformLSE() {
+ void PerformLSE(bool with_partial = true) {
graph_->BuildDominatorTree();
- LoadStoreElimination lse(graph_, /*stats=*/ nullptr);
- lse.Run();
+ LoadStoreElimination lse(graph_, /*stats=*/nullptr);
+ lse.Run(with_partial);
std::ostringstream oss;
EXPECT_TRUE(CheckGraphSkipRefTypeInfoChecks(oss)) << oss.str();
}
+ void PerformLSEWithPartial() {
+ PerformLSE(true);
+ }
+
+ void PerformLSENoPartial() {
+ PerformLSE(false);
+ }
+
// Create instructions shared among tests.
void CreateEntryBlockInstructions() {
HInstruction* c1 = graph_->GetIntConstant(1);
@@ -108,9 +169,7 @@
}
void CreateEnvForSuspendCheck() {
- ArenaVector<HInstruction*> current_locals({array_, i_, j_},
- GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(suspend_check_, ¤t_locals);
+ ManuallyBuildEnvFor(suspend_check_, {array_, i_, j_});
}
// Create the diamond-shaped CFG:
@@ -153,15 +212,15 @@
DCHECK(block != nullptr);
DCHECK(array != nullptr);
DCHECK(index != nullptr);
- HInstruction* vload = new (GetAllocator()) HVecLoad(
- GetAllocator(),
- array,
- index,
- DataType::Type::kInt32,
- SideEffects::ArrayReadOfType(DataType::Type::kInt32),
- 4,
- /*is_string_char_at*/ false,
- kNoDexPc);
+ HInstruction* vload =
+ new (GetAllocator()) HVecLoad(GetAllocator(),
+ array,
+ index,
+ DataType::Type::kInt32,
+ SideEffects::ArrayReadOfType(DataType::Type::kInt32),
+ 4,
+ /*is_string_char_at*/ false,
+ kNoDexPc);
block->InsertInstructionBefore(vload, block->GetLastInstruction());
return vload;
}
@@ -179,22 +238,19 @@
DCHECK(index != nullptr);
if (vdata == nullptr) {
HInstruction* c1 = graph_->GetIntConstant(1);
- vdata = new (GetAllocator()) HVecReplicateScalar(GetAllocator(),
- c1,
- DataType::Type::kInt32,
- 4,
- kNoDexPc);
+ vdata = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), c1, DataType::Type::kInt32, 4, kNoDexPc);
block->InsertInstructionBefore(vdata, block->GetLastInstruction());
}
- HInstruction* vstore = new (GetAllocator()) HVecStore(
- GetAllocator(),
- array,
- index,
- vdata,
- DataType::Type::kInt32,
- SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
- 4,
- kNoDexPc);
+ HInstruction* vstore =
+ new (GetAllocator()) HVecStore(GetAllocator(),
+ array,
+ index,
+ vdata,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
block->InsertInstructionBefore(vstore, block->GetLastInstruction());
return vstore;
}
@@ -225,34 +281,153 @@
if (data == nullptr) {
data = graph_->GetIntConstant(1);
}
- HInstruction* store = new (GetAllocator()) HArraySet(array,
- index,
- data,
- DataType::Type::kInt32,
- 0);
+ HInstruction* store =
+ new (GetAllocator()) HArraySet(array, index, data, DataType::Type::kInt32, 0);
block->InsertInstructionBefore(store, block->GetLastInstruction());
return store;
}
void InitGraphAndParameters() {
InitGraph();
- AddParameter(new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32));
+ AddParameter(new (GetAllocator()) HParameterValue(
+ graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32));
array_ = parameters_.back();
- AddParameter(new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(1),
- 1,
- DataType::Type::kInt32));
+ AddParameter(new (GetAllocator()) HParameterValue(
+ graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32));
i_ = parameters_.back();
- AddParameter(new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(1),
- 2,
- DataType::Type::kInt32));
+ AddParameter(new (GetAllocator()) HParameterValue(
+ graph_->GetDexFile(), dex::TypeIndex(1), 2, DataType::Type::kInt32));
j_ = parameters_.back();
}
+ void ManuallyBuildEnvFor(HInstruction* ins, const std::initializer_list<HInstruction*>& env) {
+ ArenaVector<HInstruction*> current_locals(env, GetAllocator()->Adapter(kArenaAllocInstruction));
+ OptimizingUnitTestHelper::ManuallyBuildEnvFor(ins, ¤t_locals);
+ }
+
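+  // The Make* helpers below fill in the boilerplate constructor arguments so
+  // tests only spell out what they care about.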
+ HLoadClass* MakeClassLoad(std::optional<dex::TypeIndex> ti = std::nullopt) {
+ return new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
+ ti ? *ti : dex::TypeIndex(class_idx_++),
+ graph_->GetDexFile(),
+ /* klass= */ null_klass_,
+ /* is_referrers_class= */ false,
+ /* dex_pc= */ 0,
+ /* needs_access_check= */ false);
+ }
+
+ HNewInstance* MakeNewInstance(HInstruction* cls, uint32_t dex_pc = 0u) {
+ EXPECT_TRUE(cls->IsLoadClass() || cls->IsClinitCheck()) << *cls;
+ HLoadClass* load =
+ cls->IsLoadClass() ? cls->AsLoadClass() : cls->AsClinitCheck()->GetLoadClass();
+ return new (GetAllocator()) HNewInstance(cls,
+ dex_pc,
+ load->GetTypeIndex(),
+ graph_->GetDexFile(),
+ /* finalizable= */ false,
+ QuickEntrypointEnum::kQuickAllocObjectInitialized);
+ }
+
+ HInstanceFieldSet* MakeIFieldSet(HInstruction* inst,
+ HInstruction* data,
+ MemberOffset off,
+ uint32_t dex_pc = 0u) {
+ return new (GetAllocator()) HInstanceFieldSet(inst,
+ data,
+ /* field= */ nullptr,
+ /* field_type= */ data->GetType(),
+ /* field_offset= */ off,
+ /* is_volatile= */ false,
+ /* field_idx= */ 0,
+ /* declaring_class_def_index= */ 0,
+ graph_->GetDexFile(),
+ dex_pc);
+ }
+
+ HInstanceFieldGet* MakeIFieldGet(HInstruction* inst,
+ DataType::Type type,
+ MemberOffset off,
+ uint32_t dex_pc = 0u) {
+ return new (GetAllocator()) HInstanceFieldGet(inst,
+ /* field= */ nullptr,
+ /* field_type= */ type,
+ /* field_offset= */ off,
+ /* is_volatile= */ false,
+ /* field_idx= */ 0,
+ /* declaring_class_def_index= */ 0,
+ graph_->GetDexFile(),
+ dex_pc);
+ }
+
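+  // Each invoke gets a fresh method index so that distinct calls do not
+  // resolve to the same method reference.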
+ HInvokeStaticOrDirect* MakeInvoke(DataType::Type return_type,
+ const std::vector<HInstruction*>& args) {
+ MethodReference method_reference{/* file= */ &graph_->GetDexFile(), /* index= */ method_idx_++};
+ HInvokeStaticOrDirect* res = new (GetAllocator())
+ HInvokeStaticOrDirect(GetAllocator(),
+ args.size(),
+ return_type,
+ /* dex_pc= */ 0,
+ method_reference,
+ /* resolved_method= */ nullptr,
+ HInvokeStaticOrDirect::DispatchInfo{},
+ InvokeType::kStatic,
+ /* resolved_method_reference= */ method_reference,
+ HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ for (auto [ins, idx] : ZipCount(MakeIterationRange(args))) {
+ res->SetRawInputAt(idx, ins);
+ }
+ return res;
+ }
+
+ HPhi* MakePhi(const std::vector<HInstruction*>& ins) {
+ EXPECT_GE(ins.size(), 2u) << "Phi requires at least 2 inputs";
+ HPhi* phi =
+ new (GetAllocator()) HPhi(GetAllocator(), kNoRegNumber, ins.size(), ins[0]->GetType());
+ for (auto [i, idx] : ZipCount(MakeIterationRange(ins))) {
+ phi->SetRawInputAt(idx, i);
+ }
+ return phi;
+ }
+
+ void SetupExit(HBasicBlock* exit) {
+ exit->AddInstruction(new (GetAllocator()) HExit());
+ }
+
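+  // Arbitrary fixed type indices for each primitive type so that callers do
+  // not need to supply one.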
+ dex::TypeIndex DefaultTypeIndexForType(DataType::Type type) {
+ switch (type) {
+ case DataType::Type::kBool:
+ return dex::TypeIndex(1);
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ return dex::TypeIndex(2);
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ return dex::TypeIndex(3);
+ case DataType::Type::kUint32:
+ case DataType::Type::kInt32:
+ return dex::TypeIndex(4);
+ case DataType::Type::kUint64:
+ case DataType::Type::kInt64:
+ return dex::TypeIndex(5);
+ case DataType::Type::kReference:
+ return dex::TypeIndex(6);
+ case DataType::Type::kFloat32:
+ return dex::TypeIndex(7);
+ case DataType::Type::kFloat64:
+ return dex::TypeIndex(8);
+ case DataType::Type::kVoid:
+ EXPECT_TRUE(false) << "No type for void!";
+ return dex::TypeIndex(1000);
+ }
+ }
+
+  // Creates a parameter. The instruction is automatically added to the entry block.
+ HParameterValue* MakeParam(DataType::Type type, std::optional<dex::TypeIndex> ti = std::nullopt) {
+ HParameterValue* val = new (GetAllocator()) HParameterValue(
+ graph_->GetDexFile(), ti ? *ti : DefaultTypeIndexForType(type), param_count_++, type);
+ graph_->GetEntryBlock()->AddInstruction(val);
+ return val;
+ }
+
HBasicBlock* pre_header_;
HBasicBlock* loop_;
@@ -264,6 +439,208 @@
HInstruction* suspend_check_;
HPhi* phi_;
+
+ size_t param_count_ = 0;
+ size_t class_idx_ = 42;
+ uint32_t method_idx_ = 100;
+
+ ScopedNullHandle<mirror::Class> null_klass_;
+};
+
+class LoadStoreEliminationTest : public LoadStoreEliminationTestBase<CommonCompilerTest> {};
+
+enum class TestOrder { kSameAsAlloc, kReverseOfAlloc };
+std::ostream& operator<<(std::ostream& os, const TestOrder& ord) {
+ switch (ord) {
+ case TestOrder::kSameAsAlloc:
+ return os << "SameAsAlloc";
+ case TestOrder::kReverseOfAlloc:
+ return os << "ReverseOfAlloc";
+ }
+}
+
+class OrderDependentTestGroup
+ : public LoadStoreEliminationTestBase<CommonCompilerTestWithParam<TestOrder>> {};
+
+// Various configs we can use for testing. Currently used in PartialComparison tests.
+struct PartialComparisonKind {
+ public:
+ enum class Type : uint8_t { kEquals, kNotEquals };
+ enum class Target : uint8_t { kNull, kValue, kSelf };
+ enum class Position : uint8_t { kLeft, kRight };
+
+ const Type type_;
+ const Target target_;
+ const Position position_;
+
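+  // A partial singleton is a fresh, non-null allocation, so comparisons
+  // against null or against itself are fully determined; only kValue
+  // comparisons can go either way.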
+ bool IsDefinitelyFalse() const {
+ return !IsPossiblyTrue();
+ }
+ bool IsPossiblyFalse() const {
+ return !IsDefinitelyTrue();
+ }
+ bool IsDefinitelyTrue() const {
+ if (target_ == Target::kSelf) {
+ return type_ == Type::kEquals;
+ } else if (target_ == Target::kNull) {
+ return type_ == Type::kNotEquals;
+ } else {
+ return false;
+ }
+ }
+ bool IsPossiblyTrue() const {
+ if (target_ == Target::kSelf) {
+ return type_ == Type::kEquals;
+ } else if (target_ == Target::kNull) {
+ return type_ == Type::kNotEquals;
+ } else {
+ return true;
+ }
+ }
+ std::ostream& Dump(std::ostream& os) const {
+ os << "PartialComparisonKind{" << (type_ == Type::kEquals ? "kEquals" : "kNotEquals") << ", "
+ << (target_ == Target::kNull ? "kNull" : (target_ == Target::kSelf ? "kSelf" : "kValue"))
+ << ", " << (position_ == Position::kLeft ? "kLeft" : "kRight") << "}";
+ return os;
+ }
+};
+
+std::ostream& operator<<(std::ostream& os, const PartialComparisonKind& comp) {
+ return comp.Dump(os);
+}
+
+class PartialComparisonTestGroup
+ : public LoadStoreEliminationTestBase<CommonCompilerTestWithParam<PartialComparisonKind>> {
+ public:
+ enum class ComparisonPlacement {
+ kBeforeEscape,
+ kInEscape,
+ kAfterEscape,
+ };
+ void CheckFinalInstruction(HInstruction* ins, ComparisonPlacement placement) {
+ using Target = PartialComparisonKind::Target;
+ using Type = PartialComparisonKind::Type;
+ using Position = PartialComparisonKind::Position;
+ PartialComparisonKind kind = GetParam();
+ if (ins->IsIntConstant()) {
+ if (kind.IsDefinitelyTrue()) {
+ EXPECT_TRUE(ins->AsIntConstant()->IsTrue()) << kind << " " << *ins;
+ } else if (kind.IsDefinitelyFalse()) {
+ EXPECT_TRUE(ins->AsIntConstant()->IsFalse()) << kind << " " << *ins;
+ } else {
+ EXPECT_EQ(placement, ComparisonPlacement::kBeforeEscape);
+ EXPECT_EQ(kind.target_, Target::kValue);
+        // We are before the escape, so the value is not the object.
+ if (kind.type_ == Type::kEquals) {
+ EXPECT_TRUE(ins->AsIntConstant()->IsFalse()) << kind << " " << *ins;
+ } else {
+ EXPECT_TRUE(ins->AsIntConstant()->IsTrue()) << kind << " " << *ins;
+ }
+ }
+ return;
+ }
+ EXPECT_NE(placement, ComparisonPlacement::kBeforeEscape)
+ << "For comparisons before escape we should always be able to transform into a constant."
+ << " Instead we got:" << std::endl << ins->DumpWithArgs();
+ if (placement == ComparisonPlacement::kInEscape) {
+ // Should be the same type.
+ ASSERT_TRUE(ins->IsEqual() || ins->IsNotEqual()) << *ins;
+ HInstruction* other = kind.position_ == Position::kLeft ? ins->AsBinaryOperation()->GetRight()
+ : ins->AsBinaryOperation()->GetLeft();
+ if (kind.target_ == Target::kSelf) {
+ EXPECT_INS_EQ(ins->AsBinaryOperation()->GetLeft(), ins->AsBinaryOperation()->GetRight())
+ << " ins is: " << *ins;
+ } else if (kind.target_ == Target::kNull) {
+ EXPECT_INS_EQ(other, graph_->GetNullConstant()) << " ins is: " << *ins;
+ } else {
+ EXPECT_TRUE(other->IsStaticFieldGet()) << " ins is: " << *ins;
+ }
+ if (kind.type_ == Type::kEquals) {
+ EXPECT_TRUE(ins->IsEqual()) << *ins;
+ } else {
+ EXPECT_TRUE(ins->IsNotEqual()) << *ins;
+ }
+ } else {
+ ASSERT_EQ(placement, ComparisonPlacement::kAfterEscape);
+ if (kind.type_ == Type::kEquals) {
+ // obj == <anything> can only be true if (1) it's obj == obj or (2) obj has escaped.
+ ASSERT_TRUE(ins->IsAnd()) << ins->DumpWithArgs();
+ EXPECT_TRUE(ins->InputAt(1)->IsEqual()) << ins->DumpWithArgs();
+ } else {
+ // obj != <anything> is true if (2) obj has escaped.
+ ASSERT_TRUE(ins->IsOr()) << ins->DumpWithArgs();
+ EXPECT_TRUE(ins->InputAt(1)->IsNotEqual()) << ins->DumpWithArgs();
+ }
+      // Check that the first input of the AND/OR is the obj-has-escaped test.
+ ASSERT_TRUE(ins->InputAt(0)->IsNotEqual()) << ins->DumpWithArgs();
+ EXPECT_TRUE(ins->InputAt(0)->InputAt(0)->IsPhi()) << ins->DumpWithArgs();
+ EXPECT_TRUE(ins->InputAt(0)->InputAt(1)->IsNullConstant()) << ins->DumpWithArgs();
+      // Check that the second input of the AND/OR compares against the other value.
+ EXPECT_INS_EQ(ins->InputAt(1)->InputAt(kind.position_ == Position::kLeft ? 0 : 1),
+ ins->InputAt(0)->InputAt(0))
+ << ins->DumpWithArgs();
+ }
+ }
+
+ struct ComparisonInstructions {
+ void AddSetup(HBasicBlock* blk) const {
+ for (HInstruction* i : setup_instructions_) {
+ blk->AddInstruction(i);
+ }
+ }
+
+ void AddEnvironment(HEnvironment* env) const {
+ for (HInstruction* i : setup_instructions_) {
+ if (i->NeedsEnvironment()) {
+ i->CopyEnvironmentFrom(env);
+ }
+ }
+ }
+
+ const std::vector<HInstruction*> setup_instructions_;
+ HInstruction* const cmp_;
+ };
+
+ ComparisonInstructions GetComparisonInstructions(HInstruction* partial) {
+ PartialComparisonKind kind = GetParam();
+ std::vector<HInstruction*> setup;
+ HInstruction* target_other;
+ switch (kind.target_) {
+ case PartialComparisonKind::Target::kSelf:
+ target_other = partial;
+ break;
+ case PartialComparisonKind::Target::kNull:
+ target_other = graph_->GetNullConstant();
+ break;
+ case PartialComparisonKind::Target::kValue: {
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* static_read =
+ new (GetAllocator()) HStaticFieldGet(cls,
+ /* field= */ nullptr,
+ DataType::Type::kReference,
+ /* field_offset= */ MemberOffset(40),
+ /* is_volatile= */ false,
+ /* field_idx= */ 0,
+ /* declaring_class_def_index= */ 0,
+ graph_->GetDexFile(),
+ /* dex_pc= */ 0);
+ setup.push_back(cls);
+ setup.push_back(static_read);
+ target_other = static_read;
+ break;
+ }
+ }
+ HInstruction* target_left;
+ HInstruction* target_right;
+ std::tie(target_left, target_right) = kind.position_ == PartialComparisonKind::Position::kLeft
+ ? std::pair{partial, target_other}
+ : std::pair{target_other, partial};
+ HInstruction* cmp =
+ kind.type_ == PartialComparisonKind::Type::kEquals
+ ? static_cast<HInstruction*>(new (GetAllocator()) HEqual(target_left, target_right))
+ : static_cast<HInstruction*>(new (GetAllocator()) HNotEqual(target_left, target_right));
+ return {setup, cmp};
+ }
};
TEST_F(LoadStoreEliminationTest, ArrayGetSetElimination) {
@@ -669,10 +1046,8 @@
// Add another array parameter that may alias with `array_`.
// Note: We're not adding it to the suspend check environment.
- AddParameter(new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 3,
- DataType::Type::kInt32));
+ AddParameter(new (GetAllocator()) HParameterValue(
+ graph_->GetDexFile(), dex::TypeIndex(0), 3, DataType::Type::kInt32));
HInstruction* array2 = parameters_.back();
HInstruction* c0 = graph_->GetIntConstant(0);
@@ -931,43 +1306,14 @@
HInstruction* suspend_check = new (GetAllocator()) HSuspendCheck();
entry->AddInstruction(suspend_check);
entry->AddInstruction(new (GetAllocator()) HGoto());
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(suspend_check, ¤t_locals);
+ ManuallyBuildEnvFor(suspend_check, {});
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* const_fence = new (GetAllocator()) HConstructorFence(new_inst, 0, GetAllocator());
- HInstruction* set_field = new (GetAllocator()) HInstanceFieldSet(new_inst,
- graph_->GetIntConstant(33),
- nullptr,
- DataType::Type::kReference,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* get_field = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kReference,
- mirror::Object::ClassOffset(),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* set_field = MakeIFieldSet(new_inst, graph_->GetIntConstant(33), MemberOffset(32));
+ HInstruction* get_field =
+ MakeIFieldGet(new_inst, DataType::Type::kReference, mirror::Object::ClassOffset());
HInstruction* return_val = new (GetAllocator()) HReturn(get_field);
main->AddInstruction(cls);
main->AddInstruction(new_inst);
@@ -978,17 +1324,17 @@
cls->CopyEnvironmentFrom(suspend_check->GetEnvironment());
new_inst->CopyEnvironmentFrom(suspend_check->GetEnvironment());
- exit->AddInstruction(new (GetAllocator()) HExit());
+ SetupExit(exit);
graph_->ClearDominanceInformation();
PerformLSE();
- EXPECT_TRUE(IsRemoved(new_inst));
- EXPECT_TRUE(IsRemoved(const_fence));
- EXPECT_TRUE(IsRemoved(get_field));
- EXPECT_TRUE(IsRemoved(set_field));
- EXPECT_FALSE(IsRemoved(cls));
- EXPECT_EQ(cls, return_val->InputAt(0));
+ EXPECT_INS_REMOVED(new_inst);
+ EXPECT_INS_REMOVED(const_fence);
+ EXPECT_INS_REMOVED(get_field);
+ EXPECT_INS_REMOVED(set_field);
+ EXPECT_INS_RETAINED(cls);
+ EXPECT_INS_EQ(cls, return_val->InputAt(0));
}
// Object o = new Obj();
@@ -1011,43 +1357,14 @@
HInstruction* suspend_check = new (GetAllocator()) HSuspendCheck();
entry->AddInstruction(suspend_check);
entry->AddInstruction(new (GetAllocator()) HGoto());
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(suspend_check, ¤t_locals);
+ ManuallyBuildEnvFor(suspend_check, {});
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* const_fence = new (GetAllocator()) HConstructorFence(new_inst, 0, GetAllocator());
- HInstruction* set_field = new (GetAllocator()) HInstanceFieldSet(new_inst,
- graph_->GetIntConstant(33),
- nullptr,
- DataType::Type::kReference,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* get_field = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- mirror::Object::MonitorOffset(),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* set_field = MakeIFieldSet(new_inst, graph_->GetIntConstant(33), MemberOffset(32));
+ HInstruction* get_field =
+ MakeIFieldGet(new_inst, DataType::Type::kInt32, mirror::Object::MonitorOffset());
HInstruction* return_val = new (GetAllocator()) HReturn(get_field);
main->AddInstruction(cls);
main->AddInstruction(new_inst);
@@ -1058,17 +1375,17 @@
cls->CopyEnvironmentFrom(suspend_check->GetEnvironment());
new_inst->CopyEnvironmentFrom(suspend_check->GetEnvironment());
- exit->AddInstruction(new (GetAllocator()) HExit());
+ SetupExit(exit);
graph_->ClearDominanceInformation();
PerformLSE();
- EXPECT_TRUE(IsRemoved(new_inst));
- EXPECT_TRUE(IsRemoved(const_fence));
- EXPECT_TRUE(IsRemoved(get_field));
- EXPECT_TRUE(IsRemoved(set_field));
- EXPECT_FALSE(IsRemoved(cls));
- EXPECT_EQ(graph_->GetIntConstant(0), return_val->InputAt(0));
+ EXPECT_INS_REMOVED(new_inst);
+ EXPECT_INS_REMOVED(const_fence);
+ EXPECT_INS_REMOVED(get_field);
+ EXPECT_INS_REMOVED(set_field);
+ EXPECT_INS_RETAINED(cls);
+ EXPECT_INS_EQ(graph_->GetIntConstant(0), return_val->InputAt(0));
}
// void DO_CAL() {
@@ -1083,7 +1400,8 @@
// return t;
// }
TEST_F(LoadStoreEliminationTest, ArrayLoopOverlap) {
- CreateGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blocks(graph_,
GetAllocator(),
"entry",
@@ -1114,8 +1432,7 @@
loop_pre_header->AddInstruction(alloc_w);
loop_pre_header->AddInstruction(pre_header_goto);
// environment
- ArenaVector<HInstruction*> alloc_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(alloc_w, &alloc_locals);
+ ManuallyBuildEnvFor(alloc_w, {});
// loop-start
HPhi* i_phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
@@ -1140,44 +1457,18 @@
t_phi->AddInput(zero_const);
// environment
- ArenaVector<HInstruction*> suspend_locals({ alloc_w, i_phi, t_phi },
- GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(suspend, &suspend_locals);
+ ManuallyBuildEnvFor(suspend, { alloc_w, i_phi, t_phi });
// BODY
HInstruction* last_i = new (GetAllocator()) HSub(DataType::Type::kInt32, i_phi, one_const);
HInstruction* last_get =
new (GetAllocator()) HArrayGet(alloc_w, last_i, DataType::Type::kInt32, 0);
- HInvoke* body_value = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 2,
- DataType::Type::kInt32,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- body_value->SetRawInputAt(0, last_get);
- body_value->SetRawInputAt(1, one_const);
+ HInvoke* body_value = MakeInvoke(DataType::Type::kInt32, { last_get, one_const });
HInstruction* body_set =
new (GetAllocator()) HArraySet(alloc_w, i_phi, body_value, DataType::Type::kInt32, 0);
HInstruction* body_get =
new (GetAllocator()) HArrayGet(alloc_w, i_phi, DataType::Type::kInt32, 0);
- HInvoke* t_next = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 2,
- DataType::Type::kInt32,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- t_next->SetRawInputAt(0, body_get);
- t_next->SetRawInputAt(1, t_phi);
+ HInvoke* t_next = MakeInvoke(DataType::Type::kInt32, { body_get, t_phi });
HInstruction* i_next = new (GetAllocator()) HAdd(DataType::Type::kInt32, i_phi, one_const);
HInstruction* body_goto = new (GetAllocator()) HGoto();
loop_body->AddInstruction(last_i);
@@ -1199,8 +1490,7 @@
loop_post->AddInstruction(return_inst);
// exit
- HInstruction* exit_inst = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_inst);
+ SetupExit(exit);
graph_->ClearDominanceInformation();
graph_->ClearLoopInformation();
@@ -1211,18 +1501,17 @@
// back into the array.
if (IsRemoved(last_get)) {
// If we were able to remove the previous read the entire array should be removable.
- EXPECT_TRUE(IsRemoved(body_set));
- EXPECT_TRUE(IsRemoved(alloc_w));
+ EXPECT_INS_REMOVED(body_set);
+ EXPECT_INS_REMOVED(alloc_w);
} else {
// This is the branch we actually take for now. If we rely on being able to
// read the array we'd better remember to write to it as well.
- EXPECT_FALSE(IsRemoved(body_set));
+ EXPECT_INS_RETAINED(body_set);
}
// The last 'get' should always be removable.
- EXPECT_TRUE(IsRemoved(body_get));
+ EXPECT_INS_REMOVED(body_get);
}
-
// void DO_CAL2() {
// int i = 1;
// int[] w = new int[80];
@@ -1239,7 +1528,8 @@
// return t;
// }
TEST_F(LoadStoreEliminationTest, ArrayLoopOverlap2) {
- CreateGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blocks(graph_,
GetAllocator(),
"entry",
@@ -1270,8 +1560,7 @@
loop_pre_header->AddInstruction(alloc_w);
loop_pre_header->AddInstruction(pre_header_goto);
// environment
- ArenaVector<HInstruction*> alloc_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(alloc_w, &alloc_locals);
+ ManuallyBuildEnvFor(alloc_w, {});
// loop-start
HPhi* i_phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
@@ -1296,50 +1585,24 @@
t_phi->AddInput(zero_const);
// environment
- ArenaVector<HInstruction*> suspend_locals({ alloc_w, i_phi, t_phi },
- GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(suspend, &suspend_locals);
+ ManuallyBuildEnvFor(suspend, { alloc_w, i_phi, t_phi });
// BODY
HInstruction* last_i = new (GetAllocator()) HSub(DataType::Type::kInt32, i_phi, one_const);
- HInstruction* last_get_1, *last_get_2, *last_get_3;
- HInstruction* body_value_1, *body_value_2, *body_value_3;
- HInstruction* body_set_1, *body_set_2, *body_set_3;
- HInstruction* body_get_1, *body_get_2, *body_get_3;
- HInstruction* t_next_1, *t_next_2, *t_next_3;
+ HInstruction *last_get_1, *last_get_2, *last_get_3;
+ HInstruction *body_value_1, *body_value_2, *body_value_3;
+ HInstruction *body_set_1, *body_set_2, *body_set_3;
+ HInstruction *body_get_1, *body_get_2, *body_get_3;
+ HInstruction *t_next_1, *t_next_2, *t_next_3;
auto make_instructions = [&](HInstruction* last_t_value) {
HInstruction* last_get =
new (GetAllocator()) HArrayGet(alloc_w, last_i, DataType::Type::kInt32, 0);
- HInvoke* body_value = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 2,
- DataType::Type::kInt32,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- body_value->SetRawInputAt(0, last_get);
- body_value->SetRawInputAt(1, one_const);
+ HInvoke* body_value = MakeInvoke(DataType::Type::kInt32, { last_get, one_const });
HInstruction* body_set =
new (GetAllocator()) HArraySet(alloc_w, i_phi, body_value, DataType::Type::kInt32, 0);
HInstruction* body_get =
new (GetAllocator()) HArrayGet(alloc_w, i_phi, DataType::Type::kInt32, 0);
- HInvoke* t_next = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 2,
- DataType::Type::kInt32,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- t_next->SetRawInputAt(0, body_get);
- t_next->SetRawInputAt(1, last_t_value);
+ HInvoke* t_next = MakeInvoke(DataType::Type::kInt32, { body_get, last_t_value });
loop_body->AddInstruction(last_get);
loop_body->AddInstruction(body_value);
loop_body->AddInstruction(body_set);
@@ -1372,8 +1635,7 @@
loop_post->AddInstruction(return_inst);
// exit
- HInstruction* exit_inst = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_inst);
+ SetupExit(exit);
graph_->ClearDominanceInformation();
graph_->ClearLoopInformation();
@@ -1384,28 +1646,29 @@
// back into the array.
if (IsRemoved(last_get_1)) {
// If we were able to remove the previous read the entire array should be removable.
- EXPECT_TRUE(IsRemoved(body_set_1));
- EXPECT_TRUE(IsRemoved(body_set_2));
- EXPECT_TRUE(IsRemoved(body_set_3));
- EXPECT_TRUE(IsRemoved(last_get_1));
- EXPECT_TRUE(IsRemoved(last_get_2));
- EXPECT_TRUE(IsRemoved(alloc_w));
+ EXPECT_INS_REMOVED(body_set_1);
+ EXPECT_INS_REMOVED(body_set_2);
+ EXPECT_INS_REMOVED(body_set_3);
+ EXPECT_INS_REMOVED(last_get_1);
+ EXPECT_INS_REMOVED(last_get_2);
+ EXPECT_INS_REMOVED(alloc_w);
} else {
// This is the branch we actually take for now. If we rely on being able to
// read the array we'd better remember to write to it as well.
- EXPECT_FALSE(IsRemoved(body_set_3));
+ EXPECT_INS_RETAINED(body_set_3);
}
// The last 'get' should always be removable.
- EXPECT_TRUE(IsRemoved(body_get_1));
- EXPECT_TRUE(IsRemoved(body_get_2));
- EXPECT_TRUE(IsRemoved(body_get_3));
+ EXPECT_INS_REMOVED(body_get_1);
+ EXPECT_INS_REMOVED(body_get_2);
+ EXPECT_INS_REMOVED(body_get_3);
// shadowed writes should always be removed
- EXPECT_TRUE(IsRemoved(body_set_1));
- EXPECT_TRUE(IsRemoved(body_set_2));
+ EXPECT_INS_REMOVED(body_set_1);
+ EXPECT_INS_REMOVED(body_set_2);
}
TEST_F(LoadStoreEliminationTest, ArrayNonLoopPhi) {
- CreateGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blocks(graph_,
GetAllocator(),
"entry",
@@ -1428,10 +1691,9 @@
HInstruction* zero_const = graph_->GetConstant(DataType::Type::kInt32, 0);
HInstruction* one_const = graph_->GetConstant(DataType::Type::kInt32, 1);
HInstruction* two_const = graph_->GetConstant(DataType::Type::kInt32, 2);
- HInstruction* param = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 0, DataType::Type::kBool);
+ HInstruction* param = MakeParam(DataType::Type::kBool);
+
HInstruction* entry_goto = new (GetAllocator()) HGoto();
- entry->AddInstruction(param);
entry->AddInstruction(entry_goto);
HInstruction* alloc_w = new (GetAllocator()) HNewArray(zero_const, two_const, 0, 0);
@@ -1439,22 +1701,10 @@
start->AddInstruction(alloc_w);
start->AddInstruction(branch);
// environment
- ArenaVector<HInstruction*> alloc_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(alloc_w, &alloc_locals);
+ ManuallyBuildEnvFor(alloc_w, {});
// left
- HInvoke* left_value = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kInt32,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- left_value->SetRawInputAt(0, zero_const);
+ HInvoke* left_value = MakeInvoke(DataType::Type::kInt32, { zero_const });
HInstruction* left_set_1 =
new (GetAllocator()) HArraySet(alloc_w, zero_const, left_value, DataType::Type::kInt32, 0);
HInstruction* left_set_2 =
@@ -1464,23 +1714,10 @@
left->AddInstruction(left_set_1);
left->AddInstruction(left_set_2);
left->AddInstruction(left_goto);
- ArenaVector<HInstruction*> left_locals({ alloc_w },
- GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(left_value, &alloc_locals);
+ ManuallyBuildEnvFor(left_value, { alloc_w });
// right
- HInvoke* right_value = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kInt32,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- right_value->SetRawInputAt(0, one_const);
+ HInvoke* right_value = MakeInvoke(DataType::Type::kInt32, { one_const });
HInstruction* right_set_1 =
new (GetAllocator()) HArraySet(alloc_w, zero_const, right_value, DataType::Type::kInt32, 0);
HInstruction* right_set_2 =
@@ -1490,9 +1727,7 @@
right->AddInstruction(right_set_1);
right->AddInstruction(right_set_2);
right->AddInstruction(right_goto);
- ArenaVector<HInstruction*> right_locals({ alloc_w },
- GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(right_value, &alloc_locals);
+ ManuallyBuildEnvFor(right_value, { alloc_w });
// ret
HInstruction* read_1 =
@@ -1507,27 +1742,27 @@
ret->AddInstruction(return_inst);
// exit
- HInstruction* exit_inst = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_inst);
+ SetupExit(exit);
graph_->ClearDominanceInformation();
graph_->ClearLoopInformation();
PerformLSE();
- EXPECT_TRUE(IsRemoved(read_1));
- EXPECT_TRUE(IsRemoved(read_2));
- EXPECT_TRUE(IsRemoved(left_set_1));
- EXPECT_TRUE(IsRemoved(left_set_2));
- EXPECT_TRUE(IsRemoved(right_set_1));
- EXPECT_TRUE(IsRemoved(right_set_2));
- EXPECT_TRUE(IsRemoved(alloc_w));
+ EXPECT_INS_REMOVED(read_1);
+ EXPECT_INS_REMOVED(read_2);
+ EXPECT_INS_REMOVED(left_set_1);
+ EXPECT_INS_REMOVED(left_set_2);
+ EXPECT_INS_REMOVED(right_set_1);
+ EXPECT_INS_REMOVED(right_set_2);
+ EXPECT_INS_REMOVED(alloc_w);
- EXPECT_FALSE(IsRemoved(left_value));
- EXPECT_FALSE(IsRemoved(right_value));
+ EXPECT_INS_RETAINED(left_value);
+ EXPECT_INS_RETAINED(right_value);
}
TEST_F(LoadStoreEliminationTest, ArrayMergeDefault) {
- CreateGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blocks(graph_,
GetAllocator(),
"entry",
@@ -1550,10 +1785,9 @@
HInstruction* zero_const = graph_->GetConstant(DataType::Type::kInt32, 0);
HInstruction* one_const = graph_->GetConstant(DataType::Type::kInt32, 1);
HInstruction* two_const = graph_->GetConstant(DataType::Type::kInt32, 2);
- HInstruction* param = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 0, DataType::Type::kBool);
+ HInstruction* param = MakeParam(DataType::Type::kBool);
HInstruction* entry_goto = new (GetAllocator()) HGoto();
- entry->AddInstruction(param);
+
entry->AddInstruction(entry_goto);
HInstruction* alloc_w = new (GetAllocator()) HNewArray(zero_const, two_const, 0, 0);
@@ -1562,7 +1796,7 @@
start->AddInstruction(branch);
// environment
ArenaVector<HInstruction*> alloc_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(alloc_w, &alloc_locals);
+ ManuallyBuildEnvFor(alloc_w, {});
// left
HInstruction* left_set_1 =
@@ -1597,20 +1831,19 @@
ret->AddInstruction(return_inst);
// exit
- HInstruction* exit_inst = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_inst);
+ SetupExit(exit);
graph_->ClearDominanceInformation();
graph_->ClearLoopInformation();
PerformLSE();
- EXPECT_TRUE(IsRemoved(read_1));
- EXPECT_TRUE(IsRemoved(read_2));
- EXPECT_TRUE(IsRemoved(left_set_1));
- EXPECT_TRUE(IsRemoved(left_set_2));
- EXPECT_TRUE(IsRemoved(right_set_1));
- EXPECT_TRUE(IsRemoved(right_set_2));
- EXPECT_TRUE(IsRemoved(alloc_w));
+ EXPECT_INS_REMOVED(read_1);
+ EXPECT_INS_REMOVED(read_2);
+ EXPECT_INS_REMOVED(left_set_1);
+ EXPECT_INS_REMOVED(left_set_2);
+ EXPECT_INS_REMOVED(right_set_1);
+ EXPECT_INS_REMOVED(right_set_2);
+ EXPECT_INS_REMOVED(alloc_w);
}
// // ENTRY
@@ -1651,22 +1884,22 @@
CreateGraph();
AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
"exit",
- { { "entry", "bswitch" },
- { "bswitch", "case1" },
- { "bswitch", "case2" },
- { "bswitch", "case3" },
- { "case1", "breturn" },
- { "case2", "breturn" },
- { "case3", "loop_pre_header" },
- { "loop_pre_header", "loop_header" },
- { "loop_header", "loop_body" },
- { "loop_body", "loop_if_left" },
- { "loop_body", "loop_if_right" },
- { "loop_if_left", "loop_end" },
- { "loop_if_right", "loop_end" },
- { "loop_end", "loop_header" },
- { "loop_header", "breturn" },
- { "breturn", "exit" } }));
+ {{"entry", "bswitch"},
+ {"bswitch", "case1"},
+ {"bswitch", "case2"},
+ {"bswitch", "case3"},
+ {"case1", "breturn"},
+ {"case2", "breturn"},
+ {"case3", "loop_pre_header"},
+ {"loop_pre_header", "loop_header"},
+ {"loop_header", "loop_body"},
+ {"loop_body", "loop_if_left"},
+ {"loop_body", "loop_if_right"},
+ {"loop_if_left", "loop_end"},
+ {"loop_if_right", "loop_end"},
+ {"loop_end", "loop_header"},
+ {"loop_header", "breturn"},
+ {"breturn", "exit"}}));
#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
GET_BLOCK(entry);
GET_BLOCK(bswitch);
@@ -1683,104 +1916,41 @@
GET_BLOCK(loop_if_right);
GET_BLOCK(loop_end);
#undef GET_BLOCK
- HInstruction* switch_val = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
+ HInstruction* switch_val = MakeParam(DataType::Type::kInt32);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
HInstruction* c3 = graph_->GetIntConstant(3);
HInstruction* c5 = graph_->GetIntConstant(5);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* entry_goto = new (GetAllocator()) HGoto();
- entry->AddInstruction(switch_val);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(entry_goto);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, ¤t_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, switch_val);
bswitch->AddInstruction(switch_inst);
- HInstruction* write_c1 = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call_c1 = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* write_c1 = MakeIFieldSet(new_inst, c1, MemberOffset(32));
+ HInstruction* call_c1 = MakeInvoke(DataType::Type::kVoid, { new_inst });
HInstruction* goto_c1 = new (GetAllocator()) HGoto();
- call_c1->AsInvoke()->SetRawInputAt(0, new_inst);
case1->AddInstruction(write_c1);
case1->AddInstruction(call_c1);
case1->AddInstruction(goto_c1);
call_c1->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_c2 = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call_c2 = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* write_c2 = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* call_c2 = MakeInvoke(DataType::Type::kVoid, { new_inst });
HInstruction* goto_c2 = new (GetAllocator()) HGoto();
- call_c2->AsInvoke()->SetRawInputAt(0, new_inst);
case2->AddInstruction(write_c2);
case2->AddInstruction(call_c2);
case2->AddInstruction(goto_c2);
call_c2->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_c3 = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c3,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_c3 = MakeIFieldSet(new_inst, c3, MemberOffset(32));
HInstruction* goto_c3 = new (GetAllocator()) HGoto();
case3->AddInstruction(write_c3);
case3->AddInstruction(goto_c3);
@@ -1789,17 +1959,7 @@
loop_pre_header->AddInstruction(goto_preheader);
HInstruction* suspend_check_header = new (GetAllocator()) HSuspendCheck();
- HInstruction* call_loop_header = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 0,
- DataType::Type::kBool,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* call_loop_header = MakeInvoke(DataType::Type::kBool, {});
HInstruction* if_loop_header = new (GetAllocator()) HIf(call_loop_header);
loop_header->AddInstruction(suspend_check_header);
loop_header->AddInstruction(call_loop_header);
@@ -1807,17 +1967,7 @@
call_loop_header->CopyEnvironmentFrom(cls->GetEnvironment());
suspend_check_header->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* call_loop_body = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 0,
- DataType::Type::kBool,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* call_loop_body = MakeInvoke(DataType::Type::kBool, {});
HInstruction* if_loop_body = new (GetAllocator()) HIf(call_loop_body);
loop_body->AddInstruction(call_loop_body);
loop_body->AddInstruction(if_loop_body);
@@ -1826,16 +1976,7 @@
HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
loop_if_left->AddInstruction(goto_loop_left);
- HInstruction* write_loop_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c5,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_loop_right = MakeIFieldSet(new_inst, c5, MemberOffset(32));
HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
loop_if_right->AddInstruction(write_loop_right);
loop_if_right->AddInstruction(goto_loop_right);
@@ -1843,31 +1984,23 @@
HInstruction* goto_loop_end = new (GetAllocator()) HGoto();
loop_end->AddInstruction(goto_loop_end);
- HInstruction* read_bottom = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
breturn->AddInstruction(read_bottom);
breturn->AddInstruction(return_exit);
- HInstruction* exit_ins = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_ins);
+ SetupExit(exit);
+
// PerformLSE expects this to be empty.
graph_->ClearDominanceInformation();
- PerformLSE();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSENoPartial();
- EXPECT_FALSE(IsRemoved(read_bottom));
- EXPECT_FALSE(IsRemoved(write_c1));
- EXPECT_FALSE(IsRemoved(write_c2));
- EXPECT_FALSE(IsRemoved(write_c3));
- // EXPECT_FALSE(IsRemoved(write_loop_left));
- EXPECT_FALSE(IsRemoved(write_loop_right));
+ EXPECT_INS_RETAINED(read_bottom);
+ EXPECT_INS_RETAINED(write_c1);
+ EXPECT_INS_RETAINED(write_c2);
+ EXPECT_INS_RETAINED(write_c3);
+ EXPECT_INS_RETAINED(write_loop_right);
}
// // ENTRY
@@ -1887,7 +2020,8 @@
// EXIT
// return PHI(foo_l, foo_r)
TEST_F(LoadStoreEliminationTest, PartialLoadElimination) {
- InitGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
"exit_REAL",
{ { "entry", "left" },
@@ -1899,99 +2033,37 @@
HBasicBlock* left = blks.Get("left");
HBasicBlock* right = blks.Get("right");
HBasicBlock* exit = blks.Get("exit");
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(if_inst);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, &current_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_left = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- HInstruction* read_left = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(16),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* read_left = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(16));
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
left->AddInstruction(write_left);
left->AddInstruction(call_left);
left->AddInstruction(read_left);
left->AddInstruction(goto_left);
call_left->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(16),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* read_right = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(16),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(16));
+ HInstruction* read_right = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(16));
HInstruction* goto_right = new (GetAllocator()) HGoto();
right->AddInstruction(write_right);
right->AddInstruction(read_right);
right->AddInstruction(goto_right);
- HInstruction* phi_final =
- new (GetAllocator()) HPhi(GetAllocator(), 12, 2, DataType::Type::kInt32);
- phi_final->SetRawInputAt(0, read_left);
- phi_final->SetRawInputAt(1, read_right);
+ HInstruction* phi_final = MakePhi({read_left, read_right});
HInstruction* return_exit = new (GetAllocator()) HReturn(phi_final);
exit->AddPhi(phi_final->AsPhi());
exit->AddInstruction(return_exit);
@@ -2022,10 +2094,10 @@
// }
// EXIT
// return obj.field
-// TODO We eventually want to be able to eliminate the right write along with the final read but
-// will need either new blocks or new instructions.
+// This test runs with partial LSE disabled.
TEST_F(LoadStoreEliminationTest, PartialLoadPreserved) {
- InitGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
"exit_REAL",
{ { "entry", "left" },
@@ -2037,93 +2109,42 @@
HBasicBlock* left = blks.Get("left");
HBasicBlock* right = blks.Get("right");
HBasicBlock* exit = blks.Get("exit");
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(if_inst);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, &current_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_left = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
left->AddInstruction(write_left);
left->AddInstruction(call_left);
left->AddInstruction(goto_left);
call_left->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
HInstruction* goto_right = new (GetAllocator()) HGoto();
right->AddInstruction(write_right);
right->AddInstruction(goto_right);
- HInstruction* read_bottom = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
exit->AddInstruction(read_bottom);
exit->AddInstruction(return_exit);
// PerformLSE expects this to be empty.
graph_->ClearDominanceInformation();
- PerformLSE();
+ PerformLSENoPartial();
- ASSERT_FALSE(IsRemoved(read_bottom));
- ASSERT_FALSE(IsRemoved(write_right));
+ EXPECT_INS_RETAINED(read_bottom) << *read_bottom;
+ EXPECT_INS_RETAINED(write_right) << *write_right;
}
// // ENTRY
@@ -2144,10 +2165,10 @@
// }
// EXIT
// return obj.field
-// TODO We eventually want to be able to eliminate the right write along with the final read but
-// will need either new blocks or new instructions.
+// NB This test is for the non-partial LSE flow. Normally the obj.field writes would be removed.
TEST_F(LoadStoreEliminationTest, PartialLoadPreserved2) {
- InitGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
"exit_REAL",
{ { "entry", "left" },
@@ -2166,60 +2187,24 @@
HBasicBlock* right_second = blks.Get("right_second");
HBasicBlock* right_end = blks.Get("right_end");
HBasicBlock* exit = blks.Get("exit");
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
- HInstruction* bool_value_2 = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 2, DataType::Type::kBool);
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* bool_value_2 = MakeParam(DataType::Type::kBool);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
- entry->AddInstruction(bool_value_2);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(if_inst);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, &current_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_left = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
left->AddInstruction(write_left);
left->AddInstruction(call_left);
left->AddInstruction(goto_left);
@@ -2228,30 +2213,12 @@
HInstruction* right_if = new (GetAllocator()) HIf(bool_value_2);
right_start->AddInstruction(right_if);
- HInstruction* write_right_first = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_right_first = MakeIFieldSet(new_inst, c2, MemberOffset(32));
HInstruction* goto_right_first = new (GetAllocator()) HGoto();
right_first->AddInstruction(write_right_first);
right_first->AddInstruction(goto_right_first);
- HInstruction* write_right_second = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c3,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_right_second = MakeIFieldSet(new_inst, c3, MemberOffset(32));
HInstruction* goto_right_second = new (GetAllocator()) HGoto();
right_second->AddInstruction(write_right_second);
right_second->AddInstruction(goto_right_second);
@@ -2259,25 +2226,17 @@
HInstruction* goto_right_end = new (GetAllocator()) HGoto();
right_end->AddInstruction(goto_right_end);
- HInstruction* read_bottom = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
exit->AddInstruction(read_bottom);
exit->AddInstruction(return_exit);
// PerformLSE expects this to be empty.
graph_->ClearDominanceInformation();
- PerformLSE();
+ PerformLSENoPartial();
- ASSERT_FALSE(IsRemoved(read_bottom));
- EXPECT_FALSE(IsRemoved(write_right_first));
- EXPECT_FALSE(IsRemoved(write_right_second));
+ EXPECT_INS_RETAINED(read_bottom);
+ EXPECT_INS_RETAINED(write_right_first);
+ EXPECT_INS_RETAINED(write_right_second);
}
// // ENTRY
@@ -2296,14 +2255,15 @@
// ELIMINATE
// return obj.field
TEST_F(LoadStoreEliminationTest, PartialLoadElimination2) {
- InitGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
"exit",
- { { "entry", "left" },
- { "entry", "right" },
- { "left", "breturn"},
- { "right", "breturn" },
- { "breturn", "exit" } }));
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
GET_BLOCK(entry);
GET_BLOCK(exit);
@@ -2311,98 +2271,1888 @@
GET_BLOCK(left);
GET_BLOCK(right);
#undef GET_BLOCK
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(if_inst);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, &current_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- HInstruction* write_left = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
left->AddInstruction(call_left);
left->AddInstruction(write_left);
left->AddInstruction(goto_left);
call_left->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
HInstruction* goto_right = new (GetAllocator()) HGoto();
right->AddInstruction(write_right);
right->AddInstruction(goto_right);
- HInstruction* read_bottom = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
breturn->AddInstruction(read_bottom);
breturn->AddInstruction(return_exit);
- HInstruction* exit_instruction = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_instruction);
+ SetupExit(exit);
+
// PerformLSE expects this to be empty.
graph_->ClearDominanceInformation();
PerformLSE();
- EXPECT_TRUE(IsRemoved(read_bottom));
- EXPECT_TRUE(IsRemoved(write_right));
- EXPECT_FALSE(IsRemoved(write_left));
- EXPECT_FALSE(IsRemoved(call_left));
+ EXPECT_INS_REMOVED(read_bottom);
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_RETAINED(write_left);
+ EXPECT_INS_RETAINED(call_left);
+}
+
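+// A visitor that pattern-matches on instruction kinds. It is constructed with
+// a set of callables, each taking a pointer to a single concrete HInstruction
+// subclass (e.g. HNewInstance*), and invokes the matching callable for every
+// instruction it visits. Instructions without a registered handler are skipped.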
+class PatternMatchGraphVisitor : public HGraphVisitor {
+ private:
+ struct HandlerWrapper {
+ public:
+ virtual ~HandlerWrapper() {}
+ virtual void operator()(HInstruction* h) = 0;
+ };
+
+ template <HInstruction::InstructionKind kKind, typename F>
+ struct KindWrapper;
+
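+// Generates one KindWrapper specialization per concrete instruction kind. Each
+// wrapper downcasts the visited instruction to its concrete type and forwards
+// it to the stored callable; a callable that cannot accept that type aborts
+// via LOG(FATAL).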
+#define GEN_HANDLER(nm, unused) \
+ template <typename F> \
+ struct KindWrapper<HInstruction::InstructionKind::k##nm, F> : public HandlerWrapper { \
+ public: \
+ explicit KindWrapper(F f) : f_(f) {} \
+ void operator()(HInstruction* h) override { \
+ if constexpr (std::is_invocable_v<F, H##nm*>) { \
+ f_(h->As##nm()); \
+ } else { \
+ LOG(FATAL) << "Incorrect call with " << #nm; \
+ } \
+ } \
+ \
+ private: \
+ F f_; \
+ };
+
+ FOR_EACH_CONCRETE_INSTRUCTION(GEN_HANDLER)
+#undef GEN_HANDLER
+
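+  // Selects, at runtime, the KindWrapper specialization matching the given
+  // instruction kind and wraps the callable in it.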
+ template <typename F>
+ std::unique_ptr<HandlerWrapper> GetWrapper(HInstruction::InstructionKind kind, F f) {
+ switch (kind) {
+#define GEN_GETTER(nm, unused) \
+ case HInstruction::InstructionKind::k##nm: \
+ return std::unique_ptr<HandlerWrapper>( \
+ new KindWrapper<HInstruction::InstructionKind::k##nm, F>(f));
+ FOR_EACH_CONCRETE_INSTRUCTION(GEN_GETTER)
+#undef GEN_GETTER
+ default:
+ LOG(FATAL) << "Unable to handle kind " << kind;
+ return nullptr;
+ }
+ }
+
+ public:
+ template <typename... Inst>
+ explicit PatternMatchGraphVisitor(HGraph* graph, Inst... handlers) : HGraphVisitor(graph) {
+ FillHandlers(handlers...);
+ }
+
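+  // Runs the handler registered for this instruction's kind, if any.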
+ void VisitInstruction(HInstruction* instruction) override {
+ auto& h = handlers_[instruction->GetKind()];
+ if (h.get() != nullptr) {
+ (*h)(instruction);
+ }
+ }
+
+ private:
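+  // Deduces at compile time which concrete instruction kind a callable accepts
+  // by testing which H<Kind>* parameter type it is invocable with. The
+  // static_asserts reject callables taking abstract or generic instructions.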
+ template <typename Func>
+ constexpr HInstruction::InstructionKind GetKind() {
+#define CHECK_INST(nm, unused) \
+ if constexpr (std::is_invocable_v<Func, H##nm*>) { \
+ return HInstruction::InstructionKind::k##nm; \
+ }
+ FOR_EACH_CONCRETE_INSTRUCTION(CHECK_INST);
+#undef CHECK_INST
+ static_assert(!std::is_invocable_v<Func, HInstruction*>,
+ "Use on generic HInstruction not allowed");
+#define STATIC_ASSERT_ABSTRACT(nm, unused) && !std::is_invocable_v<Func, H##nm*>
+ static_assert(true FOR_EACH_ABSTRACT_INSTRUCTION(STATIC_ASSERT_ABSTRACT),
+ "Must not be abstract instruction");
+#undef STATIC_ASSERT_ABSTRACT
+#define STATIC_ASSERT_CONCRETE(nm, unused) || std::is_invocable_v<Func, H##nm*>
+ static_assert(false FOR_EACH_CONCRETE_INSTRUCTION(STATIC_ASSERT_CONCRETE),
+ "Must be a concrete instruction");
+#undef STATIC_ASSERT_CONCRETE
+ return HInstruction::InstructionKind::kLastInstructionKind;
+ }
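+  // Registers a single handler under the instruction kind deduced from its
+  // parameter type.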
+ template <typename First>
+ void FillHandlers(First h1) {
+ HInstruction::InstructionKind type = GetKind<First>();
+ CHECK_NE(type, HInstruction::kLastInstructionKind)
+ << "Unknown instruction kind. Only concrete ones please.";
+ handlers_[type] = GetWrapper(type, h1);
+ }
+
+ template <typename First, typename... Inst>
+ void FillHandlers(First h1, Inst... handlers) {
+ FillHandlers(h1);
+ FillHandlers<Inst...>(handlers...);
+ }
+
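+  // One optional handler per concrete instruction kind, indexed by kind.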
+ std::array<std::unique_ptr<HandlerWrapper>, HInstruction::InstructionKind::kLastInstructionKind>
+ handlers_;
+};
+
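+// Collects every instruction of each requested type into a tuple of vectors,
+// scanning either the whole graph, a single block, or a list of blocks.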
+template <typename... Target>
+std::tuple<std::vector<Target*>...> FindAllInstructions(
+ HGraph* graph,
+ std::variant<std::nullopt_t, HBasicBlock*, std::initializer_list<HBasicBlock*>> blks =
+ std::nullopt) {
+ std::tuple<std::vector<Target*>...> res;
+ PatternMatchGraphVisitor vis(
+ graph, [&](Target* t) { std::get<std::vector<Target*>>(res).push_back(t); }...);
+
+ if (std::holds_alternative<std::initializer_list<HBasicBlock*>>(blks)) {
+ for (HBasicBlock* blk : std::get<std::initializer_list<HBasicBlock*>>(blks)) {
+ vis.VisitBasicBlock(blk);
+ }
+ } else if (std::holds_alternative<std::nullopt_t>(blks)) {
+ vis.VisitInsertionOrder();
+ } else {
+ vis.VisitBasicBlock(std::get<HBasicBlock*>(blks));
+ }
+ return res;
+}
+
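+// Like FindAllInstructions, but expects at most one instruction of each
+// requested type; finding a second match is reported as a test failure.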
+template <typename... Target>
+std::tuple<Target*...> FindSingleInstructions(
+ HGraph* graph,
+ std::variant<std::nullopt_t, HBasicBlock*, std::initializer_list<HBasicBlock*>> blks =
+ std::nullopt) {
+ std::tuple<Target*...> res;
+ PatternMatchGraphVisitor vis(graph, [&](Target* t) {
+ EXPECT_EQ(std::get<Target*>(res), nullptr)
+ << *std::get<Target*>(res) << " already found but found " << *t << "!";
+ std::get<Target*>(res) = t;
+ }...);
+ if (std::holds_alternative<std::initializer_list<HBasicBlock*>>(blks)) {
+ for (HBasicBlock* blk : std::get<std::initializer_list<HBasicBlock*>>(blks)) {
+ vis.VisitBasicBlock(blk);
+ }
+ } else if (std::holds_alternative<std::nullopt_t>(blks)) {
+ vis.VisitInsertionOrder();
+ } else {
+ vis.VisitBasicBlock(std::get<HBasicBlock*>(blks));
+ }
+ return res;
+}
+
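+// Returns the single instruction of the requested type, or nullptr if none
+// is found.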
+template <typename Target>
+Target* FindSingleInstruction(
+ HGraph* graph,
+ std::variant<std::nullopt_t, HBasicBlock*, std::initializer_list<HBasicBlock*>> blks =
+ std::nullopt) {
+ return std::get<Target*>(FindSingleInstructions<Target>(graph, blks));
+}
+
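+// Returns the first element in [begin, end) satisfying |func|, or nullptr if
+// there is none. Requires an iterator over pointers.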
+template<typename Iter, typename Func>
+typename Iter::value_type FindOrNull(Iter begin, Iter end, Func func) {
+ static_assert(std::is_pointer_v<typename Iter::value_type>);
+ auto it = std::find_if(begin, end, func);
+ if (it == end) {
+ return nullptr;
+ } else {
+ return *it;
+ }
+}
+
+// // ENTRY
+// Obj new_inst = new Obj();
+// new_inst.foo = 12;
+// Obj obj;
+// Obj out;
+// int first;
+// if (param0) {
+// // ESCAPE_ROUTE
+// if (param1) {
+// // LEFT_START
+// if (param2) {
+// // LEFT_LEFT
+// obj = new_inst;
+// } else {
+// // LEFT_RIGHT
+// obj = obj_param;
+// }
+// // LEFT_MERGE
+// // Technically the phi alone is enough to cause an escape, but we are
+// // thorough anyway.
+// // obj = phi[new_inst, param]
+// escape(obj);
+// out = obj;
+// } else {
+// // RIGHT
+// out = obj_param;
+// }
+// // EXIT
+// // Can't do anything with this since we don't have good tracking for the heap locations.
+// // out = phi[param, phi[new_inst, param]]
+// first = out.foo
+// } else {
+// new_inst.foo = 15;
+// first = 13;
+// }
+// // first = phi[out.foo, 13]
+// return first + new_inst.foo;
+TEST_F(LoadStoreEliminationTest, PartialPhiPropagation) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "escape_route"},
+ {"entry", "noescape_route"},
+ {"escape_route", "left"},
+ {"escape_route", "right"},
+ {"left", "left_left"},
+ {"left", "left_right"},
+ {"left_left", "left_merge"},
+ {"left_right", "left_merge"},
+ {"left_merge", "escape_end"},
+ {"right", "escape_end"},
+ {"escape_end", "breturn"},
+ {"noescape_route", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+ GET_BLOCK(left_left);
+ GET_BLOCK(left_right);
+ GET_BLOCK(left_merge);
+ GET_BLOCK(escape_end);
+ GET_BLOCK(escape_route);
+ GET_BLOCK(noescape_route);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(escape_end, {left_merge, right});
+ EnsurePredecessorOrder(left_merge, {left_left, left_right});
+ EnsurePredecessorOrder(breturn, {escape_end, noescape_route});
+ HInstruction* param0 = MakeParam(DataType::Type::kBool);
+ HInstruction* param1 = MakeParam(DataType::Type::kBool);
+ HInstruction* param2 = MakeParam(DataType::Type::kBool);
+ HInstruction* obj_param = MakeParam(DataType::Type::kReference);
+ HInstruction* c12 = graph_->GetIntConstant(12);
+ HInstruction* c13 = graph_->GetIntConstant(13);
+ HInstruction* c15 = graph_->GetIntConstant(15);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* store = MakeIFieldSet(new_inst, c12, MemberOffset(32));
+ HInstruction* if_param0 = new (GetAllocator()) HIf(param0);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(store);
+ entry->AddInstruction(if_param0);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* store_noescape = MakeIFieldSet(new_inst, c15, MemberOffset(32));
+ noescape_route->AddInstruction(store_noescape);
+ noescape_route->AddInstruction(new (GetAllocator()) HGoto());
+
+ escape_route->AddInstruction(new (GetAllocator()) HIf(param1));
+
+ HInstruction* if_left = new (GetAllocator()) HIf(param2);
+ left->AddInstruction(if_left);
+
+ HInstruction* goto_left_left = new (GetAllocator()) HGoto();
+ left_left->AddInstruction(goto_left_left);
+
+ HInstruction* goto_left_right = new (GetAllocator()) HGoto();
+ left_right->AddInstruction(goto_left_right);
+
+ HPhi* left_phi = MakePhi({obj_param, new_inst});
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { left_phi });
+ HInstruction* goto_left_merge = new (GetAllocator()) HGoto();
+ left_merge->AddPhi(left_phi);
+ left_merge->AddInstruction(call_left);
+ left_merge->AddInstruction(goto_left_merge);
+ left_phi->SetCanBeNull(true);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(goto_right);
+
+ HPhi* escape_end_phi = MakePhi({left_phi, obj_param});
+ HInstruction* read_escape_end =
+ MakeIFieldGet(escape_end_phi, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* goto_escape_end = new (GetAllocator()) HGoto();
+ escape_end->AddPhi(escape_end_phi);
+ escape_end->AddInstruction(read_escape_end);
+ escape_end->AddInstruction(goto_escape_end);
+
+ HPhi* return_phi = MakePhi({read_escape_end, c13});
+ HInstruction* read_exit = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* add_exit = new (GetAllocator()) HAdd(DataType::Type::kInt32, return_phi, read_exit);
+ HInstruction* return_exit = new (GetAllocator()) HReturn(add_exit);
+ breturn->AddPhi(return_phi);
+ breturn->AddInstruction(read_exit);
+ breturn->AddInstruction(add_exit);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_);
+ std::vector<HPhi*> all_return_phis;
+ std::tie(all_return_phis) = FindAllInstructions<HPhi>(graph_, breturn);
+ EXPECT_EQ(all_return_phis.size(), 3u);
+ EXPECT_INS_RETAINED(return_phi);
+ EXPECT_TRUE(std::find(all_return_phis.begin(), all_return_phis.end(), return_phi) !=
+ all_return_phis.end());
+ HPhi* instance_phi =
+ FindOrNull(all_return_phis.begin(), all_return_phis.end(), [&](HPhi* phi) {
+ return phi != return_phi && phi->GetType() == DataType::Type::kReference;
+ });
+ ASSERT_NE(instance_phi, nullptr);
+ HPhi* value_phi = FindOrNull(all_return_phis.begin(), all_return_phis.end(), [&](HPhi* phi) {
+ return phi != return_phi && phi->GetType() == DataType::Type::kInt32;
+ });
+ ASSERT_NE(value_phi, nullptr);
+ EXPECT_INS_EQ(
+ instance_phi->InputAt(0),
+ FindSingleInstruction<HNewInstance>(graph_, escape_route->GetSinglePredecessor()));
+ // Check materialize block
+ EXPECT_INS_EQ(FindSingleInstruction<HInstanceFieldSet>(
+ graph_, escape_route->GetSinglePredecessor())
+ ->InputAt(1),
+ c12);
+
+ EXPECT_INS_EQ(instance_phi->InputAt(1), graph_->GetNullConstant());
+ EXPECT_INS_EQ(value_phi->InputAt(0), graph_->GetIntConstant(0));
+ EXPECT_INS_EQ(value_phi->InputAt(1), c15);
+ EXPECT_INS_REMOVED(store_noescape);
+ EXPECT_INS_EQ(pred_get->GetTarget(), instance_phi);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue(), value_phi);
+}
+
+// // ENTRY
+// // To be moved
+// // NB Order is important. By having the alloc and store of obj1 before obj2
+// // we ensure we'll build the materialization for obj1 first (just due to
+// // how we iterate).
+// obj1 = new Obj();
+// obj2 = new Obj(); // has env[obj1]
+// // Swap the order of these
+// obj1.foo = param_obj1;
+// obj2.foo = param_obj2;
+// if (param1) {
+// // LEFT
+// obj2.foo = obj1;
+// if (param2) {
+// // LEFT_LEFT
+// escape(obj2);
+// } else {}
+// } else {}
+// return select(param3, obj1.foo, obj2.foo);
+// EXIT
+TEST_P(OrderDependentTestGroup, PredicatedUse) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "left_left"},
+ {"left", "left_right"},
+ {"left_left", "left_end"},
+ {"left_right", "left_end"},
+ {"left_end", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(right);
+ GET_BLOCK(left);
+ GET_BLOCK(left_left);
+ GET_BLOCK(left_right);
+ GET_BLOCK(left_end);
+#undef GET_BLOCK
+ TestOrder order = GetParam();
+ EnsurePredecessorOrder(breturn, {left_end, right});
+ EnsurePredecessorOrder(left_end, {left_left, left_right});
+ HInstruction* param1 = MakeParam(DataType::Type::kBool);
+ HInstruction* param2 = MakeParam(DataType::Type::kBool);
+ HInstruction* param3 = MakeParam(DataType::Type::kBool);
+ HInstruction* param_obj1 = MakeParam(DataType::Type::kReference);
+ HInstruction* param_obj2 = MakeParam(DataType::Type::kReference);
+
+ HInstruction* cls1 = MakeClassLoad();
+ HInstruction* cls2 = MakeClassLoad();
+ HInstruction* new_inst1 = MakeNewInstance(cls1);
+ HInstruction* new_inst2 = MakeNewInstance(cls2);
+ HInstruction* store1 = MakeIFieldSet(new_inst1, param_obj1, MemberOffset(32));
+ HInstruction* store2 = MakeIFieldSet(new_inst2, param_obj2, MemberOffset(32));
+ HInstruction* null_const = graph_->GetNullConstant();
+ HInstruction* if_inst = new (GetAllocator()) HIf(param1);
+ entry->AddInstruction(cls1);
+ entry->AddInstruction(cls2);
+ entry->AddInstruction(new_inst1);
+ entry->AddInstruction(new_inst2);
+ if (order == TestOrder::kSameAsAlloc) {
+ entry->AddInstruction(store1);
+ entry->AddInstruction(store2);
+ } else {
+ entry->AddInstruction(store2);
+ entry->AddInstruction(store1);
+ }
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls1, {});
+ cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
+ new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
+ new_inst2->CopyEnvironmentFrom(cls1->GetEnvironment());
+
+ // This is the escape of new_inst1
+ HInstruction* store_left = MakeIFieldSet(new_inst2, new_inst1, MemberOffset(32));
+ HInstruction* if_left = new (GetAllocator()) HIf(param2);
+ left->AddInstruction(store_left);
+ left->AddInstruction(if_left);
+
+ HInstruction* call_left_left = MakeInvoke(DataType::Type::kVoid, { new_inst2 });
+ HInstruction* goto_left_left = new (GetAllocator()) HGoto();
+ left_left->AddInstruction(call_left_left);
+ left_left->AddInstruction(goto_left_left);
+ call_left_left->CopyEnvironmentFrom(new_inst2->GetEnvironment());
+
+ left_right->AddInstruction(new (GetAllocator()) HGoto());
+ left_end->AddInstruction(new (GetAllocator()) HGoto());
+
+ right->AddInstruction(new (GetAllocator()) HGoto());
+
+ // Used to distinguish the pred-gets without having to dig through the
+ // multiple phi layers.
+ constexpr uint32_t kRead1DexPc = 10;
+ constexpr uint32_t kRead2DexPc = 20;
+ HInstruction* read1 =
+ MakeIFieldGet(new_inst1, DataType::Type::kReference, MemberOffset(32), kRead1DexPc);
+ read1->SetReferenceTypeInfo(
+ ReferenceTypeInfo::CreateUnchecked(graph_->GetHandleCache()->GetObjectClassHandle(), false));
+ HInstruction* read2 =
+ MakeIFieldGet(new_inst2, DataType::Type::kReference, MemberOffset(32), kRead2DexPc);
+ read2->SetReferenceTypeInfo(
+ ReferenceTypeInfo::CreateUnchecked(graph_->GetHandleCache()->GetObjectClassHandle(), false));
+ HInstruction* sel_return = new (GetAllocator()) HSelect(param3, read1, read2, 0);
+ HInstruction* return_exit = new (GetAllocator()) HReturn(sel_return);
+ breturn->AddInstruction(read1);
+ breturn->AddInstruction(read2);
+ breturn->AddInstruction(sel_return);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_RETAINED(call_left_left);
+ EXPECT_INS_REMOVED(read1);
+ EXPECT_INS_REMOVED(read2);
+ EXPECT_INS_REMOVED(new_inst1);
+ EXPECT_INS_REMOVED(new_inst2);
+ EXPECT_TRUE(new_inst1->GetUses().empty()) << *new_inst1 << " " << new_inst1->GetUses();
+ EXPECT_TRUE(new_inst2->GetUses().empty()) << *new_inst2 << " " << new_inst2->GetUses();
+ EXPECT_INS_RETAINED(sel_return);
+ // Make sure the selector is the same
+ EXPECT_INS_EQ(sel_return->InputAt(2), param3);
+ std::vector<HPredicatedInstanceFieldGet*> pred_gets;
+ std::tie(pred_gets) = FindAllInstructions<HPredicatedInstanceFieldGet>(graph_, breturn);
+ HPredicatedInstanceFieldGet* pred1 = FindOrNull(pred_gets.begin(), pred_gets.end(), [&](auto i) {
+ return i->GetDexPc() == kRead1DexPc;
+ });
+ HPredicatedInstanceFieldGet* pred2 = FindOrNull(pred_gets.begin(), pred_gets.end(), [&](auto i) {
+ return i->GetDexPc() == kRead2DexPc;
+ });
+ ASSERT_NE(pred1, nullptr);
+ ASSERT_NE(pred2, nullptr);
+ EXPECT_INS_EQ(sel_return->InputAt(0), pred2);
+ EXPECT_INS_EQ(sel_return->InputAt(1), pred1);
+ // Check targets
+ EXPECT_TRUE(pred1->GetTarget()->IsPhi()) << pred1->DumpWithArgs();
+ EXPECT_TRUE(pred2->GetTarget()->IsPhi()) << pred2->DumpWithArgs();
+ HInstruction* mat1 = FindSingleInstruction<HNewInstance>(graph_, left->GetSinglePredecessor());
+ HInstruction* mat2 =
+ FindSingleInstruction<HNewInstance>(graph_, left_left->GetSinglePredecessor());
+ EXPECT_INS_EQ(pred1->GetTarget()->InputAt(0), mat1);
+ EXPECT_INS_EQ(pred1->GetTarget()->InputAt(1), null_const);
+ EXPECT_TRUE(pred2->GetTarget()->InputAt(0)->IsPhi()) << pred2->DumpWithArgs();
+ EXPECT_INS_EQ(pred2->GetTarget()->InputAt(0)->InputAt(0), mat2);
+ EXPECT_INS_EQ(pred2->GetTarget()->InputAt(0)->InputAt(1), null_const);
+ EXPECT_INS_EQ(pred2->GetTarget()->InputAt(1), null_const);
+ // Check default values.
+ EXPECT_TRUE(pred1->GetDefaultValue()->IsPhi()) << pred1->DumpWithArgs();
+ EXPECT_TRUE(pred2->GetDefaultValue()->IsPhi()) << pred2->DumpWithArgs();
+ EXPECT_INS_EQ(pred1->GetDefaultValue()->InputAt(0), null_const);
+ EXPECT_INS_EQ(pred1->GetDefaultValue()->InputAt(1), param_obj1);
+ EXPECT_TRUE(pred2->GetDefaultValue()->InputAt(0)->IsPhi()) << pred2->DumpWithArgs();
+ EXPECT_INS_EQ(pred2->GetDefaultValue()->InputAt(0)->InputAt(0), null_const);
+ EXPECT_INS_EQ(pred2->GetDefaultValue()->InputAt(0)->InputAt(1), mat1);
+ EXPECT_INS_EQ(pred2->GetDefaultValue()->InputAt(1), param_obj2);
+}
+
+// // ENTRY
+// // To be moved
+// // NB Order is important. By having the alloc and store of obj1 before obj2
+// // we ensure we'll build the materialization for obj1 first (just due to
+// // how we iterate).
+// obj1 = new Obj();
+// obj.foo = 12;
+// obj2 = new Obj(); // has env[obj1]
+// obj2.foo = 15;
+// if (param1) {
+// // LEFT
+// // Need to update env to nullptr
+// escape(obj1/2);
+// if (param2) {
+// // LEFT_LEFT
+// escape(obj2/1);
+// } else {}
+// } else {}
+// return obj1.foo + obj2.foo;
+// EXIT
+TEST_P(OrderDependentTestGroup, PredicatedEnvUse) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "left_left"},
+ {"left", "left_right"},
+ {"left_left", "left_end"},
+ {"left_right", "left_end"},
+ {"left_end", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(right);
+ GET_BLOCK(left);
+ GET_BLOCK(left_left);
+ GET_BLOCK(left_right);
+ GET_BLOCK(left_end);
+#undef GET_BLOCK
+ TestOrder order = GetParam();
+ EnsurePredecessorOrder(breturn, {left_end, right});
+ EnsurePredecessorOrder(left_end, {left_left, left_right});
+ HInstruction* param1 = MakeParam(DataType::Type::kBool);
+ HInstruction* param2 = MakeParam(DataType::Type::kBool);
+ HInstruction* c12 = graph_->GetIntConstant(12);
+ HInstruction* c15 = graph_->GetIntConstant(15);
+
+ HInstruction* cls1 = MakeClassLoad();
+ HInstruction* cls2 = MakeClassLoad();
+ HInstruction* new_inst1 = MakeNewInstance(cls1);
+ HInstruction* store1 = MakeIFieldSet(new_inst1, c12, MemberOffset(32));
+ HInstruction* new_inst2 = MakeNewInstance(cls2);
+ HInstruction* store2 = MakeIFieldSet(new_inst2, c15, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(param1);
+ entry->AddInstruction(cls1);
+ entry->AddInstruction(cls2);
+ entry->AddInstruction(new_inst1);
+ entry->AddInstruction(store1);
+ entry->AddInstruction(new_inst2);
+ entry->AddInstruction(store2);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls1, {});
+ cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
+ new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
+ ManuallyBuildEnvFor(new_inst2, {new_inst1});
+
+ HInstruction* first_inst = new_inst1;
+ HInstruction* second_inst = new_inst2;
+
+ if (order == TestOrder::kReverseOfAlloc) {
+ std::swap(first_inst, second_inst);
+ }
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { first_inst });
+ HInstruction* if_left = new (GetAllocator()) HIf(param2);
+ left->AddInstruction(call_left);
+ left->AddInstruction(if_left);
+ call_left->CopyEnvironmentFrom(new_inst2->GetEnvironment());
+
+ HInstruction* call_left_left = MakeInvoke(DataType::Type::kVoid, { second_inst });
+ HInstruction* goto_left_left = new (GetAllocator()) HGoto();
+ left_left->AddInstruction(call_left_left);
+ left_left->AddInstruction(goto_left_left);
+ call_left_left->CopyEnvironmentFrom(new_inst2->GetEnvironment());
+
+ left_right->AddInstruction(new (GetAllocator()) HGoto());
+ left_end->AddInstruction(new (GetAllocator()) HGoto());
+
+ right->AddInstruction(new (GetAllocator()) HGoto());
+
+ HInstruction* read1 = MakeIFieldGet(new_inst1, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* read2 = MakeIFieldGet(new_inst2, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* add_return = new (GetAllocator()) HAdd(DataType::Type::kInt32, read1, read2);
+ HInstruction* return_exit = new (GetAllocator()) HReturn(add_return);
+ breturn->AddInstruction(read1);
+ breturn->AddInstruction(read2);
+ breturn->AddInstruction(add_return);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ HNewInstance* moved_new_inst1;
+ HInstanceFieldSet* moved_set1;
+ HNewInstance* moved_new_inst2;
+ HInstanceFieldSet* moved_set2;
+ HBasicBlock* first_mat_block = left->GetSinglePredecessor();
+ HBasicBlock* second_mat_block = left_left->GetSinglePredecessor();
+ if (order == TestOrder::kReverseOfAlloc) {
+ std::swap(first_mat_block, second_mat_block);
+ }
+ std::tie(moved_new_inst1, moved_set1) =
+ FindSingleInstructions<HNewInstance, HInstanceFieldSet>(graph_, first_mat_block);
+ std::tie(moved_new_inst2, moved_set2) =
+ FindSingleInstructions<HNewInstance, HInstanceFieldSet>(graph_, second_mat_block);
+ std::vector<HPredicatedInstanceFieldGet*> pred_gets;
+ std::vector<HPhi*> phis;
+ std::tie(pred_gets, phis) = FindAllInstructions<HPredicatedInstanceFieldGet, HPhi>(graph_);
+ EXPECT_NE(moved_new_inst1, nullptr);
+ EXPECT_NE(moved_new_inst2, nullptr);
+ EXPECT_NE(moved_set1, nullptr);
+ EXPECT_NE(moved_set2, nullptr);
+ EXPECT_INS_EQ(moved_set1->InputAt(1), c12);
+ EXPECT_INS_EQ(moved_set2->InputAt(1), c15);
+ EXPECT_INS_RETAINED(call_left);
+ EXPECT_INS_RETAINED(call_left_left);
+ EXPECT_INS_REMOVED(store1);
+ EXPECT_INS_REMOVED(store2);
+ EXPECT_INS_REMOVED(read1);
+ EXPECT_INS_REMOVED(read2);
+ EXPECT_INS_EQ(moved_new_inst2->GetEnvironment()->GetInstructionAt(0),
+ order == TestOrder::kSameAsAlloc
+ ? moved_new_inst1
+ : static_cast<HInstruction*>(graph_->GetNullConstant()));
+}
+
+// // ENTRY
+// obj1 = new Obj1();
+// obj2 = new Obj2();
+// val1 = 3;
+// val2 = 13;
+// // The exact order the stores are written in affects the order in which we
+// // perform partial LSE on the values.
+// obj1/2.field = val1/2;
+// obj2/1.field = val2/1;
+// if (parameter_value) {
+// // LEFT
+// escape(obj1);
+// escape(obj2);
+// } else {
+// // RIGHT
+// // ELIMINATE
+// obj1.field = 2;
+// obj2.field = 12;
+// }
+// EXIT
+// predicated-ELIMINATE
+// return obj1.field + obj2.field
+TEST_P(OrderDependentTestGroup, FieldSetOrderEnv) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ TestOrder order = GetParam();
+ EnsurePredecessorOrder(breturn, {left, right});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c12 = graph_->GetIntConstant(12);
+ HInstruction* c13 = graph_->GetIntConstant(13);
+
+ HInstruction* cls1 = MakeClassLoad();
+ HInstruction* cls2 = MakeClassLoad();
+ HInstruction* new_inst1 = MakeNewInstance(cls1);
+ HInstruction* new_inst2 = MakeNewInstance(cls2);
+ HInstruction* write_entry1 = MakeIFieldSet(new_inst1, c3, MemberOffset(32));
+ HInstruction* write_entry2 = MakeIFieldSet(new_inst2, c13, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls1);
+ entry->AddInstruction(cls2);
+ entry->AddInstruction(new_inst1);
+ entry->AddInstruction(new_inst2);
+ if (order == TestOrder::kSameAsAlloc) {
+ entry->AddInstruction(write_entry1);
+ entry->AddInstruction(write_entry2);
+ } else {
+ entry->AddInstruction(write_entry2);
+ entry->AddInstruction(write_entry1);
+ }
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls1, {});
+ cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
+ new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
+ ManuallyBuildEnvFor(new_inst2, {new_inst1});
+
+ HInstruction* call_left1 = MakeInvoke(DataType::Type::kVoid, { new_inst1 });
+ HInstruction* call_left2 = MakeInvoke(DataType::Type::kVoid, { new_inst2 });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left1);
+ left->AddInstruction(call_left2);
+ left->AddInstruction(goto_left);
+ call_left1->CopyEnvironmentFrom(cls1->GetEnvironment());
+ call_left2->CopyEnvironmentFrom(cls1->GetEnvironment());
+
+ HInstruction* write_right1 = MakeIFieldSet(new_inst1, c2, MemberOffset(32));
+ HInstruction* write_right2 = MakeIFieldSet(new_inst2, c12, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right1);
+ right->AddInstruction(write_right2);
+ right->AddInstruction(goto_right);
+
+ HInstruction* read_bottom1 = MakeIFieldGet(new_inst1, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* read_bottom2 = MakeIFieldGet(new_inst2, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* combine =
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, read_bottom1, read_bottom2);
+ HInstruction* return_exit = new (GetAllocator()) HReturn(combine);
+ breturn->AddInstruction(read_bottom1);
+ breturn->AddInstruction(read_bottom2);
+ breturn->AddInstruction(combine);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(write_entry1);
+ EXPECT_INS_REMOVED(write_entry2);
+ EXPECT_INS_REMOVED(read_bottom1);
+ EXPECT_INS_REMOVED(read_bottom2);
+ EXPECT_INS_REMOVED(write_right1);
+ EXPECT_INS_REMOVED(write_right2);
+ EXPECT_INS_RETAINED(call_left1);
+ EXPECT_INS_RETAINED(call_left2);
+ std::vector<HPhi*> merges;
+ std::vector<HPredicatedInstanceFieldGet*> pred_gets;
+ std::vector<HNewInstance*> materializations;
+ std::tie(merges, pred_gets) =
+ FindAllInstructions<HPhi, HPredicatedInstanceFieldGet>(graph_, breturn);
+ std::tie(materializations) = FindAllInstructions<HNewInstance>(graph_);
+ ASSERT_EQ(merges.size(), 4u);
+ ASSERT_EQ(pred_gets.size(), 2u);
+ ASSERT_EQ(materializations.size(), 2u);
+ HPhi* merge_value_return1 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->InputAt(1) == c2;
+ });
+ HPhi* merge_value_return2 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->InputAt(1) == c12;
+ });
+ HNewInstance* mat_alloc1 = FindOrNull(materializations.begin(),
+ materializations.end(),
+ [&](HNewInstance* n) { return n->InputAt(0) == cls1; });
+ HNewInstance* mat_alloc2 = FindOrNull(materializations.begin(),
+ materializations.end(),
+ [&](HNewInstance* n) { return n->InputAt(0) == cls2; });
+ ASSERT_NE(mat_alloc1, nullptr);
+ ASSERT_NE(mat_alloc2, nullptr);
+ HPhi* merge_alloc1 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference && p->InputAt(0) == mat_alloc1;
+ });
+ HPhi* merge_alloc2 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference && p->InputAt(0) == mat_alloc2;
+ });
+ ASSERT_NE(merge_alloc1, nullptr);
+ HPredicatedInstanceFieldGet* pred_get1 =
+ FindOrNull(pred_gets.begin(), pred_gets.end(), [&](HPredicatedInstanceFieldGet* pg) {
+ return pg->GetTarget() == merge_alloc1;
+ });
+ ASSERT_NE(merge_alloc2, nullptr);
+ HPredicatedInstanceFieldGet* pred_get2 =
+ FindOrNull(pred_gets.begin(), pred_gets.end(), [&](HPredicatedInstanceFieldGet* pg) {
+ return pg->GetTarget() == merge_alloc2;
+ });
+ ASSERT_NE(merge_value_return1, nullptr);
+ ASSERT_NE(merge_value_return2, nullptr);
+ EXPECT_INS_EQ(merge_alloc1->InputAt(1), graph_->GetNullConstant());
+ EXPECT_INS_EQ(merge_alloc2->InputAt(1), graph_->GetNullConstant());
+ ASSERT_NE(pred_get1, nullptr);
+ EXPECT_INS_EQ(pred_get1->GetTarget(), merge_alloc1);
+ EXPECT_INS_EQ(pred_get1->GetDefaultValue(), merge_value_return1)
+ << " pred-get is: " << *pred_get1;
+ EXPECT_INS_EQ(merge_value_return1->InputAt(0), graph_->GetIntConstant(0))
+ << " merge val is: " << *merge_value_return1;
+ EXPECT_INS_EQ(merge_value_return1->InputAt(1), c2) << " merge val is: " << *merge_value_return1;
+ ASSERT_NE(pred_get2, nullptr);
+ EXPECT_INS_EQ(pred_get2->GetTarget(), merge_alloc2);
+ EXPECT_INS_EQ(pred_get2->GetDefaultValue(), merge_value_return2)
+ << " pred-get is: " << *pred_get2;
+ EXPECT_INS_EQ(merge_value_return2->InputAt(0), graph_->GetIntConstant(0))
+ << " merge val is: " << *merge_value_return1;
+ EXPECT_INS_EQ(merge_value_return2->InputAt(1), c12) << " merge val is: " << *merge_value_return1;
+ EXPECT_INS_EQ(mat_alloc2->GetEnvironment()->GetInstructionAt(0), mat_alloc1);
+}
+
+// // TODO We can compile this better if we are better able to understand lifetimes.
+// // ENTRY
+// obj1 = new Obj1();
+// obj2 = new Obj2();
+// // The exact order the stores are written in affects the order in which we
+// // perform partial LSE on the values.
+// obj{1,2}.var = param_obj;
+// obj{2,1}.var = param_obj;
+// if (param_1) {
+// // EARLY_RETURN
+// return;
+// }
+// // escape of obj1
+// obj2.var = obj1;
+// if (param_2) {
+// // escape of obj2 with a materialization that uses obj1
+// escape(obj2);
+// }
+// // EXIT
+// return;
+TEST_P(OrderDependentTestGroup, MaterializationMovedUse) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "early_return"},
+ {"early_return", "exit"},
+ {"entry", "escape_1"},
+ {"escape_1", "escape_2"},
+ {"escape_1", "escape_1_crit_break"},
+ {"escape_1_crit_break", "exit"},
+ {"escape_2", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(early_return);
+ GET_BLOCK(escape_1);
+ GET_BLOCK(escape_1_crit_break);
+ GET_BLOCK(escape_2);
+#undef GET_BLOCK
+ TestOrder order = GetParam();
+ HInstruction* param_1 = MakeParam(DataType::Type::kBool);
+ HInstruction* param_2 = MakeParam(DataType::Type::kBool);
+ HInstruction* param_obj = MakeParam(DataType::Type::kReference);
+
+ HInstruction* cls1 = MakeClassLoad();
+ HInstruction* cls2 = MakeClassLoad();
+ HInstruction* new_inst1 = MakeNewInstance(cls1);
+ HInstruction* new_inst2 = MakeNewInstance(cls2);
+ HInstruction* write_entry1 = MakeIFieldSet(new_inst1, param_obj, MemberOffset(32));
+ HInstruction* write_entry2 = MakeIFieldSet(new_inst2, param_obj, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(param_1);
+ entry->AddInstruction(cls1);
+ entry->AddInstruction(cls2);
+ entry->AddInstruction(new_inst1);
+ entry->AddInstruction(new_inst2);
+ if (order == TestOrder::kSameAsAlloc) {
+ entry->AddInstruction(write_entry1);
+ entry->AddInstruction(write_entry2);
+ } else {
+ entry->AddInstruction(write_entry2);
+ entry->AddInstruction(write_entry1);
+ }
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls1, {});
+ cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
+ new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
+ new_inst2->CopyEnvironmentFrom(cls1->GetEnvironment());
+
+ early_return->AddInstruction(new (GetAllocator()) HReturnVoid());
+
+ HInstruction* escape_1_set = MakeIFieldSet(new_inst2, new_inst1, MemberOffset(32));
+ HInstruction* escape_1_if = new (GetAllocator()) HIf(param_2);
+ escape_1->AddInstruction(escape_1_set);
+ escape_1->AddInstruction(escape_1_if);
+
+ escape_1_crit_break->AddInstruction(new (GetAllocator()) HReturnVoid());
+
+ HInstruction* escape_2_call = MakeInvoke(DataType::Type::kVoid, {new_inst2});
+ HInstruction* escape_2_return = new (GetAllocator()) HReturnVoid();
+ escape_2->AddInstruction(escape_2_call);
+ escape_2->AddInstruction(escape_2_return);
+ escape_2_call->CopyEnvironmentFrom(cls1->GetEnvironment());
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(new_inst1);
+ EXPECT_INS_REMOVED(new_inst2);
+ EXPECT_INS_REMOVED(write_entry1);
+ EXPECT_INS_REMOVED(write_entry2);
+ EXPECT_INS_REMOVED(escape_1_set);
+ EXPECT_INS_RETAINED(escape_2_call);
+
+ HInstruction* obj1_mat =
+ FindSingleInstruction<HNewInstance>(graph_, escape_1->GetSinglePredecessor());
+ HInstruction* obj1_set =
+ FindSingleInstruction<HInstanceFieldSet>(graph_, escape_1->GetSinglePredecessor());
+ HInstruction* obj2_mat =
+ FindSingleInstruction<HNewInstance>(graph_, escape_2->GetSinglePredecessor());
+ HInstruction* obj2_set =
+ FindSingleInstruction<HInstanceFieldSet>(graph_, escape_2->GetSinglePredecessor());
+ ASSERT_TRUE(obj1_mat != nullptr);
+ ASSERT_TRUE(obj2_mat != nullptr);
+ ASSERT_TRUE(obj1_set != nullptr);
+ ASSERT_TRUE(obj2_set != nullptr);
+ EXPECT_INS_EQ(obj1_set->InputAt(0), obj1_mat);
+ EXPECT_INS_EQ(obj1_set->InputAt(1), param_obj);
+ EXPECT_INS_EQ(obj2_set->InputAt(0), obj2_mat);
+ EXPECT_INS_EQ(obj2_set->InputAt(1), obj1_mat);
+}
+
+INSTANTIATE_TEST_SUITE_P(LoadStoreEliminationTest,
+ OrderDependentTestGroup,
+ testing::Values(TestOrder::kSameAsAlloc, TestOrder::kReverseOfAlloc));
+
+// // ENTRY
+// // To be moved
+// obj = new Obj();
+// obj.foo = 12;
+// if (parameter_value) {
+// // LEFT
+// escape(obj);
+// } else {}
+// EXIT
+TEST_F(LoadStoreEliminationTest, MovePredicatedAlloc) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"right", "breturn"},
+ {"left", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {left, right});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* c12 = graph_->GetIntConstant(12);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* store = MakeIFieldSet(new_inst, c12, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(store);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ right->AddInstruction(new (GetAllocator()) HGoto());
+
+ HInstruction* return_exit = new (GetAllocator()) HReturnVoid();
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ HNewInstance* moved_new_inst = nullptr;
+ HInstanceFieldSet* moved_set = nullptr;
+ std::tie(moved_new_inst, moved_set) =
+ FindSingleInstructions<HNewInstance, HInstanceFieldSet>(graph_);
+ EXPECT_NE(moved_new_inst, nullptr);
+ EXPECT_NE(moved_set, nullptr);
+ EXPECT_INS_RETAINED(call_left);
+ // store removed or moved.
+ EXPECT_NE(store->GetBlock(), entry);
+ // New-inst removed or moved.
+ EXPECT_NE(new_inst->GetBlock(), entry);
+ EXPECT_INS_EQ(moved_set->InputAt(0), moved_new_inst);
+ EXPECT_INS_EQ(moved_set->InputAt(1), c12);
+}
+
+// // ENTRY
+// // To be moved
+// obj = new Obj();
+// obj.foo = 12;
+// if (parameter_value) {
+// // LEFT
+// escape(obj);
+// }
+// EXIT
+// int a = obj.foo;
+// obj.foo = 13;
+// noescape();
+// int b = obj.foo;
+// obj.foo = 14;
+// noescape();
+// int c = obj.foo;
+// obj.foo = 15;
+// noescape();
+// return a + b + c
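+// Expected result (sketch, based on the assertions below): obj is
+// materialized only on the escaping LEFT path; in the merged block each load
+// becomes a PredicatedInstanceFieldGet whose default value is the
+// previously-stored constant (a phi of [0, 12] for the first load), and each
+// store becomes a predicated set that only writes once obj has escaped.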
+TEST_F(LoadStoreEliminationTest, MultiPartialLoadStore) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"right", "breturn"},
+ {"left", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {left, right});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* c12 = graph_->GetIntConstant(12);
+ HInstruction* c13 = graph_->GetIntConstant(13);
+ HInstruction* c14 = graph_->GetIntConstant(14);
+ HInstruction* c15 = graph_->GetIntConstant(15);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* store = MakeIFieldSet(new_inst, c12, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(store);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(goto_right);
+
+ HInstruction* a_val = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* a_reset = MakeIFieldSet(new_inst, c13, MemberOffset(32));
+ HInstruction* a_noescape = MakeInvoke(DataType::Type::kVoid, {});
+ HInstruction* b_val = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* b_reset = MakeIFieldSet(new_inst, c14, MemberOffset(32));
+ HInstruction* b_noescape = MakeInvoke(DataType::Type::kVoid, {});
+ HInstruction* c_val = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* c_reset = MakeIFieldSet(new_inst, c15, MemberOffset(32));
+ HInstruction* c_noescape = MakeInvoke(DataType::Type::kVoid, {});
+ HInstruction* add_1_exit = new (GetAllocator()) HAdd(DataType::Type::kInt32, a_val, b_val);
+ HInstruction* add_2_exit = new (GetAllocator()) HAdd(DataType::Type::kInt32, c_val, add_1_exit);
+ HInstruction* return_exit = new (GetAllocator()) HReturn(add_2_exit);
+ breturn->AddInstruction(a_val);
+ breturn->AddInstruction(a_reset);
+ breturn->AddInstruction(a_noescape);
+ breturn->AddInstruction(b_val);
+ breturn->AddInstruction(b_reset);
+ breturn->AddInstruction(b_noescape);
+ breturn->AddInstruction(c_val);
+ breturn->AddInstruction(c_reset);
+ breturn->AddInstruction(c_noescape);
+ breturn->AddInstruction(add_1_exit);
+ breturn->AddInstruction(add_2_exit);
+ breturn->AddInstruction(return_exit);
+ ManuallyBuildEnvFor(a_noescape, {new_inst, a_val});
+ ManuallyBuildEnvFor(b_noescape, {new_inst, a_val, b_val});
+ ManuallyBuildEnvFor(c_noescape, {new_inst, a_val, b_val, c_val});
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ HNewInstance* moved_new_inst = nullptr;
+ HInstanceFieldSet* moved_set = nullptr;
+ std::tie(moved_new_inst, moved_set) =
+ FindSingleInstructions<HNewInstance, HInstanceFieldSet>(graph_, left->GetSinglePredecessor());
+ std::vector<HPredicatedInstanceFieldGet*> pred_gets;
+ std::vector<HInstanceFieldSet*> pred_sets;
+ std::vector<HPhi*> return_phis;
+ std::tie(return_phis, pred_gets, pred_sets) =
+ FindAllInstructions<HPhi, HPredicatedInstanceFieldGet, HInstanceFieldSet>(graph_, breturn);
+ ASSERT_EQ(return_phis.size(), 2u);
+ HPhi* inst_phi = return_phis[0];
+ HPhi* val_phi = return_phis[1];
+ if (inst_phi->GetType() != DataType::Type::kReference) {
+ std::swap(inst_phi, val_phi);
+ }
+ ASSERT_NE(moved_new_inst, nullptr);
+ EXPECT_INS_EQ(inst_phi->InputAt(0), moved_new_inst);
+ EXPECT_INS_EQ(inst_phi->InputAt(1), graph_->GetNullConstant());
+ EXPECT_INS_EQ(val_phi->InputAt(0), graph_->GetIntConstant(0));
+  EXPECT_INS_EQ(val_phi->InputAt(1), c12);
+ ASSERT_EQ(pred_gets.size(), 3u);
+ ASSERT_EQ(pred_gets.size(), pred_sets.size());
+ std::vector<HInstruction*> set_values{c13, c14, c15};
+ std::vector<HInstruction*> get_values{val_phi, c13, c14};
+ ASSERT_NE(moved_set, nullptr);
+ EXPECT_INS_EQ(moved_set->InputAt(0), moved_new_inst);
+ EXPECT_INS_EQ(moved_set->InputAt(1), c12);
+ EXPECT_INS_RETAINED(call_left);
+ // store removed or moved.
+ EXPECT_NE(store->GetBlock(), entry);
+ // New-inst removed or moved.
+ EXPECT_NE(new_inst->GetBlock(), entry);
+ for (auto [get, val] : ZipLeft(MakeIterationRange(pred_gets), MakeIterationRange(get_values))) {
+ EXPECT_INS_EQ(get->GetDefaultValue(), val);
+ }
+ for (auto [set, val] : ZipLeft(MakeIterationRange(pred_sets), MakeIterationRange(set_values))) {
+ EXPECT_INS_EQ(set->InputAt(1), val);
+ EXPECT_TRUE(set->GetIsPredicatedSet()) << *set;
+ }
+ EXPECT_INS_RETAINED(a_noescape);
+ EXPECT_INS_RETAINED(b_noescape);
+ EXPECT_INS_RETAINED(c_noescape);
+ EXPECT_INS_EQ(add_1_exit->InputAt(0), pred_gets[0]);
+ EXPECT_INS_EQ(add_1_exit->InputAt(1), pred_gets[1]);
+ EXPECT_INS_EQ(add_2_exit->InputAt(0), pred_gets[2]);
+
+ EXPECT_EQ(a_noescape->GetEnvironment()->Size(), 2u);
+ EXPECT_INS_EQ(a_noescape->GetEnvironment()->GetInstructionAt(0), inst_phi);
+ EXPECT_INS_EQ(a_noescape->GetEnvironment()->GetInstructionAt(1), pred_gets[0]);
+ EXPECT_EQ(b_noescape->GetEnvironment()->Size(), 3u);
+ EXPECT_INS_EQ(b_noescape->GetEnvironment()->GetInstructionAt(0), inst_phi);
+ EXPECT_INS_EQ(b_noescape->GetEnvironment()->GetInstructionAt(1), pred_gets[0]);
+ EXPECT_INS_EQ(b_noescape->GetEnvironment()->GetInstructionAt(2), pred_gets[1]);
+ EXPECT_EQ(c_noescape->GetEnvironment()->Size(), 4u);
+ EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(0), inst_phi);
+ EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(1), pred_gets[0]);
+ EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(2), pred_gets[1]);
+ EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(3), pred_gets[2]);
+}
+
+// // ENTRY
+// // To be moved
+// obj = new Obj();
+// obj.foo = 12;
+// int a = obj.foo;
+// obj.foo = 13;
+// noescape();
+// int b = obj.foo;
+// obj.foo = 14;
+// noescape();
+// int c = obj.foo;
+// obj.foo = 15;
+// noescape();
+// if (parameter_value) {
+// // LEFT
+// escape(obj);
+// }
+// EXIT
+// return a + b + c + obj.foo
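+// Expected result (sketch, based on the assertions below): a, b and c can be
+// eliminated outright (their values 12, 13 and 14 are known), the final store
+// of 15 sinks into the materialization block on the escaping path, and only
+// the last load survives as a PredicatedInstanceFieldGet defaulting to a phi
+// of [0, 15].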
+TEST_F(LoadStoreEliminationTest, MultiPartialLoadStore2) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+  // We need a separate entry block here since we check the env layout, and
+  // the way constants get added to the entry block would otherwise mess it up.
+ AdjacencyListGraph blks(SetupFromAdjacencyList("start",
+ "exit",
+ {{"start", "entry"},
+ {"entry", "left"},
+ {"entry", "right"},
+ {"right", "breturn"},
+ {"left", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(start);
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {left, right});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* c12 = graph_->GetIntConstant(12);
+ HInstruction* c13 = graph_->GetIntConstant(13);
+ HInstruction* c14 = graph_->GetIntConstant(14);
+ HInstruction* c15 = graph_->GetIntConstant(15);
+
+ HInstruction* start_suspend = new (GetAllocator()) HSuspendCheck();
+ HInstruction* start_goto = new (GetAllocator()) HGoto();
+
+ start->AddInstruction(start_suspend);
+ start->AddInstruction(start_goto);
+ ManuallyBuildEnvFor(start_suspend, {});
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* store = MakeIFieldSet(new_inst, c12, MemberOffset(32));
+
+ HInstruction* a_val = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* a_reset = MakeIFieldSet(new_inst, c13, MemberOffset(32));
+ HInstruction* a_noescape = MakeInvoke(DataType::Type::kVoid, {});
+ HInstruction* b_val = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* b_reset = MakeIFieldSet(new_inst, c14, MemberOffset(32));
+ HInstruction* b_noescape = MakeInvoke(DataType::Type::kVoid, {});
+ HInstruction* c_val = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* c_reset = MakeIFieldSet(new_inst, c15, MemberOffset(32));
+ HInstruction* c_noescape = MakeInvoke(DataType::Type::kVoid, {});
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(store);
+ entry->AddInstruction(a_val);
+ entry->AddInstruction(a_reset);
+ entry->AddInstruction(a_noescape);
+ entry->AddInstruction(b_val);
+ entry->AddInstruction(b_reset);
+ entry->AddInstruction(b_noescape);
+ entry->AddInstruction(c_val);
+ entry->AddInstruction(c_reset);
+ entry->AddInstruction(c_noescape);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+ ManuallyBuildEnvFor(a_noescape, {new_inst, a_val});
+ ManuallyBuildEnvFor(b_noescape, {new_inst, a_val, b_val});
+ ManuallyBuildEnvFor(c_noescape, {new_inst, a_val, b_val, c_val});
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(c_noescape->GetEnvironment());
+
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(goto_right);
+
+ HInstruction* val_exit = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* add_1_exit = new (GetAllocator()) HAdd(DataType::Type::kInt32, a_val, b_val);
+ HInstruction* add_2_exit = new (GetAllocator()) HAdd(DataType::Type::kInt32, c_val, add_1_exit);
+ HInstruction* add_3_exit =
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, val_exit, add_2_exit);
+ HInstruction* return_exit = new (GetAllocator()) HReturn(add_3_exit);
+ breturn->AddInstruction(val_exit);
+ breturn->AddInstruction(add_1_exit);
+ breturn->AddInstruction(add_2_exit);
+ breturn->AddInstruction(add_3_exit);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ HNewInstance* moved_new_inst = nullptr;
+ HInstanceFieldSet* moved_set = nullptr;
+ std::tie(moved_new_inst, moved_set) =
+ FindSingleInstructions<HNewInstance, HInstanceFieldSet>(graph_, left->GetSinglePredecessor());
+ std::vector<HPredicatedInstanceFieldGet*> pred_gets;
+ std::vector<HInstanceFieldSet*> pred_sets;
+ std::vector<HPhi*> return_phis;
+ std::tie(return_phis, pred_gets, pred_sets) =
+ FindAllInstructions<HPhi, HPredicatedInstanceFieldGet, HInstanceFieldSet>(graph_, breturn);
+ ASSERT_EQ(return_phis.size(), 2u);
+ HPhi* inst_phi = return_phis[0];
+ HPhi* val_phi = return_phis[1];
+ if (inst_phi->GetType() != DataType::Type::kReference) {
+ std::swap(inst_phi, val_phi);
+ }
+ ASSERT_NE(moved_new_inst, nullptr);
+ EXPECT_INS_EQ(inst_phi->InputAt(0), moved_new_inst);
+ EXPECT_INS_EQ(inst_phi->InputAt(1), graph_->GetNullConstant());
+ EXPECT_INS_EQ(val_phi->InputAt(0), graph_->GetIntConstant(0));
+ EXPECT_INS_EQ(val_phi->InputAt(1), c15);
+ ASSERT_EQ(pred_gets.size(), 1u);
+ ASSERT_EQ(pred_sets.size(), 0u);
+ ASSERT_NE(moved_set, nullptr);
+ EXPECT_INS_EQ(moved_set->InputAt(0), moved_new_inst);
+ EXPECT_INS_EQ(moved_set->InputAt(1), c15);
+ EXPECT_INS_RETAINED(call_left);
+ // store removed or moved.
+ EXPECT_NE(store->GetBlock(), entry);
+ // New-inst removed or moved.
+ EXPECT_NE(new_inst->GetBlock(), entry);
+ EXPECT_INS_REMOVED(a_val);
+ EXPECT_INS_REMOVED(b_val);
+ EXPECT_INS_REMOVED(c_val);
+ EXPECT_INS_RETAINED(a_noescape);
+ EXPECT_INS_RETAINED(b_noescape);
+ EXPECT_INS_RETAINED(c_noescape);
+ EXPECT_INS_EQ(add_1_exit->InputAt(0), c12);
+ EXPECT_INS_EQ(add_1_exit->InputAt(1), c13);
+ EXPECT_INS_EQ(add_2_exit->InputAt(0), c14);
+ EXPECT_INS_EQ(add_2_exit->InputAt(1), add_1_exit);
+ EXPECT_INS_EQ(add_3_exit->InputAt(0), pred_gets[0]);
+ EXPECT_INS_EQ(pred_gets[0]->GetDefaultValue(), val_phi);
+ EXPECT_INS_EQ(add_3_exit->InputAt(1), add_2_exit);
+ EXPECT_EQ(a_noescape->GetEnvironment()->Size(), 2u);
+ EXPECT_INS_EQ(a_noescape->GetEnvironment()->GetInstructionAt(0), graph_->GetNullConstant());
+ EXPECT_INS_EQ(a_noescape->GetEnvironment()->GetInstructionAt(1), c12);
+ EXPECT_EQ(b_noescape->GetEnvironment()->Size(), 3u);
+ EXPECT_INS_EQ(b_noescape->GetEnvironment()->GetInstructionAt(0), graph_->GetNullConstant());
+ EXPECT_INS_EQ(b_noescape->GetEnvironment()->GetInstructionAt(1), c12);
+ EXPECT_INS_EQ(b_noescape->GetEnvironment()->GetInstructionAt(2), c13);
+ EXPECT_EQ(c_noescape->GetEnvironment()->Size(), 4u);
+ EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(0), graph_->GetNullConstant());
+ EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(1), c12);
+ EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(2), c13);
+ EXPECT_INS_EQ(c_noescape->GetEnvironment()->GetInstructionAt(3), c14);
+}
+
+// // ENTRY
+// // To be moved
+// obj = new Obj();
+// // The transforms required to create this shape are non-trivial and unimportant here.
+// if (parameter_value) {
+// obj.foo = 10
+// } else {
+// obj.foo = 12;
+// }
+// if (parameter_value_2) {
+// escape(obj);
+// }
+// EXIT
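+// Expected result (sketch, per the assertions below): the two branch stores
+// are replaced by a phi of [10, 12] at the merge, which feeds a single store
+// in the materialization block created on the escaping path.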
+TEST_F(LoadStoreEliminationTest, MovePredicatedAlloc2) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left_set"},
+ {"entry", "right_set"},
+ {"left_set", "merge_crit_break"},
+ {"right_set", "merge_crit_break"},
+ {"merge_crit_break", "merge"},
+ {"merge", "escape"},
+ {"escape", "breturn"},
+ {"merge", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left_set);
+ GET_BLOCK(right_set);
+ GET_BLOCK(merge);
+ GET_BLOCK(merge_crit_break);
+ GET_BLOCK(escape);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {merge, escape});
+ EnsurePredecessorOrder(merge_crit_break, {left_set, right_set});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* bool_value_2 = MakeParam(DataType::Type::kBool);
+ HInstruction* c10 = graph_->GetIntConstant(10);
+ HInstruction* c12 = graph_->GetIntConstant(12);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* store_left = MakeIFieldSet(new_inst, c10, MemberOffset(32));
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left_set->AddInstruction(store_left);
+ left_set->AddInstruction(goto_left);
+
+ HInstruction* store_right = MakeIFieldSet(new_inst, c12, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right_set->AddInstruction(store_right);
+ right_set->AddInstruction(goto_right);
+
+ merge_crit_break->AddInstruction(new (GetAllocator()) HGoto());
+ HInstruction* if_merge = new (GetAllocator()) HIf(bool_value_2);
+ merge->AddInstruction(if_merge);
+
+ HInstruction* escape_instruction = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* escape_goto = new (GetAllocator()) HGoto();
+ escape->AddInstruction(escape_instruction);
+ escape->AddInstruction(escape_goto);
+ escape_instruction->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* return_exit = new (GetAllocator()) HReturnVoid();
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+  HNewInstance* moved_new_inst = nullptr;
+  HInstanceFieldSet* moved_set = nullptr;
+  std::tie(moved_new_inst, moved_set) =
+      FindSingleInstructions<HNewInstance, HInstanceFieldSet>(graph_);
+  HPhi* merge_phi = FindSingleInstruction<HPhi>(graph_, merge_crit_break);
+  HPhi* alloc_phi = FindSingleInstruction<HPhi>(graph_, breturn);
+  ASSERT_NE(moved_new_inst, nullptr);
+  ASSERT_NE(moved_set, nullptr);
+  EXPECT_INS_EQ(moved_new_inst, moved_set->InputAt(0));
+ ASSERT_NE(alloc_phi, nullptr);
+ EXPECT_EQ(alloc_phi->InputAt(0), graph_->GetNullConstant())
+ << alloc_phi->GetBlock()->GetPredecessors()[0]->GetBlockId() << " " << *alloc_phi;
+ EXPECT_TRUE(alloc_phi->InputAt(1)->IsNewInstance()) << *alloc_phi;
+ ASSERT_NE(merge_phi, nullptr);
+ EXPECT_EQ(merge_phi->InputCount(), 2u);
+ EXPECT_INS_EQ(merge_phi->InputAt(0), c10);
+ EXPECT_INS_EQ(merge_phi->InputAt(1), c12);
+ EXPECT_TRUE(merge_phi->GetUses().HasExactlyOneElement());
+ EXPECT_INS_EQ(merge_phi->GetUses().front().GetUser(), moved_set);
+ EXPECT_INS_RETAINED(escape_instruction);
+ EXPECT_INS_EQ(escape_instruction->InputAt(0), moved_new_inst);
+ // store removed or moved.
+  EXPECT_NE(store_left->GetBlock(), left_set);
+  EXPECT_NE(store_right->GetBlock(), right_set);
+ // New-inst removed or moved.
+ EXPECT_NE(new_inst->GetBlock(), entry);
+}
+
+// // ENTRY
+// // To be moved
+// obj = new Obj();
+// switch(args) {
+// default:
+// return obj.a;
+//   case b:
+//     obj.a = 4; break;
+//   case c:
+//     obj.a = 5; break;
+// }
+// escape(obj);
+// return obj.a;
+// EXIT
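+// Expected result (sketch, per the assertions below): each escaping switch
+// arm gets its own materialization block, a phi in the escape block merges
+// the two materialized instances, and the early return's load folds to the
+// default value 0.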
+TEST_F(LoadStoreEliminationTest, MovePredicatedAlloc3) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "early_return"},
+ {"entry", "set_one"},
+ {"entry", "set_two"},
+ {"early_return", "exit"},
+ {"set_one", "escape"},
+ {"set_two", "escape"},
+ {"escape", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(escape);
+ GET_BLOCK(early_return);
+ GET_BLOCK(set_one);
+ GET_BLOCK(set_two);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(escape, {set_one, set_two});
+ HInstruction* int_val = MakeParam(DataType::Type::kInt32);
+ HInstruction* c0 = graph_->GetIntConstant(0);
+ HInstruction* c4 = graph_->GetIntConstant(4);
+ HInstruction* c5 = graph_->GetIntConstant(5);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, int_val);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(switch_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* store_one = MakeIFieldSet(new_inst, c4, MemberOffset(32));
+ HInstruction* goto_one = new (GetAllocator()) HGoto();
+ set_one->AddInstruction(store_one);
+ set_one->AddInstruction(goto_one);
+
+ HInstruction* store_two = MakeIFieldSet(new_inst, c5, MemberOffset(32));
+ HInstruction* goto_two = new (GetAllocator()) HGoto();
+ set_two->AddInstruction(store_two);
+ set_two->AddInstruction(goto_two);
+
+ HInstruction* read_early = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_early = new (GetAllocator()) HReturn(read_early);
+ early_return->AddInstruction(read_early);
+ early_return->AddInstruction(return_early);
+
+ HInstruction* escape_instruction = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* read_escape = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_escape = new (GetAllocator()) HReturn(read_escape);
+ escape->AddInstruction(escape_instruction);
+ escape->AddInstruction(read_escape);
+ escape->AddInstruction(return_escape);
+ escape_instruction->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ // Each escaping switch path gets its own materialization block.
+ // Blocks:
+ // early_return(5) -> [exit(4)]
+ // entry(3) -> [early_return(5), <Unnamed>(9), <Unnamed>(10)]
+ // escape(8) -> [exit(4)]
+ // exit(4) -> []
+ // set_one(6) -> [escape(8)]
+ // set_two(7) -> [escape(8)]
+ // <Unnamed>(10) -> [set_two(7)]
+ // <Unnamed>(9) -> [set_one(6)]
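+  // The <Unnamed> blocks are the materialization blocks partial LSE inserted
+  // on the edges into the escaping switch arms.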
+ HBasicBlock* materialize_one = set_one->GetSinglePredecessor();
+ HBasicBlock* materialize_two = set_two->GetSinglePredecessor();
+ HNewInstance* materialization_ins_one =
+ FindSingleInstruction<HNewInstance>(graph_, materialize_one);
+ HNewInstance* materialization_ins_two =
+ FindSingleInstruction<HNewInstance>(graph_, materialize_two);
+  HPhi* new_phi = FindSingleInstruction<HPhi>(graph_, escape);
+  ASSERT_NE(new_phi, nullptr);
+  ASSERT_NE(materialization_ins_one, nullptr);
+  ASSERT_NE(materialization_ins_two, nullptr);
+ EXPECT_EQ(materialization_ins_one, new_phi->InputAt(0))
+ << *materialization_ins_one << " vs " << *new_phi;
+ EXPECT_EQ(materialization_ins_two, new_phi->InputAt(1))
+ << *materialization_ins_two << " vs " << *new_phi;
+
+ EXPECT_INS_RETAINED(escape_instruction);
+ EXPECT_INS_RETAINED(read_escape);
+ EXPECT_EQ(read_escape->InputAt(0), new_phi) << *new_phi << " vs " << *read_escape->InputAt(0);
+ EXPECT_EQ(store_one->InputAt(0), materialization_ins_one);
+ EXPECT_EQ(store_two->InputAt(0), materialization_ins_two);
+ EXPECT_EQ(escape_instruction->InputAt(0), new_phi);
+ EXPECT_INS_REMOVED(read_early);
+ EXPECT_EQ(return_early->InputAt(0), c0);
+}
+
+// // ENTRY
+// // To be moved
+// obj = new Obj();
+// switch(args) {
+// case a:
+// // set_one_and_escape
+//     obj.a = 4;
+// escape(obj);
+// // FALLTHROUGH
+// case c:
+// // set_two
+//     obj.a = 5; break;
+// default:
+// return obj.a;
+// }
+// escape(obj);
+// return obj.a;
+// EXIT
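+// Expected result (sketch, per the assertions below): both arms that reach
+// the escape get their own materialization block; the first arm's escape(obj)
+// sees its own materialized instance, while set_two and the final escape use
+// a phi merging the two materializations.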
+TEST_F(LoadStoreEliminationTest, MovePredicatedAlloc4) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ // Break the critical edge between entry and set_two with the
+ // set_two_critical_break node. Graph simplification would do this for us if
+  // we didn't do it manually. This way we have a nice name for debugging and
+ // testing.
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "early_return"},
+ {"entry", "set_one_and_escape"},
+ {"entry", "set_two_critical_break"},
+ {"set_two_critical_break", "set_two"},
+ {"early_return", "exit"},
+ {"set_one_and_escape", "set_two"},
+ {"set_two", "escape"},
+ {"escape", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(escape);
+ GET_BLOCK(early_return);
+ GET_BLOCK(set_one_and_escape);
+ GET_BLOCK(set_two);
+ GET_BLOCK(set_two_critical_break);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(set_two, {set_one_and_escape, set_two_critical_break});
+ HInstruction* int_val = MakeParam(DataType::Type::kInt32);
+ HInstruction* c0 = graph_->GetIntConstant(0);
+ HInstruction* c4 = graph_->GetIntConstant(4);
+ HInstruction* c5 = graph_->GetIntConstant(5);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, int_val);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(switch_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* store_one = MakeIFieldSet(new_inst, c4, MemberOffset(32));
+ HInstruction* escape_one = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_one = new (GetAllocator()) HGoto();
+ set_one_and_escape->AddInstruction(store_one);
+ set_one_and_escape->AddInstruction(escape_one);
+ set_one_and_escape->AddInstruction(goto_one);
+ escape_one->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_crit_break = new (GetAllocator()) HGoto();
+ set_two_critical_break->AddInstruction(goto_crit_break);
+
+ HInstruction* store_two = MakeIFieldSet(new_inst, c5, MemberOffset(32));
+ HInstruction* goto_two = new (GetAllocator()) HGoto();
+ set_two->AddInstruction(store_two);
+ set_two->AddInstruction(goto_two);
+
+ HInstruction* read_early = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_early = new (GetAllocator()) HReturn(read_early);
+ early_return->AddInstruction(read_early);
+ early_return->AddInstruction(return_early);
+
+ HInstruction* escape_instruction = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* read_escape = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_escape = new (GetAllocator()) HReturn(read_escape);
+ escape->AddInstruction(escape_instruction);
+ escape->AddInstruction(read_escape);
+ escape->AddInstruction(return_escape);
+ escape_instruction->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(read_early);
+ EXPECT_EQ(return_early->InputAt(0), c0);
+ // Each escaping switch path gets its own materialization block.
+ // Blocks:
+ // early_return(5) -> [exit(4)]
+ // entry(3) -> [early_return(5), <Unnamed>(10), <Unnamed>(11)]
+ // escape(9) -> [exit(4)]
+ // exit(4) -> []
+ // set_one_and_escape(6) -> [set_two(8)]
+ // set_two(8) -> [escape(9)]
+ // set_two_critical_break(7) -> [set_two(8)]
+ // <Unnamed>(11) -> [set_two_critical_break(7)]
+ // <Unnamed>(10) -> [set_one_and_escape(6)]
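+  // As above, the <Unnamed> blocks are the materialization blocks inserted on
+  // the edges into the escaping paths.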
+ HBasicBlock* materialize_one = set_one_and_escape->GetSinglePredecessor();
+ HBasicBlock* materialize_two = set_two_critical_break->GetSinglePredecessor();
+ HNewInstance* materialization_ins_one =
+ FindSingleInstruction<HNewInstance>(graph_, materialize_one);
+ HNewInstance* materialization_ins_two =
+ FindSingleInstruction<HNewInstance>(graph_, materialize_two);
+ HPhi* new_phi = FindSingleInstruction<HPhi>(graph_, set_two);
+ ASSERT_NE(new_phi, nullptr);
+ ASSERT_NE(materialization_ins_one, nullptr);
+ ASSERT_NE(materialization_ins_two, nullptr);
+ EXPECT_INS_EQ(materialization_ins_one, new_phi->InputAt(0));
+ EXPECT_INS_EQ(materialization_ins_two, new_phi->InputAt(1));
+
+ EXPECT_INS_EQ(store_one->InputAt(0), materialization_ins_one);
+ EXPECT_INS_EQ(store_two->InputAt(0), new_phi) << *store_two << " vs " << *new_phi;
+ EXPECT_INS_EQ(escape_instruction->InputAt(0), new_phi);
+ EXPECT_INS_RETAINED(escape_one);
+ EXPECT_INS_EQ(escape_one->InputAt(0), materialization_ins_one);
+ EXPECT_INS_RETAINED(escape_instruction);
+ EXPECT_INS_RETAINED(read_escape);
+ EXPECT_EQ(read_escape->InputAt(0), new_phi) << *new_phi << " vs " << *read_escape->InputAt(0);
+}
+
+// // ENTRY
+// // To be moved
+// obj = new Obj();
+// switch(args) {
+// case a:
+// // set_one
+// obj.a = 5;
+// // nb passthrough
+// case c:
+// // set_two_and_escape
+// obj.a += 4;
+// escape(obj);
+// break;
+// default:
+// obj.a = 10;
+// }
+// return obj.a;
+// EXIT
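+// Expected result (sketch, per the assertions below): only the escaping arms
+// materialize obj; the final load becomes a PredicatedInstanceFieldGet whose
+// target is a phi of [materialized obj, null] and whose default value is a
+// phi of [0, 10].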
+TEST_F(LoadStoreEliminationTest, MovePredicatedAlloc5) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+  // Break the critical edge between entry and set_two_and_escape with the
+  // set_two_critical_break node. Graph simplification would do this for us if
+  // we didn't do it manually. This way we have a nice name for debugging and
+ // testing.
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "set_noescape"},
+ {"entry", "set_one"},
+ {"entry", "set_two_critical_break"},
+ {"set_two_critical_break", "set_two_and_escape"},
+ {"set_noescape", "breturn"},
+ {"set_one", "set_two_and_escape"},
+ {"set_two_and_escape", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(set_noescape);
+ GET_BLOCK(set_one);
+ GET_BLOCK(set_two_and_escape);
+ GET_BLOCK(set_two_critical_break);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(set_two_and_escape, {set_one, set_two_critical_break});
+ EnsurePredecessorOrder(breturn, {set_two_and_escape, set_noescape});
+ HInstruction* int_val = MakeParam(DataType::Type::kInt32);
+ HInstruction* c0 = graph_->GetIntConstant(0);
+ HInstruction* c4 = graph_->GetIntConstant(4);
+ HInstruction* c5 = graph_->GetIntConstant(5);
+ HInstruction* c10 = graph_->GetIntConstant(10);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, int_val);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(switch_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* store_one = MakeIFieldSet(new_inst, c5, MemberOffset(32));
+ HInstruction* goto_one = new (GetAllocator()) HGoto();
+ set_one->AddInstruction(store_one);
+ set_one->AddInstruction(goto_one);
+
+ HInstruction* goto_crit_break = new (GetAllocator()) HGoto();
+ set_two_critical_break->AddInstruction(goto_crit_break);
+
+ HInstruction* get_two = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* add_two = new (GetAllocator()) HAdd(DataType::Type::kInt32, get_two, c4);
+ HInstruction* store_two = MakeIFieldSet(new_inst, add_two, MemberOffset(32));
+ HInstruction* escape_two = MakeInvoke(DataType::Type::kVoid, {new_inst});
+ HInstruction* goto_two = new (GetAllocator()) HGoto();
+ set_two_and_escape->AddInstruction(get_two);
+ set_two_and_escape->AddInstruction(add_two);
+ set_two_and_escape->AddInstruction(store_two);
+ set_two_and_escape->AddInstruction(escape_two);
+ set_two_and_escape->AddInstruction(goto_two);
+ escape_two->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* store_noescape = MakeIFieldSet(new_inst, c10, MemberOffset(32));
+ HInstruction* goto_noescape = new (GetAllocator()) HGoto();
+ set_noescape->AddInstruction(store_noescape);
+ set_noescape->AddInstruction(goto_noescape);
+
+ HInstruction* read_breturn = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_breturn = new (GetAllocator()) HReturn(read_breturn);
+ breturn->AddInstruction(read_breturn);
+ breturn->AddInstruction(return_breturn);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ // Normal LSE can get rid of these two.
+ EXPECT_INS_REMOVED(store_one);
+ EXPECT_INS_REMOVED(get_two);
+ EXPECT_INS_RETAINED(add_two);
+ EXPECT_TRUE(add_two->InputAt(0)->IsPhi());
+ EXPECT_INS_EQ(add_two->InputAt(0)->InputAt(0), c5);
+ EXPECT_INS_EQ(add_two->InputAt(0)->InputAt(1), c0);
+ EXPECT_INS_EQ(add_two->InputAt(1), c4);
+
+ HBasicBlock* materialize_one = set_one->GetSinglePredecessor();
+ HBasicBlock* materialize_two = set_two_critical_break->GetSinglePredecessor();
+ HNewInstance* materialization_ins_one =
+ FindSingleInstruction<HNewInstance>(graph_, materialize_one);
+ HNewInstance* materialization_ins_two =
+ FindSingleInstruction<HNewInstance>(graph_, materialize_two);
+ std::vector<HPhi*> phis;
+ std::tie(phis) = FindAllInstructions<HPhi>(graph_, set_two_and_escape);
+ HPhi* new_phi = FindOrNull(
+ phis.begin(), phis.end(), [&](auto p) { return p->GetType() == DataType::Type::kReference; });
+ ASSERT_NE(new_phi, nullptr);
+ ASSERT_NE(materialization_ins_one, nullptr);
+ ASSERT_NE(materialization_ins_two, nullptr);
+ EXPECT_INS_EQ(materialization_ins_one, new_phi->InputAt(0));
+ EXPECT_INS_EQ(materialization_ins_two, new_phi->InputAt(1));
+
+  HPredicatedInstanceFieldGet* pred_get =
+      FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+  ASSERT_NE(pred_get, nullptr);
+  EXPECT_TRUE(pred_get->GetTarget()->IsPhi());
+ EXPECT_INS_EQ(pred_get->GetTarget()->InputAt(0), new_phi);
+ EXPECT_INS_EQ(pred_get->GetTarget()->InputAt(1), graph_->GetNullConstant());
+
+ EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(0), c0);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(1), c10);
}
// // ENTRY
@@ -2421,117 +4171,59 @@
// }
// EXIT
TEST_F(LoadStoreEliminationTest, PartialLoadElimination3) {
- InitGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blks(SetupFromAdjacencyList(
"entry",
"exit",
- { { "entry", "left" }, { "entry", "right" }, { "left", "exit" }, { "right", "exit" } }));
+ {{"entry", "left"}, {"entry", "right"}, {"left", "exit"}, {"right", "exit"}}));
#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
GET_BLOCK(entry);
GET_BLOCK(exit);
GET_BLOCK(left);
GET_BLOCK(right);
#undef GET_BLOCK
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(if_inst);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, ¤t_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_left = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- HInstruction* read_left = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* read_left = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_left = new (GetAllocator()) HReturn(read_left);
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
left->AddInstruction(write_left);
left->AddInstruction(call_left);
left->AddInstruction(read_left);
left->AddInstruction(return_left);
call_left->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* read_right = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* read_right = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_right = new (GetAllocator()) HReturn(read_right);
right->AddInstruction(write_right);
right->AddInstruction(read_right);
right->AddInstruction(return_right);
- HInstruction* exit_instruction = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_instruction);
+ SetupExit(exit);
+
// PerformLSE expects this to be empty.
graph_->ClearDominanceInformation();
PerformLSE();
- EXPECT_TRUE(IsRemoved(read_right));
- EXPECT_TRUE(IsRemoved(write_right));
- EXPECT_FALSE(IsRemoved(write_left));
- EXPECT_FALSE(IsRemoved(call_left));
- EXPECT_FALSE(IsRemoved(read_left));
+ EXPECT_INS_REMOVED(read_right);
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_RETAINED(write_left);
+ EXPECT_INS_RETAINED(call_left);
+ EXPECT_INS_RETAINED(read_left);
}
// // ENTRY
@@ -2556,17 +4248,18 @@
// }
// EXIT
TEST_F(LoadStoreEliminationTest, PartialLoadElimination4) {
- InitGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
"exit",
- { { "entry", "entry_post" },
- { "entry_post", "right" },
- { "right", "exit" },
- { "entry_post", "left_pre" },
- { "left_pre", "left_loop" },
- { "left_loop", "left_loop" },
- { "left_loop", "left_finish" },
- { "left_finish", "exit" } }));
+ {{"entry", "entry_post"},
+ {"entry_post", "right"},
+ {"right", "exit"},
+ {"entry_post", "left_pre"},
+ {"left_pre", "left_loop"},
+ {"left_loop", "left_loop"},
+ {"left_loop", "left_finish"},
+ {"left_finish", "exit"}}));
#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
GET_BLOCK(entry);
GET_BLOCK(entry_post);
@@ -2580,75 +4273,32 @@
if (left_loop->GetSuccessors()[0] != left_finish) {
left_loop->SwapSuccessors();
}
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* goto_entry = new (GetAllocator()) HGoto();
- entry->AddInstruction(bool_value);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(goto_entry);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, ¤t_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
entry_post->AddInstruction(if_inst);
- HInstruction* write_left_pre = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_left_pre = MakeIFieldSet(new_inst, c1, MemberOffset(32));
HInstruction* goto_left_pre = new (GetAllocator()) HGoto();
left_pre->AddInstruction(write_left_pre);
left_pre->AddInstruction(goto_left_pre);
HInstruction* suspend_left_loop = new (GetAllocator()) HSuspendCheck();
- HInstruction* call_left_loop = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kBool,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- HInstruction* write_left_loop = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c3,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* call_left_loop = MakeInvoke(DataType::Type::kBool, { new_inst });
+ HInstruction* write_left_loop = MakeIFieldSet(new_inst, c3, MemberOffset(32));
HInstruction* if_left_loop = new (GetAllocator()) HIf(call_left_loop);
- call_left_loop->AsInvoke()->SetRawInputAt(0, new_inst);
left_loop->AddInstruction(suspend_left_loop);
left_loop->AddInstruction(call_left_loop);
left_loop->AddInstruction(write_left_loop);
@@ -2656,55 +4306,30 @@
suspend_left_loop->CopyEnvironmentFrom(cls->GetEnvironment());
call_left_loop->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* read_left_end = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* read_left_end = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_left_end = new (GetAllocator()) HReturn(read_left_end);
left_finish->AddInstruction(read_left_end);
left_finish->AddInstruction(return_left_end);
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* read_right = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* read_right = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_right = new (GetAllocator()) HReturn(read_right);
right->AddInstruction(write_right);
right->AddInstruction(read_right);
right->AddInstruction(return_right);
- HInstruction* exit_instruction = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_instruction);
+ SetupExit(exit);
+
// PerformLSE expects this to be empty.
graph_->ClearDominanceInformation();
PerformLSE();
- EXPECT_FALSE(IsRemoved(write_left_pre));
- EXPECT_TRUE(IsRemoved(read_right));
- EXPECT_TRUE(IsRemoved(write_right));
- EXPECT_FALSE(IsRemoved(write_left_loop));
- EXPECT_FALSE(IsRemoved(call_left_loop));
- EXPECT_TRUE(IsRemoved(read_left_end));
+ EXPECT_INS_RETAINED(write_left_pre);
+ EXPECT_INS_REMOVED(read_right);
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_RETAINED(write_left_loop);
+ EXPECT_INS_RETAINED(call_left_loop);
+ EXPECT_INS_REMOVED(read_left_end);
}
// // ENTRY
@@ -2725,14 +4350,15 @@
// ELIMINATE
// return obj.field
TEST_F(LoadStoreEliminationTest, PartialLoadElimination5) {
- InitGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
"exit",
- { { "entry", "left" },
- { "entry", "right" },
- { "left", "breturn" },
- { "right", "breturn" },
- { "breturn", "exit" } }));
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
GET_BLOCK(entry);
GET_BLOCK(exit);
@@ -2740,112 +4366,51 @@
GET_BLOCK(left);
GET_BLOCK(right);
#undef GET_BLOCK
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(if_inst);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, ¤t_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- HInstruction* write_left = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
left->AddInstruction(call_left);
left->AddInstruction(write_left);
left->AddInstruction(goto_left);
call_left->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call_right = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 0,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* call_right = MakeInvoke(DataType::Type::kVoid, {});
HInstruction* goto_right = new (GetAllocator()) HGoto();
right->AddInstruction(write_right);
right->AddInstruction(call_right);
right->AddInstruction(goto_right);
call_right->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* read_bottom = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
breturn->AddInstruction(read_bottom);
breturn->AddInstruction(return_exit);
- HInstruction* exit_instruction = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_instruction);
+ SetupExit(exit);
+
// PerformLSE expects this to be empty.
graph_->ClearDominanceInformation();
PerformLSE();
- EXPECT_TRUE(IsRemoved(read_bottom));
- EXPECT_TRUE(IsRemoved(write_right));
- EXPECT_FALSE(IsRemoved(write_left));
- EXPECT_FALSE(IsRemoved(call_left));
- EXPECT_FALSE(IsRemoved(call_right));
+ EXPECT_INS_REMOVED(read_bottom);
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_RETAINED(write_left);
+ EXPECT_INS_RETAINED(call_left);
+ EXPECT_INS_RETAINED(call_right);
}
// // ENTRY
@@ -2866,16 +4431,17 @@
// }
// EXIT
// ELIMINATE
// return obj.field
TEST_F(LoadStoreEliminationTest, PartialLoadElimination6) {
- InitGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
"exit",
- { { "entry", "left" },
- { "entry", "right" },
- { "left", "breturn" },
- { "right", "breturn" },
- { "breturn", "exit" } }));
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
GET_BLOCK(entry);
GET_BLOCK(exit);
@@ -2883,138 +4449,59 @@
GET_BLOCK(left);
GET_BLOCK(right);
#undef GET_BLOCK
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
HInstruction* c3 = graph_->GetIntConstant(3);
HInstruction* c5 = graph_->GetIntConstant(5);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
- HInstruction* write_entry = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c3,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call_entry = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 0,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* call_entry = MakeInvoke(DataType::Type::kVoid, {});
HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(write_entry);
entry->AddInstruction(call_entry);
entry->AddInstruction(if_inst);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, ¤t_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
call_entry->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_left_start = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c5,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- HInstruction* write_left = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_left_start = MakeIFieldSet(new_inst, c5, MemberOffset(32));
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
left->AddInstruction(write_left_start);
left->AddInstruction(call_left);
left->AddInstruction(write_left);
left->AddInstruction(goto_left);
call_left->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
HInstruction* goto_right = new (GetAllocator()) HGoto();
right->AddInstruction(write_right);
right->AddInstruction(goto_right);
- HInstruction* read_bottom = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
breturn->AddInstruction(read_bottom);
breturn->AddInstruction(return_exit);
- HInstruction* exit_instruction = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_instruction);
+ SetupExit(exit);
+
// PerformLSE expects this to be empty.
graph_->ClearDominanceInformation();
PerformLSE();
- EXPECT_TRUE(IsRemoved(read_bottom));
- EXPECT_TRUE(IsRemoved(write_right));
- EXPECT_TRUE(IsRemoved(write_entry));
- EXPECT_FALSE(IsRemoved(write_left_start));
- EXPECT_FALSE(IsRemoved(write_left));
- EXPECT_FALSE(IsRemoved(call_left));
- EXPECT_FALSE(IsRemoved(call_entry));
+ EXPECT_INS_REMOVED(read_bottom);
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_REMOVED(write_entry);
+ EXPECT_INS_RETAINED(write_left_start);
+ EXPECT_INS_RETAINED(write_left);
+ EXPECT_INS_RETAINED(call_left);
+ EXPECT_INS_RETAINED(call_entry);
}
// // ENTRY
@@ -3038,18 +4525,19 @@
// return obj.field;
// EXIT
TEST_F(LoadStoreEliminationTest, PartialLoadPreserved3) {
- InitGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
"exit",
- { { "entry", "entry_post" },
- { "entry_post", "right" },
- { "right", "return_block" },
- { "entry_post", "left_pre" },
- { "left_pre", "left_loop" },
- { "left_loop", "left_loop_post" },
- { "left_loop_post", "left_loop" },
- { "left_loop", "return_block" },
- { "return_block", "exit" } }));
+ {{"entry", "entry_post"},
+ {"entry_post", "right"},
+ {"right", "return_block"},
+ {"entry_post", "left_pre"},
+ {"left_pre", "left_loop"},
+ {"left_loop", "left_loop_post"},
+ {"left_loop_post", "left_loop"},
+ {"left_loop", "return_block"},
+ {"return_block", "exit"}}));
#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
GET_BLOCK(entry);
GET_BLOCK(entry_post);
@@ -3064,123 +4552,63 @@
if (left_loop->GetSuccessors()[0] != return_block) {
left_loop->SwapSuccessors();
}
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* goto_entry = new (GetAllocator()) HGoto();
- entry->AddInstruction(bool_value);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(goto_entry);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, ¤t_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
entry_post->AddInstruction(if_inst);
- HInstruction* write_left_pre = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_left_pre = MakeIFieldSet(new_inst, c1, MemberOffset(32));
HInstruction* goto_left_pre = new (GetAllocator()) HGoto();
left_pre->AddInstruction(write_left_pre);
left_pre->AddInstruction(goto_left_pre);
HInstruction* suspend_left_loop = new (GetAllocator()) HSuspendCheck();
- HInstruction* call_left_loop = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kBool,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* call_left_loop = MakeInvoke(DataType::Type::kBool, { new_inst });
HInstruction* if_left_loop = new (GetAllocator()) HIf(call_left_loop);
- call_left_loop->AsInvoke()->SetRawInputAt(0, new_inst);
left_loop->AddInstruction(suspend_left_loop);
left_loop->AddInstruction(call_left_loop);
left_loop->AddInstruction(if_left_loop);
suspend_left_loop->CopyEnvironmentFrom(cls->GetEnvironment());
call_left_loop->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_left_loop = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c3,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_left_loop = MakeIFieldSet(new_inst, c3, MemberOffset(32));
HInstruction* goto_left_loop = new (GetAllocator()) HGoto();
left_loop_post->AddInstruction(write_left_loop);
left_loop_post->AddInstruction(goto_left_loop);
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
HInstruction* goto_right = new (GetAllocator()) HGoto();
right->AddInstruction(write_right);
right->AddInstruction(goto_right);
- HInstruction* read_return = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* read_return = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_final = new (GetAllocator()) HReturn(read_return);
return_block->AddInstruction(read_return);
return_block->AddInstruction(return_final);
- HInstruction* exit_instruction = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_instruction);
+ SetupExit(exit);
+
// PerformLSE expects this to be empty.
graph_->ClearDominanceInformation();
- PerformLSE();
+ PerformLSENoPartial();
- EXPECT_FALSE(IsRemoved(write_left_pre));
- EXPECT_FALSE(IsRemoved(read_return));
- EXPECT_FALSE(IsRemoved(write_right));
- EXPECT_FALSE(IsRemoved(write_left_loop));
- EXPECT_FALSE(IsRemoved(call_left_loop));
+ EXPECT_INS_RETAINED(write_left_pre) << *write_left_pre;
+ EXPECT_INS_RETAINED(read_return) << *read_return;
+ EXPECT_INS_RETAINED(write_right) << *write_right;
+ EXPECT_INS_RETAINED(write_left_loop) << *write_left_loop;
+ EXPECT_INS_RETAINED(call_left_loop) << *call_left_loop;
}
// // ENTRY
@@ -3205,17 +4633,18 @@
// return obj.field;
// EXIT
TEST_F(LoadStoreEliminationTest, PartialLoadPreserved4) {
- InitGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
"exit",
- { { "entry", "entry_post" },
- { "entry_post", "right" },
- { "right", "return_block" },
- { "entry_post", "left_pre" },
- { "left_pre", "left_loop" },
- { "left_loop", "left_loop" },
- { "left_loop", "return_block" },
- { "return_block", "exit" } }));
+ {{"entry", "entry_post"},
+ {"entry_post", "right"},
+ {"right", "return_block"},
+ {"entry_post", "left_pre"},
+ {"left_pre", "left_loop"},
+ {"left_loop", "left_loop"},
+ {"left_loop", "return_block"},
+ {"return_block", "exit"}}));
#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
GET_BLOCK(entry);
GET_BLOCK(entry_post);
@@ -3229,73 +4658,31 @@
if (left_loop->GetSuccessors()[0] != return_block) {
left_loop->SwapSuccessors();
}
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* goto_entry = new (GetAllocator()) HGoto();
- entry->AddInstruction(bool_value);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(goto_entry);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, &current_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
entry_post->AddInstruction(if_inst);
- HInstruction* write_left_pre = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_left_pre = MakeIFieldSet(new_inst, c1, MemberOffset(32));
HInstruction* goto_left_pre = new (GetAllocator()) HGoto();
left_pre->AddInstruction(write_left_pre);
left_pre->AddInstruction(goto_left_pre);
HInstruction* suspend_left_loop = new (GetAllocator()) HSuspendCheck();
- HInstruction* call_left_loop = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 0,
- DataType::Type::kBool,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- HInstruction* write_left_loop = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c3,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* call_left_loop = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* write_left_loop = MakeIFieldSet(new_inst, c3, MemberOffset(32));
HInstruction* if_left_loop = new (GetAllocator()) HIf(call_left_loop);
left_loop->AddInstruction(suspend_left_loop);
left_loop->AddInstruction(call_left_loop);
@@ -3304,59 +4691,31 @@
suspend_left_loop->CopyEnvironmentFrom(cls->GetEnvironment());
call_left_loop->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call_right = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kBool,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* call_right = MakeInvoke(DataType::Type::kBool, { new_inst });
HInstruction* goto_right = new (GetAllocator()) HGoto();
- call_right->AsInvoke()->SetRawInputAt(0, new_inst);
right->AddInstruction(write_right);
right->AddInstruction(call_right);
right->AddInstruction(goto_right);
call_right->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* read_return = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* read_return = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_final = new (GetAllocator()) HReturn(read_return);
return_block->AddInstruction(read_return);
return_block->AddInstruction(return_final);
- HInstruction* exit_instruction = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_instruction);
+ SetupExit(exit);
+
// PerformLSE expects this to be empty.
graph_->ClearDominanceInformation();
- PerformLSE();
+ PerformLSENoPartial();
- EXPECT_FALSE(IsRemoved(read_return));
- EXPECT_FALSE(IsRemoved(write_right));
- EXPECT_FALSE(IsRemoved(write_left_loop));
- EXPECT_FALSE(IsRemoved(call_left_loop));
- EXPECT_TRUE(IsRemoved(write_left_pre));
- EXPECT_FALSE(IsRemoved(call_right));
+ EXPECT_INS_RETAINED(read_return);
+ EXPECT_INS_RETAINED(write_right);
+ EXPECT_INS_RETAINED(write_left_loop);
+ EXPECT_INS_RETAINED(call_left_loop);
+ EXPECT_INS_REMOVED(write_left_pre);
+ EXPECT_INS_RETAINED(call_right);
}
// // ENTRY
@@ -3379,14 +4738,15 @@
// ELIMINATE
// return obj.field
TEST_F(LoadStoreEliminationTest, PartialLoadPreserved5) {
- InitGraph();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
"exit",
- { { "entry", "left" },
- { "entry", "right" },
- { "left", "breturn" },
- { "right", "breturn" },
- { "breturn", "exit" } }));
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
GET_BLOCK(entry);
GET_BLOCK(exit);
@@ -3394,67 +4754,23 @@
GET_BLOCK(left);
GET_BLOCK(right);
#undef GET_BLOCK
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(if_inst);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, &current_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- HInstruction* write_left = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call2_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 0,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
+ HInstruction* call2_left = MakeInvoke(DataType::Type::kVoid, {});
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
left->AddInstruction(call_left);
left->AddInstruction(write_left);
left->AddInstruction(call2_left);
@@ -3462,57 +4778,30 @@
call_left->CopyEnvironmentFrom(cls->GetEnvironment());
call2_left->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call_right = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 0,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* call_right = MakeInvoke(DataType::Type::kVoid, {});
HInstruction* goto_right = new (GetAllocator()) HGoto();
right->AddInstruction(write_right);
right->AddInstruction(call_right);
right->AddInstruction(goto_right);
call_right->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* read_bottom = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
breturn->AddInstruction(read_bottom);
breturn->AddInstruction(return_exit);
- HInstruction* exit_instruction = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_instruction);
+ SetupExit(exit);
+
// PerformLSE expects this to be empty.
graph_->ClearDominanceInformation();
- PerformLSE();
+ PerformLSENoPartial();
- EXPECT_FALSE(IsRemoved(read_bottom));
- EXPECT_FALSE(IsRemoved(write_right));
- EXPECT_FALSE(IsRemoved(write_left));
- EXPECT_FALSE(IsRemoved(call_left));
- EXPECT_FALSE(IsRemoved(call_right));
+ EXPECT_INS_RETAINED(read_bottom);
+ EXPECT_INS_RETAINED(write_right);
+ EXPECT_INS_RETAINED(write_left);
+ EXPECT_INS_RETAINED(call_left);
+ EXPECT_INS_RETAINED(call_right);
}
// // ENTRY
@@ -3534,14 +4823,14 @@
// ELIMINATE
// return obj.field
TEST_F(LoadStoreEliminationTest, PartialLoadPreserved6) {
- InitGraph();
+ CreateGraph();
AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
"exit",
- { { "entry", "left" },
- { "entry", "right" },
- { "left", "breturn" },
- { "right", "breturn" },
- { "breturn", "exit" } }));
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
GET_BLOCK(entry);
GET_BLOCK(exit);
@@ -3549,125 +4838,3090 @@
GET_BLOCK(left);
GET_BLOCK(right);
#undef GET_BLOCK
- HInstruction* bool_value = new (GetAllocator())
- HParameterValue(graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kBool);
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* cls = new (GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- ScopedNullHandle<mirror::Class>(),
- false,
- 0,
- false);
- HInstruction* new_inst =
- new (GetAllocator()) HNewInstance(cls,
- 0,
- dex::TypeIndex(10),
- graph_->GetDexFile(),
- false,
- QuickEntrypointEnum::kQuickAllocObjectInitialized);
- HInstruction* write_entry = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c3,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
- HInstruction* call_entry = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 0,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* call_entry = MakeInvoke(DataType::Type::kVoid, {});
HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
- entry->AddInstruction(bool_value);
entry->AddInstruction(cls);
entry->AddInstruction(new_inst);
entry->AddInstruction(write_entry);
entry->AddInstruction(call_entry);
entry->AddInstruction(if_inst);
- ArenaVector<HInstruction*> current_locals({}, GetAllocator()->Adapter(kArenaAllocInstruction));
- ManuallyBuildEnvFor(cls, &current_locals);
+ ManuallyBuildEnvFor(cls, {});
new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
call_entry->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* call_left = new (GetAllocator())
- HInvokeStaticOrDirect(GetAllocator(),
- 1,
- DataType::Type::kVoid,
- 0,
- { nullptr, 0 },
- nullptr,
- {},
- InvokeType::kStatic,
- { nullptr, 0 },
- HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
- HInstruction* write_left = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* write_left = MakeIFieldSet(new_inst, c1, MemberOffset(32));
HInstruction* goto_left = new (GetAllocator()) HGoto();
- call_left->AsInvoke()->SetRawInputAt(0, new_inst);
left->AddInstruction(call_left);
left->AddInstruction(write_left);
left->AddInstruction(goto_left);
call_left->CopyEnvironmentFrom(cls->GetEnvironment());
- HInstruction* write_right = new (GetAllocator()) HInstanceFieldSet(new_inst,
- c2,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
HInstruction* goto_right = new (GetAllocator()) HGoto();
right->AddInstruction(write_right);
right->AddInstruction(goto_right);
- HInstruction* read_bottom = new (GetAllocator()) HInstanceFieldGet(new_inst,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(32),
- false,
- 0,
- 0,
- graph_->GetDexFile(),
- 0);
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
breturn->AddInstruction(read_bottom);
breturn->AddInstruction(return_exit);
- HInstruction* exit_instruction = new (GetAllocator()) HExit();
- exit->AddInstruction(exit_instruction);
+ SetupExit(exit);
+
// PerformLSE expects this to be empty.
graph_->ClearDominanceInformation();
+
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSENoPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(read_bottom);
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_RETAINED(write_entry);
+ EXPECT_INS_RETAINED(write_left);
+ EXPECT_INS_RETAINED(call_left);
+ EXPECT_INS_RETAINED(call_entry);
+}
+
+// // ENTRY
+// // MOVED TO MATERIALIZATION BLOCK
+// obj = new Obj();
+// ELIMINATE, moved to materialization block. Kept by escape.
+// obj.field = 3;
+// // Make sure this graph isn't broken
+// if (obj ==/!= (STATIC.VALUE|obj|null)) {
+// // partial_BLOCK
+// // REMOVE (either from unreachable or normal PHI creation)
+// obj.field = 4;
+// }
+// if (parameter_value) {
+// // LEFT
+// // DO NOT ELIMINATE
+// escape(obj);
+// } else {
+// // RIGHT
+// // ELIMINATE
+// obj.field = 2;
+// }
+// EXIT
+// PREDICATED GET
+// return obj.field
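+// The comparison sits entirely before the escape cohort, so the partial
+// store should be removable and the if-condition reducible for the
+// kBeforeEscape placement checked below.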
+TEST_P(PartialComparisonTestGroup, PartialComparisonBeforeCohort) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "critical_break"},
+ {"entry", "partial"},
+ {"partial", "merge"},
+ {"critical_break", "merge"},
+ {"merge", "left"},
+ {"merge", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(merge);
+ GET_BLOCK(partial);
+ GET_BLOCK(critical_break);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c4 = graph_->GetIntConstant(4);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ ComparisonInstructions cmp_instructions = GetComparisonInstructions(new_inst);
+ HInstruction* if_inst = new (GetAllocator()) HIf(cmp_instructions.cmp_);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_entry);
+ cmp_instructions.AddSetup(entry);
+ entry->AddInstruction(cmp_instructions.cmp_);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ cmp_instructions.AddEnvironment(cls->GetEnvironment());
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_partial = MakeIFieldSet(new_inst, c4, MemberOffset(32));
+ HInstruction* goto_partial = new (GetAllocator()) HGoto();
+ partial->AddInstruction(write_partial);
+ partial->AddInstruction(goto_partial);
+
+ HInstruction* goto_crit_break = new (GetAllocator()) HGoto();
+ critical_break->AddInstruction(goto_crit_break);
+
+ HInstruction* if_merge = new (GetAllocator()) HIf(bool_value);
+ merge->AddInstruction(if_merge);
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right);
+ right->AddInstruction(goto_right);
+
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ std::vector<HPhi*> merges;
+ HPredicatedInstanceFieldGet* pred_get;
+ HInstanceFieldSet* init_set;
+ std::tie(pred_get, init_set) =
+ FindSingleInstructions<HPredicatedInstanceFieldGet, HInstanceFieldSet>(graph_);
+ std::tie(merges) = FindAllInstructions<HPhi>(graph_);
+ ASSERT_EQ(merges.size(), 3u);
+ HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->GetBlock() == breturn;
+ });
+ HPhi* merge_value_top = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->GetBlock() != breturn;
+ });
+ HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference;
+ });
+ EXPECT_INS_REMOVED(read_bottom);
+ EXPECT_INS_REMOVED(write_entry);
+ EXPECT_INS_REMOVED(write_partial);
+ EXPECT_INS_RETAINED(call_left);
+ CheckFinalInstruction(if_inst->InputAt(0), ComparisonPlacement::kBeforeEscape);
+ EXPECT_INS_EQ(init_set->InputAt(1), merge_value_top);
+ EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return);
+}
+
+// // ENTRY
+// // MOVED TO MATERIALIZATION BLOCK
+// obj = new Obj();
+// ELIMINATE, moved to materialization block. Kept by escape.
+// obj.field = 3;
+// // Make sure this graph isn't broken
+// if (parameter_value) {
+// if (obj ==/!= (STATIC.VALUE|obj|null)) {
+// // partial_BLOCK
+// obj.field = 4;
+// }
+// // LEFT
+// // DO NOT ELIMINATE
+// escape(obj);
+// } else {
+// // RIGHT
+// // ELIMINATE
+// obj.field = 2;
+// }
+// EXIT
+// PREDICATED GET
+// return obj.field
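+// The comparison is inside the cohort but before the escape call, so the
+// partial store should survive, rewired onto the materialized instance.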
+TEST_P(PartialComparisonTestGroup, PartialComparisonInCohortBeforeEscape) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left_begin"},
+ {"left_begin", "partial"},
+ {"left_begin", "left_crit_break"},
+ {"left_crit_break", "left"},
+ {"partial", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(partial);
+ GET_BLOCK(left_begin);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(left_crit_break);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(left, {left_crit_break, partial});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c4 = graph_->GetIntConstant(4);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_entry);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ ComparisonInstructions cmp_instructions = GetComparisonInstructions(new_inst);
+ HInstruction* if_left_begin = new (GetAllocator()) HIf(cmp_instructions.cmp_);
+ cmp_instructions.AddSetup(left_begin);
+ left_begin->AddInstruction(cmp_instructions.cmp_);
+ left_begin->AddInstruction(if_left_begin);
+ cmp_instructions.AddEnvironment(cls->GetEnvironment());
+
+ left_crit_break->AddInstruction(new (GetAllocator()) HGoto());
+
+ HInstruction* write_partial = MakeIFieldSet(new_inst, c4, MemberOffset(32));
+ HInstruction* goto_partial = new (GetAllocator()) HGoto();
+ partial->AddInstruction(write_partial);
+ partial->AddInstruction(goto_partial);
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right);
+ right->AddInstruction(goto_right);
+
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ std::vector<HPhi*> merges;
+ HInstanceFieldSet* init_set =
+ FindSingleInstruction<HInstanceFieldSet>(graph_, left_begin->GetSinglePredecessor());
+ HInstanceFieldSet* partial_set = FindSingleInstruction<HInstanceFieldSet>(graph_, partial);
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_);
+ std::tie(merges) = FindAllInstructions<HPhi>(graph_);
+ ASSERT_EQ(merges.size(), 2u);
+ HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32;
+ });
+ HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference;
+ });
+ EXPECT_EQ(merge_value_return->GetBlock(), breturn)
+ << blks.GetName(merge_value_return->GetBlock());
+ EXPECT_INS_REMOVED(read_bottom);
+ EXPECT_INS_REMOVED(write_entry);
+ EXPECT_INS_RETAINED(write_partial);
+ EXPECT_INS_RETAINED(call_left);
+ CheckFinalInstruction(if_left_begin->InputAt(0), ComparisonPlacement::kInEscape);
+ EXPECT_INS_EQ(init_set->InputAt(1), c3);
+ EXPECT_INS_EQ(partial_set->InputAt(0), init_set->InputAt(0));
+ EXPECT_INS_EQ(partial_set->InputAt(1), c4);
+ EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return);
+}
+
+// // ENTRY
+// // MOVED TO MATERIALIZATION BLOCK
+// obj = new Obj();
+// ELIMINATE, moved to materialization block. Kept by escape.
+// obj.field = 3;
+// // Make sure this graph isn't broken
+// if (parameter_value) {
+// // LEFT
+// // DO NOT ELIMINATE
+// escape(obj);
+// } else {
+// // RIGHT
+// // ELIMINATE
+// obj.field = 2;
+// }
+// if (obj ==/!= (STATIC.VALUE|obj|null)) {
+// // partial_BLOCK
+// obj.field = 4;
+// }
+// EXIT
+// PREDICATED GET
+// return obj.field
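+// The comparison runs after the cohort, when the object may or may not have
+// been materialized, so the partial store must become a predicated set on
+// the merged target.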
+TEST_P(PartialComparisonTestGroup, PartialComparisonAfterCohort) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "merge"},
+ {"right", "merge"},
+ {"merge", "critical_break"},
+ {"critical_break", "breturn"},
+ {"merge", "partial"},
+ {"partial", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(partial);
+ GET_BLOCK(critical_break);
+ GET_BLOCK(merge);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {critical_break, partial});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c4 = graph_->GetIntConstant(4);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_entry);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right);
+ right->AddInstruction(goto_right);
+
+ ComparisonInstructions cmp_instructions = GetComparisonInstructions(new_inst);
+ HInstruction* if_merge = new (GetAllocator()) HIf(cmp_instructions.cmp_);
+ cmp_instructions.AddSetup(merge);
+ merge->AddInstruction(cmp_instructions.cmp_);
+ merge->AddInstruction(if_merge);
+ cmp_instructions.AddEnvironment(cls->GetEnvironment());
+
+ HInstanceFieldSet* write_partial = MakeIFieldSet(new_inst, c4, MemberOffset(32));
+ HInstruction* goto_partial = new (GetAllocator()) HGoto();
+ partial->AddInstruction(write_partial);
+ partial->AddInstruction(goto_partial);
+
+ HInstruction* goto_crit_break = new (GetAllocator()) HGoto();
+ critical_break->AddInstruction(goto_crit_break);
+
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ std::vector<HPhi*> merges;
+ HInstanceFieldSet* init_set =
+ FindSingleInstruction<HInstanceFieldSet>(graph_, left->GetSinglePredecessor());
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_);
+ std::tie(merges) = FindAllInstructions<HPhi>(graph_);
+ ASSERT_EQ(merges.size(), 3u);
+ HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->GetBlock() == breturn;
+ });
+ HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference;
+ });
+ EXPECT_INS_REMOVED(read_bottom);
+ EXPECT_INS_REMOVED(write_entry);
+ EXPECT_INS_RETAINED(write_partial);
+ EXPECT_TRUE(write_partial->GetIsPredicatedSet());
+ EXPECT_INS_RETAINED(call_left);
+ CheckFinalInstruction(if_merge->InputAt(0), ComparisonPlacement::kAfterEscape);
+ EXPECT_INS_EQ(init_set->InputAt(1), c3);
+ ASSERT_TRUE(write_partial->InputAt(0)->IsPhi());
+ EXPECT_INS_EQ(write_partial->InputAt(0)->AsPhi()->InputAt(0), init_set->InputAt(0));
+ EXPECT_INS_EQ(write_partial->InputAt(1), c4);
+ EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return);
+}
+
+// // ENTRY
+// // MOVED TO MATERIALIZATION BLOCK
+// obj = new Obj();
+// ELIMINATE, moved to materialization block. Kept by escape.
+// obj.field = 3;
+// // Make sure this graph isn't broken
+// if (parameter_value) {
+// // LEFT
+// // DO NOT ELIMINATE
+// escape(obj);
+// if (obj ==/!= (STATIC.VALUE|obj|null)) {
+// // partial_BLOCK
+// obj.field = 4;
+// }
+// } else {
+// // RIGHT
+// // ELIMINATE
+// obj.field = 2;
+// }
+// EXIT
+// PREDICATED GET
+// return obj.field
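+// The comparison runs after the escape, so its result is only known at
+// runtime; whether the partial store survives depends on the comparison
+// kind.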
+TEST_P(PartialComparisonTestGroup, PartialComparisonInCohortAfterEscape) {
+ PartialComparisonKind kind = GetParam();
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"left", "partial"},
+ {"partial", "left_end"},
+ {"left", "left_crit_break"},
+ {"left_crit_break", "left_end"},
+ {"left_end", "breturn"},
+ {"entry", "right"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(partial);
+ GET_BLOCK(left_end);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(left_crit_break);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c4 = graph_->GetIntConstant(4);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_entry);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ ComparisonInstructions cmp_instructions = GetComparisonInstructions(new_inst);
+ HInstruction* if_left = new (GetAllocator()) HIf(cmp_instructions.cmp_);
+ left->AddInstruction(call_left);
+ cmp_instructions.AddSetup(left);
+ left->AddInstruction(cmp_instructions.cmp_);
+ left->AddInstruction(if_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+ cmp_instructions.AddEnvironment(cls->GetEnvironment());
+ if (if_left->AsIf()->IfTrueSuccessor() != partial) {
+ left->SwapSuccessors();
+ }
+
+ HInstruction* write_partial = MakeIFieldSet(new_inst, c4, MemberOffset(32));
+ HInstruction* goto_partial = new (GetAllocator()) HGoto();
+ partial->AddInstruction(write_partial);
+ partial->AddInstruction(goto_partial);
+
+ HInstruction* goto_left_crit_break = new (GetAllocator()) HGoto();
+ left_crit_break->AddInstruction(goto_left_crit_break);
+
+ HInstruction* goto_left_end = new (GetAllocator()) HGoto();
+ left_end->AddInstruction(goto_left_end);
+
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right);
+ right->AddInstruction(goto_right);
+
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ std::vector<HPhi*> merges;
+ std::vector<HInstanceFieldSet*> sets;
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_);
+ std::tie(merges, sets) = FindAllInstructions<HPhi, HInstanceFieldSet>(graph_);
+ ASSERT_EQ(merges.size(), 2u);
+ ASSERT_EQ(sets.size(), 2u);
+ HInstanceFieldSet* init_set = FindOrNull(sets.begin(), sets.end(), [&](HInstanceFieldSet* s) {
+ return s->GetBlock()->GetSingleSuccessor() == left;
+ });
+ EXPECT_INS_EQ(init_set->InputAt(1), c3);
+ HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->GetBlock() == breturn;
+ });
+ HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference;
+ });
+ EXPECT_INS_REMOVED(read_bottom);
+ EXPECT_INS_REMOVED(write_entry);
+ if (kind.IsPossiblyTrue()) {
+ EXPECT_INS_RETAINED(write_partial);
+ EXPECT_TRUE(std::find(sets.begin(), sets.end(), write_partial) != sets.end());
+ }
+ EXPECT_INS_RETAINED(call_left);
+ CheckFinalInstruction(if_left->InputAt(0), ComparisonPlacement::kInEscape);
+ EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return);
+}
+
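+// Cross equals/not-equals with null/value/self targets on either operand
+// side. kSelf appears only on the left since self-comparison is symmetric.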
+INSTANTIATE_TEST_SUITE_P(
+ LoadStoreEliminationTest,
+ PartialComparisonTestGroup,
+ testing::Values(PartialComparisonKind{PartialComparisonKind::Type::kEquals,
+ PartialComparisonKind::Target::kNull,
+ PartialComparisonKind::Position::kLeft},
+ PartialComparisonKind{PartialComparisonKind::Type::kEquals,
+ PartialComparisonKind::Target::kNull,
+ PartialComparisonKind::Position::kRight},
+ PartialComparisonKind{PartialComparisonKind::Type::kEquals,
+ PartialComparisonKind::Target::kValue,
+ PartialComparisonKind::Position::kLeft},
+ PartialComparisonKind{PartialComparisonKind::Type::kEquals,
+ PartialComparisonKind::Target::kValue,
+ PartialComparisonKind::Position::kRight},
+ PartialComparisonKind{PartialComparisonKind::Type::kEquals,
+ PartialComparisonKind::Target::kSelf,
+ PartialComparisonKind::Position::kLeft},
+ PartialComparisonKind{PartialComparisonKind::Type::kNotEquals,
+ PartialComparisonKind::Target::kNull,
+ PartialComparisonKind::Position::kLeft},
+ PartialComparisonKind{PartialComparisonKind::Type::kNotEquals,
+ PartialComparisonKind::Target::kNull,
+ PartialComparisonKind::Position::kRight},
+ PartialComparisonKind{PartialComparisonKind::Type::kNotEquals,
+ PartialComparisonKind::Target::kSelf,
+ PartialComparisonKind::Position::kLeft},
+ PartialComparisonKind{PartialComparisonKind::Type::kNotEquals,
+ PartialComparisonKind::Target::kValue,
+ PartialComparisonKind::Position::kLeft},
+ PartialComparisonKind{PartialComparisonKind::Type::kNotEquals,
+ PartialComparisonKind::Target::kValue,
+ PartialComparisonKind::Position::kRight}));
+
+// // ENTRY
+// obj = new Obj();
+// if (parameter_value) {
+// // LEFT
+// escape(obj);
+// } else {
+// // RIGHT
+// // ELIMINATE
+// obj.field = 2;
+// }
+// EXIT
+// predicated-ELIMINATE
+// obj.field = 3;
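+// The trailing store executes whether or not the object escaped, so it must
+// be kept but marked predicated, running only for the materialized instance.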
+TEST_F(LoadStoreEliminationTest, PredicatedStore1) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ InitGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {left, right});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* null_const = graph_->GetNullConstant();
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right);
+ right->AddInstruction(goto_right);
+
+ HInstruction* write_bottom = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturnVoid();
+ breturn->AddInstruction(write_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_RETAINED(write_bottom);
+ EXPECT_TRUE(write_bottom->AsInstanceFieldSet()->GetIsPredicatedSet());
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_RETAINED(call_left);
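+ // The reference phi should merge the materialized allocation (left, where
+ // the object escaped) with null (right, fully eliminated).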
+ HPhi* merge_alloc = FindSingleInstruction<HPhi>(graph_, breturn);
+ ASSERT_NE(merge_alloc, nullptr);
+ EXPECT_TRUE(merge_alloc->InputAt(0)->IsNewInstance()) << *merge_alloc;
+ EXPECT_EQ(merge_alloc->InputAt(0)->InputAt(0), cls) << *merge_alloc << " cls? " << *cls;
+ EXPECT_EQ(merge_alloc->InputAt(1), null_const);
+}
+
+// // ENTRY
+// obj = new Obj();
+// obj.field = 3;
+// if (parameter_value) {
+// // LEFT
+// escape(obj);
+// } else {
+// // RIGHT
+// // ELIMINATE
+// obj.field = 2;
+// }
+// // MERGE
+// if (second_param) {
+// // NON_ESCAPE
+// obj.field = 1;
+// noescape();
+// }
+// EXIT
+// predicated-ELIMINATE
+// obj.field = 4;
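+// As in PredicatedStore1, but with a non-escaping store and call in between;
+// the trailing store should still end up predicated on the merged target.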
+TEST_F(LoadStoreEliminationTest, PredicatedStore2) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "merge"},
+ {"right", "merge"},
+ {"merge", "non_escape"},
+ {"non_escape", "breturn"},
+ {"merge", "merge_crit_break"},
+ {"merge_crit_break", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+ GET_BLOCK(merge);
+ GET_BLOCK(merge_crit_break);
+ GET_BLOCK(non_escape);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(merge, {left, right});
+ EnsurePredecessorOrder(breturn, {merge_crit_break, non_escape});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* bool_value2 = MakeParam(DataType::Type::kBool);
+ HInstruction* null_const = graph_->GetNullConstant();
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c4 = graph_->GetIntConstant(4);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_entry);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right);
+ right->AddInstruction(goto_right);
+
+ HInstruction* merge_if = new (GetAllocator()) HIf(bool_value2);
+ merge->AddInstruction(merge_if);
+
+ merge_crit_break->AddInstruction(new (GetAllocator()) HGoto());
+
+ HInstruction* write_non_escape = MakeIFieldSet(new_inst, c1, MemberOffset(32));
+ HInstruction* non_escape_call = MakeInvoke(DataType::Type::kVoid, {});
+ HInstruction* non_escape_goto = new (GetAllocator()) HGoto();
+ non_escape->AddInstruction(write_non_escape);
+ non_escape->AddInstruction(non_escape_call);
+ non_escape->AddInstruction(non_escape_goto);
+ non_escape_call->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_bottom = MakeIFieldSet(new_inst, c4, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturnVoid();
+ breturn->AddInstruction(write_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_RETAINED(write_bottom);
+ EXPECT_TRUE(write_bottom->AsInstanceFieldSet()->GetIsPredicatedSet()) << *write_bottom;
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_RETAINED(call_left);
+ HInstanceFieldSet* pred_set = FindSingleInstruction<HInstanceFieldSet>(graph_, breturn);
+ HPhi* merge_alloc = FindSingleInstruction<HPhi>(graph_);
+ ASSERT_NE(merge_alloc, nullptr);
+ EXPECT_TRUE(merge_alloc->InputAt(0)->IsNewInstance()) << *merge_alloc;
+ EXPECT_INS_EQ(merge_alloc->InputAt(0)->InputAt(0), cls) << " phi is: " << *merge_alloc;
+ EXPECT_INS_EQ(merge_alloc->InputAt(1), null_const);
+ ASSERT_NE(pred_set, nullptr);
+ EXPECT_TRUE(pred_set->GetIsPredicatedSet()) << *pred_set;
+ EXPECT_INS_EQ(pred_set->InputAt(0), merge_alloc);
+}
+
+// // ENTRY
+// obj = new Obj();
+// obj.field = 3;
+// if (parameter_value) {
+// // LEFT
+// escape(obj);
+// } else {
+// // RIGHT
+// // ELIMINATE
+// obj.field = 2;
+// }
+// EXIT
+// predicated-ELIMINATE
+// return obj.field
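+// The final read merges a known value (right) with a real load (left), so it
+// should become a predicated get: default value from a phi, target from the
+// reference phi.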
+TEST_F(LoadStoreEliminationTest, PredicatedLoad1) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {left, right});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* null_const = graph_->GetNullConstant();
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_entry);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right);
+ right->AddInstruction(goto_right);
+
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(read_bottom);
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_RETAINED(call_left);
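+ // Expect two phis in breturn: an int32 default value (0 for the escaped
+ // path, c2 from the eliminated store) and the reference merge with null.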
+ std::vector<HPhi*> merges;
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ std::tie(merges) = FindAllInstructions<HPhi>(graph_, breturn);
+ ASSERT_EQ(merges.size(), 2u);
+ HPhi* merge_value_return = FindOrNull(
+ merges.begin(), merges.end(), [](HPhi* p) { return p->GetType() == DataType::Type::kInt32; });
+ HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference;
+ });
+ ASSERT_NE(merge_alloc, nullptr);
+ EXPECT_TRUE(merge_alloc->InputAt(0)->IsNewInstance()) << *merge_alloc;
+ EXPECT_EQ(merge_alloc->InputAt(0)->InputAt(0), cls) << *merge_alloc << " cls? " << *cls;
+ EXPECT_EQ(merge_alloc->InputAt(1), null_const);
+ ASSERT_NE(pred_get, nullptr);
+ EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return) << " pred-get is: " << *pred_get;
+ EXPECT_INS_EQ(merge_value_return->InputAt(0), graph_->GetIntConstant(0))
+ << " merge val is: " << *merge_value_return;
+ EXPECT_INS_EQ(merge_value_return->InputAt(1), c2) << " merge val is: " << *merge_value_return;
+}
+
+// // ENTRY
+// obj1 = new Obj1();
+// obj2 = new Obj2();
+// obj1.field = 3;
+// obj2.field = 13;
+// if (parameter_value) {
+// // LEFT
+// escape(obj1);
+// escape(obj2);
+// } else {
+// // RIGHT
+// // ELIMINATE
+// obj1.field = 2;
+// obj2.field = 12;
+// }
+// EXIT
+// predicated-ELIMINATE
+// return obj1.field + obj2.field
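+// Both allocations escape together on the left, so each bottom read should
+// become its own predicated get with its own reference and default-value
+// phis.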
+TEST_F(LoadStoreEliminationTest, MultiPredicatedLoad1) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {left, right});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c12 = graph_->GetIntConstant(12);
+ HInstruction* c13 = graph_->GetIntConstant(13);
+
+ HInstruction* cls1 = MakeClassLoad();
+ HInstruction* cls2 = MakeClassLoad();
+ HInstruction* new_inst1 = MakeNewInstance(cls1);
+ HInstruction* new_inst2 = MakeNewInstance(cls2);
+ HInstruction* write_entry1 = MakeIFieldSet(new_inst1, c3, MemberOffset(32));
+ HInstruction* write_entry2 = MakeIFieldSet(new_inst2, c13, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls1);
+ entry->AddInstruction(cls2);
+ entry->AddInstruction(new_inst1);
+ entry->AddInstruction(new_inst2);
+ entry->AddInstruction(write_entry1);
+ entry->AddInstruction(write_entry2);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls1, {});
+ cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
+ new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
+ new_inst2->CopyEnvironmentFrom(cls1->GetEnvironment());
+
+ HInstruction* call_left1 = MakeInvoke(DataType::Type::kVoid, { new_inst1 });
+ HInstruction* call_left2 = MakeInvoke(DataType::Type::kVoid, { new_inst2 });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left1);
+ left->AddInstruction(call_left2);
+ left->AddInstruction(goto_left);
+ call_left1->CopyEnvironmentFrom(cls1->GetEnvironment());
+ call_left2->CopyEnvironmentFrom(cls1->GetEnvironment());
+
+ HInstruction* write_right1 = MakeIFieldSet(new_inst1, c2, MemberOffset(32));
+ HInstruction* write_right2 = MakeIFieldSet(new_inst2, c12, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right1);
+ right->AddInstruction(write_right2);
+ right->AddInstruction(goto_right);
+
+ HInstruction* read_bottom1 = MakeIFieldGet(new_inst1, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* read_bottom2 = MakeIFieldGet(new_inst2, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* combine =
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, read_bottom1, read_bottom2);
+ HInstruction* return_exit = new (GetAllocator()) HReturn(combine);
+ breturn->AddInstruction(read_bottom1);
+ breturn->AddInstruction(read_bottom2);
+ breturn->AddInstruction(combine);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(read_bottom1);
+ EXPECT_INS_REMOVED(read_bottom2);
+ EXPECT_INS_REMOVED(write_right1);
+ EXPECT_INS_REMOVED(write_right2);
+ EXPECT_INS_RETAINED(call_left1);
+ EXPECT_INS_RETAINED(call_left2);
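+ // One reference phi plus one int32 default-value phi per object: four phis
+ // and two predicated gets are expected in breturn.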
+ std::vector<HPhi*> merges;
+ std::vector<HPredicatedInstanceFieldGet*> pred_gets;
+ std::tie(merges, pred_gets) =
+ FindAllInstructions<HPhi, HPredicatedInstanceFieldGet>(graph_, breturn);
+ ASSERT_EQ(merges.size(), 4u);
+ ASSERT_EQ(pred_gets.size(), 2u);
+ HPhi* merge_value_return1 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->InputAt(1) == c2;
+ });
+ HPhi* merge_value_return2 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->InputAt(1) == c12;
+ });
+ HPhi* merge_alloc1 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference &&
+ p->InputAt(0)->IsNewInstance() &&
+ p->InputAt(0)->InputAt(0) == cls1;
+ });
+ HPhi* merge_alloc2 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference &&
+ p->InputAt(0)->IsNewInstance() &&
+ p->InputAt(0)->InputAt(0) == cls2;
+ });
+ ASSERT_NE(merge_alloc1, nullptr);
+ ASSERT_NE(merge_alloc2, nullptr);
+ EXPECT_EQ(merge_alloc1->InputAt(1), graph_->GetNullConstant());
+ EXPECT_EQ(merge_alloc2->InputAt(1), graph_->GetNullConstant());
+ HPredicatedInstanceFieldGet* pred_get1 =
+ FindOrNull(pred_gets.begin(), pred_gets.end(), [&](HPredicatedInstanceFieldGet* pg) {
+ return pg->GetTarget() == merge_alloc1;
+ });
+ HPredicatedInstanceFieldGet* pred_get2 =
+ FindOrNull(pred_gets.begin(), pred_gets.end(), [&](HPredicatedInstanceFieldGet* pg) {
+ return pg->GetTarget() == merge_alloc2;
+ });
+ ASSERT_NE(pred_get1, nullptr);
+ EXPECT_INS_EQ(pred_get1->GetTarget(), merge_alloc1);
+ EXPECT_INS_EQ(pred_get1->GetDefaultValue(), merge_value_return1)
+ << " pred-get is: " << *pred_get1;
+ EXPECT_INS_EQ(merge_value_return1->InputAt(0), graph_->GetIntConstant(0))
+ << " merge val is: " << *merge_value_return1;
+ EXPECT_INS_EQ(merge_value_return1->InputAt(1), c2) << " merge val is: " << *merge_value_return1;
+ ASSERT_NE(pred_get2, nullptr);
+ EXPECT_INS_EQ(pred_get2->GetTarget(), merge_alloc2);
+ EXPECT_INS_EQ(pred_get2->GetDefaultValue(), merge_value_return2)
+ << " pred-get is: " << *pred_get2;
+ EXPECT_INS_EQ(merge_value_return2->InputAt(0), graph_->GetIntConstant(0))
+ << " merge val is: " << *merge_value_return1;
+ EXPECT_INS_EQ(merge_value_return2->InputAt(1), c12) << " merge val is: " << *merge_value_return1;
+}
+
+// // ENTRY
+// obj1 = new Obj1();
+// obj2 = new Obj2();
+// obj1.field = 3;
+// obj2.field = 13;
+// if (parameter_value) {
+// // LEFT
+// escape(obj1);
+// // ELIMINATE
+// obj2.field = 12;
+// } else {
+// // RIGHT
+// // ELIMINATE
+// obj1.field = 2;
+// escape(obj2);
+// }
+// EXIT
+// predicated-ELIMINATE
+// return obj1.field + obj2.field
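+// Mirror of MultiPredicatedLoad1: obj1 escapes on the left and obj2 on the
+// right, so the null inputs of the two reference phis land on opposite
+// sides.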
+TEST_F(LoadStoreEliminationTest, MultiPredicatedLoad2) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {left, right});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c12 = graph_->GetIntConstant(12);
+ HInstruction* c13 = graph_->GetIntConstant(13);
+
+ HInstruction* cls1 = MakeClassLoad();
+ HInstruction* cls2 = MakeClassLoad();
+ HInstruction* new_inst1 = MakeNewInstance(cls1);
+ HInstruction* new_inst2 = MakeNewInstance(cls2);
+ HInstruction* write_entry1 = MakeIFieldSet(new_inst1, c3, MemberOffset(32));
+ HInstruction* write_entry2 = MakeIFieldSet(new_inst2, c13, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls1);
+ entry->AddInstruction(cls2);
+ entry->AddInstruction(new_inst1);
+ entry->AddInstruction(new_inst2);
+ entry->AddInstruction(write_entry1);
+ entry->AddInstruction(write_entry2);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls1, {});
+ cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
+ new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
+ new_inst2->CopyEnvironmentFrom(cls1->GetEnvironment());
+
+ HInstruction* call_left1 = MakeInvoke(DataType::Type::kVoid, { new_inst1 });
+ HInstruction* write_left2 = MakeIFieldSet(new_inst2, c12, MemberOffset(32));
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left1);
+ left->AddInstruction(write_left2);
+ left->AddInstruction(goto_left);
+ call_left1->CopyEnvironmentFrom(cls1->GetEnvironment());
+
+ HInstruction* write_right1 = MakeIFieldSet(new_inst1, c2, MemberOffset(32));
+ HInstruction* call_right2 = MakeInvoke(DataType::Type::kVoid, { new_inst2 });
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right1);
+ right->AddInstruction(call_right2);
+ right->AddInstruction(goto_right);
+ call_right2->CopyEnvironmentFrom(cls1->GetEnvironment());
+
+ HInstruction* read_bottom1 = MakeIFieldGet(new_inst1, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* read_bottom2 = MakeIFieldGet(new_inst2, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* combine =
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, read_bottom1, read_bottom2);
+ HInstruction* return_exit = new (GetAllocator()) HReturn(combine);
+ breturn->AddInstruction(read_bottom1);
+ breturn->AddInstruction(read_bottom2);
+ breturn->AddInstruction(combine);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(read_bottom1);
+ EXPECT_INS_REMOVED(read_bottom2);
+ EXPECT_INS_REMOVED(write_right1);
+ EXPECT_INS_REMOVED(write_left2);
+ EXPECT_INS_RETAINED(call_left1);
+ EXPECT_INS_RETAINED(call_right2);
+ std::vector<HPhi*> merges;
+ std::vector<HPredicatedInstanceFieldGet*> pred_gets;
+ std::tie(merges, pred_gets) =
+ FindAllInstructions<HPhi, HPredicatedInstanceFieldGet>(graph_, breturn);
+ ASSERT_EQ(merges.size(), 4u);
+ ASSERT_EQ(pred_gets.size(), 2u);
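+ // The four phis should be two int32 value merges and two reference
+ // allocation merges, one pair per partially-escaped object.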
+ HPhi* merge_value_return1 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->InputAt(1) == c2;
+ });
+ HPhi* merge_value_return2 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->InputAt(0) == c12;
+ });
+ HPhi* merge_alloc1 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference && p->InputAt(1)->IsNullConstant();
+ });
+ HPhi* merge_alloc2 = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference && p->InputAt(0)->IsNullConstant();
+ });
+ ASSERT_NE(merge_alloc1, nullptr);
+ ASSERT_NE(merge_alloc2, nullptr);
+ EXPECT_TRUE(merge_alloc1->InputAt(0)->IsNewInstance()) << *merge_alloc1;
+ EXPECT_INS_EQ(merge_alloc1->InputAt(0)->InputAt(0), cls1) << *merge_alloc1;
+ EXPECT_INS_EQ(merge_alloc1->InputAt(1), graph_->GetNullConstant());
+ EXPECT_TRUE(merge_alloc2->InputAt(1)->IsNewInstance()) << *merge_alloc2;
+ EXPECT_INS_EQ(merge_alloc2->InputAt(1)->InputAt(0), cls2) << *merge_alloc2;
+ EXPECT_INS_EQ(merge_alloc2->InputAt(0), graph_->GetNullConstant());
+ HPredicatedInstanceFieldGet* pred_get1 =
+ FindOrNull(pred_gets.begin(), pred_gets.end(), [&](HPredicatedInstanceFieldGet* pg) {
+ return pg->GetTarget() == merge_alloc1;
+ });
+ HPredicatedInstanceFieldGet* pred_get2 =
+ FindOrNull(pred_gets.begin(), pred_gets.end(), [&](HPredicatedInstanceFieldGet* pg) {
+ return pg->GetTarget() == merge_alloc2;
+ });
+ ASSERT_NE(pred_get1, nullptr);
+ EXPECT_INS_EQ(pred_get1->GetTarget(), merge_alloc1);
+ EXPECT_INS_EQ(pred_get1->GetDefaultValue(), merge_value_return1)
+ << " pred-get is: " << *pred_get1;
+ EXPECT_INS_EQ(merge_value_return1->InputAt(0), graph_->GetIntConstant(0))
+ << " merge val is: " << *merge_value_return1;
+ EXPECT_INS_EQ(merge_value_return1->InputAt(1), c2) << " merge val is: " << *merge_value_return1;
+ ASSERT_NE(pred_get2, nullptr);
+ EXPECT_INS_EQ(pred_get2->GetTarget(), merge_alloc2);
+ EXPECT_INS_EQ(pred_get2->GetDefaultValue(), merge_value_return2)
+ << " pred-get is: " << *pred_get2;
+ EXPECT_INS_EQ(merge_value_return2->InputAt(1), graph_->GetIntConstant(0))
+ << " merge val is: " << *merge_value_return2;
+ EXPECT_INS_EQ(merge_value_return2->InputAt(0), c12) << " merge val is: " << *merge_value_return2;
+}
+
+// Based on structure seen in `java.util.List
+// java.util.Collections.checkedList(java.util.List, java.lang.Class)`
+// Incorrect accounting would cause attempts to materialize both obj1 and obj2
+// in each of the materialization blocks.
+// // ENTRY
+// Obj obj;
+// if (param1) {
+// // needs to be moved after param2 check
+// obj1 = new Obj1();
+// obj1.foo = 33;
+// if (param2) {
+// return obj1.foo;
+// }
+// obj = obj1;
+// } else {
+// obj2 = new Obj2();
+// obj2.foo = 44;
+// if (param2) {
+// return obj2.foo;
+// }
+// obj = obj2;
+// }
+// EXIT
+// // obj = PHI[obj1, obj2]
+ // NB The phi acts as an escape for both obj1 and obj2, meaning that as far as
+ // the LSA is concerned the escape frontier is left_end->breturn and
+ // right_end->breturn for both, even though only one of the objects is
+ // actually live at each edge.
+// // TODO In the future we really should track liveness through PHIs which would
+// // allow us to entirely remove the allocation in this test.
+// return obj.foo;
+TEST_F(LoadStoreEliminationTest, MultiPredicatedLoad3) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"left", "left_end"},
+ {"left_end", "breturn"},
+ {"left", "left_exit_early"},
+ {"left_exit_early", "exit"},
+ {"entry", "right"},
+ {"right", "right_end"},
+ {"right_end", "breturn"},
+ {"right", "right_exit_early"},
+ {"right_exit_early", "exit"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(left_end);
+ GET_BLOCK(left_exit_early);
+ GET_BLOCK(right);
+ GET_BLOCK(right_end);
+ GET_BLOCK(right_exit_early);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {left_end, right_end});
+ HInstruction* param1 = MakeParam(DataType::Type::kBool);
+ HInstruction* param2 = MakeParam(DataType::Type::kBool);
+ HInstruction* c33 = graph_->GetIntConstant(33);
+ HInstruction* c44 = graph_->GetIntConstant(44);
+
+ HInstruction* if_inst = new (GetAllocator()) HIf(param1);
+ entry->AddInstruction(if_inst);
+
+ HInstruction* cls1 = MakeClassLoad();
+ HInstruction* new_inst1 = MakeNewInstance(cls1);
+ HInstruction* write1 = MakeIFieldSet(new_inst1, c33, MemberOffset(32));
+ HInstruction* if_left = new (GetAllocator()) HIf(param2);
+ left->AddInstruction(cls1);
+ left->AddInstruction(new_inst1);
+ left->AddInstruction(write1);
+ left->AddInstruction(if_left);
+ ManuallyBuildEnvFor(cls1, {});
+ new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
+
+ left_end->AddInstruction(new (GetAllocator()) HGoto());
+
+ HInstruction* early_exit_left_read =
+ MakeIFieldGet(new_inst1, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* early_exit_left_return = new (GetAllocator()) HReturn(early_exit_left_read);
+ left_exit_early->AddInstruction(early_exit_left_read);
+ left_exit_early->AddInstruction(early_exit_left_return);
+
+ HInstruction* cls2 = MakeClassLoad();
+ HInstruction* new_inst2 = MakeNewInstance(cls2);
+ HInstruction* write2 = MakeIFieldSet(new_inst2, c44, MemberOffset(32));
+ HInstruction* if_right = new (GetAllocator()) HIf(param2);
+ right->AddInstruction(cls2);
+ right->AddInstruction(new_inst2);
+ right->AddInstruction(write2);
+ right->AddInstruction(if_right);
+ cls2->CopyEnvironmentFrom(cls1->GetEnvironment());
+ new_inst2->CopyEnvironmentFrom(cls2->GetEnvironment());
+
+ right_end->AddInstruction(new (GetAllocator()) HGoto());
+
+ HInstruction* early_exit_right_read =
+ MakeIFieldGet(new_inst2, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* early_exit_right_return = new (GetAllocator()) HReturn(early_exit_right_read);
+ right_exit_early->AddInstruction(early_exit_right_read);
+ right_exit_early->AddInstruction(early_exit_right_return);
+
+ HPhi* bottom_phi = MakePhi({new_inst1, new_inst2});
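+ // After partial LSE these phi inputs should be rewritten to the materialized
+ // instances (checked at the bottom of this test).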
+ HInstruction* read_bottom = MakeIFieldGet(bottom_phi, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddPhi(bottom_phi);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(early_exit_left_read);
+ EXPECT_INS_REMOVED(early_exit_right_read);
+ EXPECT_INS_RETAINED(bottom_phi);
+ EXPECT_INS_RETAINED(read_bottom);
+ EXPECT_INS_EQ(early_exit_left_return->InputAt(0), c33);
+ EXPECT_INS_EQ(early_exit_right_return->InputAt(0), c44);
+ // These assert there is only 1 HNewInstance in the given blocks.
+ HNewInstance* moved_ni1 =
+ FindSingleInstruction<HNewInstance>(graph_, left_end->GetSinglePredecessor());
+ HNewInstance* moved_ni2 =
+ FindSingleInstruction<HNewInstance>(graph_, right_end->GetSinglePredecessor());
+ ASSERT_NE(moved_ni1, nullptr);
+ ASSERT_NE(moved_ni2, nullptr);
+ EXPECT_INS_EQ(bottom_phi->InputAt(0), moved_ni1);
+ EXPECT_INS_EQ(bottom_phi->InputAt(1), moved_ni2);
+}
+
+// Based on structure seen in `java.util.Set java.util.Collections$UnmodifiableMap.entrySet()`
+// We end up having to update a PHI generated by normal LSE.
+// // ENTRY
+// Obj obj_init = param_obj.BAR;
+// if (param1) {
+// Obj other = new Obj();
+// other.foo = 42;
+// if (param2) {
+// return other.foo;
+// } else {
+// param_obj.BAR = other;
+// }
+// } else { }
+// EXIT
+ // LSE turns this into PHI[obj_init, other]
+ // read_bottom = param_obj.BAR;
+ // // The final read won't be changed. The escape happens with .BAR already
+ // // set, so this is in the escaping cohort.
+ // return read_bottom.foo;
+TEST_F(LoadStoreEliminationTest, MultiPredicatedLoad4) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"left", "left_early_return"},
+ {"left_early_return", "exit"},
+ {"left", "left_write_escape"},
+ {"left_write_escape", "breturn"},
+ {"entry", "right"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(left_early_return);
+ GET_BLOCK(left_write_escape);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ MemberOffset foo_offset = MemberOffset(32);
+ MemberOffset bar_offset = MemberOffset(20);
+ EnsurePredecessorOrder(breturn, {left_write_escape, right});
+ HInstruction* c42 = graph_->GetIntConstant(42);
+ HInstruction* param1 = MakeParam(DataType::Type::kBool);
+ HInstruction* param2 = MakeParam(DataType::Type::kBool);
+ HInstruction* param_obj = MakeParam(DataType::Type::kReference);
+
+ HInstruction* get_initial = MakeIFieldGet(param_obj, DataType::Type::kReference, bar_offset);
+ HInstruction* if_inst = new (GetAllocator()) HIf(param1);
+ entry->AddInstruction(get_initial);
+ entry->AddInstruction(if_inst);
+
+ HInstruction* cls1 = MakeClassLoad();
+ HInstruction* new_inst1 = MakeNewInstance(cls1);
+ HInstruction* write1 = MakeIFieldSet(new_inst1, c42, foo_offset);
+ HInstruction* if_left = new (GetAllocator()) HIf(param2);
+ left->AddInstruction(cls1);
+ left->AddInstruction(new_inst1);
+ left->AddInstruction(write1);
+ left->AddInstruction(if_left);
+ ManuallyBuildEnvFor(cls1, {});
+ new_inst1->CopyEnvironmentFrom(cls1->GetEnvironment());
+
+ HInstruction* read_early_return = MakeIFieldGet(new_inst1, DataType::Type::kInt32, foo_offset);
+ HInstruction* return_early = new (GetAllocator()) HReturn(read_early_return);
+ left_early_return->AddInstruction(read_early_return);
+ left_early_return->AddInstruction(return_early);
+
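+ // Storing new_inst1 into param_obj.BAR is the escape point for the
+ // allocation.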
+ HInstruction* write_escape = MakeIFieldSet(param_obj, new_inst1, bar_offset);
+ HInstruction* write_goto = new (GetAllocator()) HGoto();
+ left_write_escape->AddInstruction(write_escape);
+ left_write_escape->AddInstruction(write_goto);
+
+ right->AddInstruction(new (GetAllocator()) HGoto());
+
+ HInstruction* read_bottom = MakeIFieldGet(param_obj, DataType::Type::kReference, bar_offset);
+ HInstruction* final_read = MakeIFieldGet(read_bottom, DataType::Type::kInt32, foo_offset);
+ HInstruction* return_exit = new (GetAllocator()) HReturn(final_read);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(final_read);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(read_bottom);
+ EXPECT_INS_REMOVED(read_early_return);
+ EXPECT_INS_EQ(return_early->InputAt(0), c42);
+ EXPECT_INS_RETAINED(final_read);
+ HNewInstance* moved_ni =
+ FindSingleInstruction<HNewInstance>(graph_, left_write_escape->GetSinglePredecessor());
+ EXPECT_TRUE(final_read->InputAt(0)->IsPhi());
+ EXPECT_INS_EQ(final_read->InputAt(0)->InputAt(0), moved_ni);
+ EXPECT_INS_EQ(final_read->InputAt(0)->InputAt(1), get_initial);
+}
+
+// // ENTRY
+// obj = new Obj();
+// obj.field = 3;
+// if (parameter_value) {
+// // LEFT
+// escape(obj);
+// } else {
+// // RIGHT
+// // ELIMINATE
+// obj.field = 2;
+// }
+// // MERGE
+// if (second_param) {
+// // NON_ESCAPE
+// obj.field = 1;
+// noescape();
+// }
+// EXIT
+// predicated-ELIMINATE
+// return obj.field
+TEST_F(LoadStoreEliminationTest, PredicatedLoad2) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "merge"},
+ {"right", "merge"},
+ {"merge", "non_escape"},
+ {"non_escape", "breturn"},
+ {"merge", "crit_break"},
+ {"crit_break", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+ GET_BLOCK(merge);
+ GET_BLOCK(non_escape);
+ GET_BLOCK(crit_break);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(merge, {left, right});
+ EnsurePredecessorOrder(breturn, {crit_break, non_escape});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* bool_value2 = MakeParam(DataType::Type::kBool);
+ HInstruction* null_const = graph_->GetNullConstant();
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_entry);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right);
+ right->AddInstruction(goto_right);
+
+ HInstruction* merge_if = new (GetAllocator()) HIf(bool_value2);
+ merge->AddInstruction(merge_if);
+
+ crit_break->AddInstruction(new (GetAllocator()) HGoto());
+
+ HInstruction* write_non_escape = MakeIFieldSet(new_inst, c1, MemberOffset(32));
+ HInstruction* non_escape_call = MakeInvoke(DataType::Type::kVoid, {});
+ HInstruction* non_escape_goto = new (GetAllocator()) HGoto();
+ non_escape->AddInstruction(write_non_escape);
+ non_escape->AddInstruction(non_escape_call);
+ non_escape->AddInstruction(non_escape_goto);
+ non_escape_call->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(read_bottom);
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_RETAINED(call_left);
+ std::vector<HPhi*> merges;
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ std::tie(merges) = FindAllInstructions<HPhi>(graph_);
+ ASSERT_EQ(merges.size(), 3u);
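+ // Three phis: the value merge in the 'merge' block, the value merge in
+ // breturn, and the reference merge for the allocation.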
+ HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->GetBlock() == breturn;
+ });
+ HPhi* merge_value_merge = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->GetBlock() != breturn;
+ });
+ HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference;
+ });
+ ASSERT_NE(merge_alloc, nullptr);
+ EXPECT_TRUE(merge_alloc->InputAt(0)->IsNewInstance()) << *merge_alloc;
+ EXPECT_INS_EQ(merge_alloc->InputAt(0)->InputAt(0), cls)
+ << " phi is: " << merge_alloc->DumpWithArgs();
+ EXPECT_INS_EQ(merge_alloc->InputAt(1), null_const);
+ ASSERT_NE(pred_get, nullptr);
+ EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return)
+ << "get is " << pred_get->DumpWithArgs();
+ EXPECT_INS_EQ(merge_value_return->InputAt(0), merge_value_merge)
+ << " phi is: " << *merge_value_return;
+ EXPECT_INS_EQ(merge_value_return->InputAt(1), c1)
+ << " phi is: " << merge_value_return->DumpWithArgs();
+ EXPECT_INS_EQ(merge_value_merge->InputAt(0), graph_->GetIntConstant(0))
+ << " phi is: " << *merge_value_merge;
+ EXPECT_INS_EQ(merge_value_merge->InputAt(1), c2)
+ << " phi is: " << merge_value_merge->DumpWithArgs();
+}
+
+// // ENTRY
+// obj = new Obj();
+// obj.field = 3;
+// if (parameter_value) {
+// // LEFT
+// escape(obj);
+// } else {
+// // RIGHT
+// // ELIMINATE
+// obj.field = 2;
+// }
+// // MERGE
+// if (second_param) {
+// // NON_ESCAPE
+// obj.field = 1;
+// }
+// noescape();
+// EXIT
+// predicated-ELIMINATE
+// return obj.field
+TEST_F(LoadStoreEliminationTest, PredicatedLoad3) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "merge"},
+ {"right", "merge"},
+ {"merge", "non_escape"},
+ {"non_escape", "breturn"},
+ {"merge", "crit_break"},
+ {"crit_break", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+ GET_BLOCK(merge);
+ GET_BLOCK(crit_break);
+ GET_BLOCK(non_escape);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(merge, {left, right});
+ EnsurePredecessorOrder(breturn, {crit_break, non_escape});
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* bool_value2 = MakeParam(DataType::Type::kBool);
+ HInstruction* null_const = graph_->GetNullConstant();
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_entry = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_entry);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_right = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right);
+ right->AddInstruction(goto_right);
+
+ HInstruction* merge_if = new (GetAllocator()) HIf(bool_value2);
+ merge->AddInstruction(merge_if);
+
+ HInstruction* write_non_escape = MakeIFieldSet(new_inst, c1, MemberOffset(32));
+ HInstruction* non_escape_goto = new (GetAllocator()) HGoto();
+ non_escape->AddInstruction(write_non_escape);
+ non_escape->AddInstruction(non_escape_goto);
+
+ crit_break->AddInstruction(new (GetAllocator()) HGoto());
+
+ HInstruction* bottom_call = MakeInvoke(DataType::Type::kVoid, {});
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddInstruction(bottom_call);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+ bottom_call->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(read_bottom);
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_RETAINED(call_left);
+ std::vector<HPhi*> merges;
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ std::tie(merges) = FindAllInstructions<HPhi>(graph_);
+ ASSERT_EQ(merges.size(), 3u);
+ HPhi* merge_value_return = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->GetBlock() == breturn;
+ });
+ HPhi* merge_value_merge = FindOrNull(merges.begin(), merges.end(), [&](HPhi* p) {
+ return p->GetType() == DataType::Type::kInt32 && p->GetBlock() != breturn;
+ });
+ HPhi* merge_alloc = FindOrNull(merges.begin(), merges.end(), [](HPhi* p) {
+ return p->GetType() == DataType::Type::kReference;
+ });
+ ASSERT_NE(merge_alloc, nullptr);
+ EXPECT_TRUE(merge_alloc->InputAt(0)->IsNewInstance()) << merge_alloc->DumpWithArgs();
+ EXPECT_INS_EQ(merge_alloc->InputAt(0)->InputAt(0), cls)
+ << " phi is: " << merge_alloc->DumpWithArgs();
+ EXPECT_INS_EQ(merge_alloc->InputAt(1), null_const);
+ ASSERT_NE(pred_get, nullptr);
+ EXPECT_INS_EQ(pred_get->GetTarget(), merge_alloc);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue(), merge_value_return)
+ << "get is " << pred_get->DumpWithArgs();
+ EXPECT_INS_EQ(merge_value_return->InputAt(0), merge_value_merge)
+ << " phi is: " << *merge_value_return;
+ EXPECT_INS_EQ(merge_value_return->InputAt(1), c1) << " phi is: " << *merge_value_return;
+ EXPECT_INS_EQ(merge_value_merge->InputAt(0), graph_->GetIntConstant(0))
+ << " phi is: " << *merge_value_merge;
+ EXPECT_INS_EQ(merge_value_merge->InputAt(1), c2) << " phi is: " << *merge_value_merge;
+}
+
+// // ENTRY
+// obj = new Obj();
+// // ALL should be kept
+// switch (parameter_value) {
+// case 1:
+// // Case1
+// obj.field = 1;
+// call_func(obj);
+// break;
+// case 2:
+// // Case2
+// obj.field = 2;
+// call_func(obj);
+// break;
+// default:
+// // Case3
+// obj.field = 3;
+// do {
+// if (test2()) { } else { obj.field = 5; }
+// } while (test());
+// break;
+// }
+// EXIT
+// // predicated-ELIMINATE
+// return obj.field
+TEST_F(LoadStoreEliminationTest, PartialLoopPhis1) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "bswitch"},
+ {"bswitch", "case1"},
+ {"bswitch", "case2"},
+ {"bswitch", "case3"},
+ {"case1", "breturn"},
+ {"case2", "breturn"},
+ {"case3", "loop_pre_header"},
+ {"loop_pre_header", "loop_header"},
+ {"loop_header", "loop_body"},
+ {"loop_body", "loop_if_left"},
+ {"loop_body", "loop_if_right"},
+ {"loop_if_left", "loop_merge"},
+ {"loop_if_right", "loop_merge"},
+ {"loop_merge", "loop_end"},
+ {"loop_end", "loop_header"},
+ {"loop_end", "critical_break"},
+ {"critical_break", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(bswitch);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(case1);
+ GET_BLOCK(case2);
+ GET_BLOCK(case3);
+
+ GET_BLOCK(loop_pre_header);
+ GET_BLOCK(loop_header);
+ GET_BLOCK(loop_body);
+ GET_BLOCK(loop_if_left);
+ GET_BLOCK(loop_if_right);
+ GET_BLOCK(loop_merge);
+ GET_BLOCK(loop_end);
+ GET_BLOCK(critical_break);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {case1, case2, critical_break});
+ EnsurePredecessorOrder(loop_header, {loop_pre_header, loop_end});
+ EnsurePredecessorOrder(loop_merge, {loop_if_left, loop_if_right});
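+ // Fixing the predecessor order makes the phi input indices checked below
+ // deterministic.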
+ CHECK_SUBROUTINE_FAILURE();
+ HInstruction* switch_val = MakeParam(DataType::Type::kInt32);
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c5 = graph_->GetIntConstant(5);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* entry_goto = new (GetAllocator()) HGoto();
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(entry_goto);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, switch_val);
+ bswitch->AddInstruction(switch_inst);
+
+ HInstruction* write_c1 = MakeIFieldSet(new_inst, c1, MemberOffset(32));
+ HInstruction* call_c1 = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_c1 = new (GetAllocator()) HGoto();
+ case1->AddInstruction(write_c1);
+ case1->AddInstruction(call_c1);
+ case1->AddInstruction(goto_c1);
+ call_c1->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_c2 = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* call_c2 = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_c2 = new (GetAllocator()) HGoto();
+ case2->AddInstruction(write_c2);
+ case2->AddInstruction(call_c2);
+ case2->AddInstruction(goto_c2);
+ call_c2->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_c3 = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* goto_c3 = new (GetAllocator()) HGoto();
+ case3->AddInstruction(write_c3);
+ case3->AddInstruction(goto_c3);
+
+ HInstruction* goto_preheader = new (GetAllocator()) HGoto();
+ loop_pre_header->AddInstruction(goto_preheader);
+
+ HInstruction* suspend_check_header = new (GetAllocator()) HSuspendCheck();
+ HInstruction* goto_header = new (GetAllocator()) HGoto();
+ loop_header->AddInstruction(suspend_check_header);
+ loop_header->AddInstruction(goto_header);
+ suspend_check_header->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_loop_body = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* if_loop_body = new (GetAllocator()) HIf(call_loop_body);
+ loop_body->AddInstruction(call_loop_body);
+ loop_body->AddInstruction(if_loop_body);
+ call_loop_body->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
+ loop_if_left->AddInstruction(goto_loop_left);
+
+ HInstruction* write_loop_right = MakeIFieldSet(new_inst, c5, MemberOffset(32));
+ HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
+ loop_if_right->AddInstruction(write_loop_right);
+ loop_if_right->AddInstruction(goto_loop_right);
+
+ HInstruction* goto_loop_merge = new (GetAllocator()) HGoto();
+ loop_merge->AddInstruction(goto_loop_merge);
+
+ HInstruction* call_end = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* if_end = new (GetAllocator()) HIf(call_end);
+ loop_end->AddInstruction(call_end);
+ loop_end->AddInstruction(if_end);
+ call_end->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_critical_break = new (GetAllocator()) HGoto();
+ critical_break->AddInstruction(goto_critical_break);
+
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ EXPECT_INS_REMOVED(read_bottom) << *read_bottom;
+ ASSERT_TRUE(pred_get != nullptr);
+ HPhi* inst_return_phi = pred_get->GetTarget()->AsPhi();
+ ASSERT_TRUE(inst_return_phi != nullptr) << pred_get->GetTarget()->DumpWithArgs();
+ EXPECT_INS_EQ(inst_return_phi->InputAt(0),
+ FindSingleInstruction<HNewInstance>(graph_, case1->GetSinglePredecessor()));
+ EXPECT_INS_EQ(inst_return_phi->InputAt(1),
+ FindSingleInstruction<HNewInstance>(graph_, case2->GetSinglePredecessor()));
+ EXPECT_INS_EQ(inst_return_phi->InputAt(2), graph_->GetNullConstant());
+ HPhi* inst_value_phi = pred_get->GetDefaultValue()->AsPhi();
+ ASSERT_TRUE(inst_value_phi != nullptr) << pred_get->GetDefaultValue()->DumpWithArgs();
+ EXPECT_INS_EQ(inst_value_phi->InputAt(0), graph_->GetIntConstant(0));
+ EXPECT_INS_EQ(inst_value_phi->InputAt(1), graph_->GetIntConstant(0));
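+ // Inputs 0 and 1 are unused defaults (the object escaped on those paths);
+ // input 2 (checked below) carries the loop value.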
+ HPhi* loop_merge_phi = FindSingleInstruction<HPhi>(graph_, loop_merge);
+ ASSERT_TRUE(loop_merge_phi != nullptr);
+ HPhi* loop_header_phi = FindSingleInstruction<HPhi>(graph_, loop_header);
+ ASSERT_TRUE(loop_header_phi != nullptr);
+ EXPECT_INS_EQ(loop_header_phi->InputAt(0), c3);
+ EXPECT_INS_EQ(loop_header_phi->InputAt(1), loop_merge_phi);
+ EXPECT_INS_EQ(loop_merge_phi->InputAt(0), loop_header_phi);
+ EXPECT_INS_EQ(loop_merge_phi->InputAt(1), c5);
+ EXPECT_INS_EQ(inst_value_phi->InputAt(2), loop_merge_phi);
+ EXPECT_INS_RETAINED(write_c1) << *write_c1;
+ EXPECT_INS_RETAINED(write_c2) << *write_c2;
+ EXPECT_INS_REMOVED(write_c3) << *write_c3;
+ EXPECT_INS_REMOVED(write_loop_right) << *write_loop_right;
+}
+
+// // ENTRY
+// obj = new Obj();
+// switch (parameter_value) {
+// case 1:
+// // Case1
+// obj.field = 1;
+// call_func(obj);
+// break;
+// case 2:
+// // Case2
+// obj.field = 2;
+// call_func(obj);
+// break;
+// default:
+// // Case3
+// obj.field = 3;
+// while (!test()) {
+// if (test2()) { } else { obj.field = 5; }
+// }
+// break;
+// }
+// EXIT
+// // predicated-ELIMINATE
+// return obj.field
+TEST_F(LoadStoreEliminationTest, PartialLoopPhis2) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "bswitch"},
+ {"bswitch", "case1"},
+ {"bswitch", "case2"},
+ {"bswitch", "case3"},
+ {"case1", "breturn"},
+ {"case2", "breturn"},
+ {"case3", "loop_pre_header"},
+
+ {"loop_pre_header", "loop_header"},
+ {"loop_header", "critical_break"},
+ {"loop_header", "loop_body"},
+ {"loop_body", "loop_if_left"},
+ {"loop_body", "loop_if_right"},
+ {"loop_if_left", "loop_merge"},
+ {"loop_if_right", "loop_merge"},
+ {"loop_merge", "loop_header"},
+
+ {"critical_break", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(bswitch);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(case1);
+ GET_BLOCK(case2);
+ GET_BLOCK(case3);
+
+ GET_BLOCK(loop_pre_header);
+ GET_BLOCK(loop_header);
+ GET_BLOCK(loop_body);
+ GET_BLOCK(loop_if_left);
+ GET_BLOCK(loop_if_right);
+ GET_BLOCK(loop_merge);
+ GET_BLOCK(critical_break);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {case1, case2, critical_break});
+ EnsurePredecessorOrder(loop_header, {loop_pre_header, loop_merge});
+ EnsurePredecessorOrder(loop_merge, {loop_if_left, loop_if_right});
+ CHECK_SUBROUTINE_FAILURE();
+ HInstruction* switch_val = MakeParam(DataType::Type::kInt32);
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c5 = graph_->GetIntConstant(5);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* entry_goto = new (GetAllocator()) HGoto();
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(entry_goto);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, switch_val);
+ bswitch->AddInstruction(switch_inst);
+
+ HInstruction* write_c1 = MakeIFieldSet(new_inst, c1, MemberOffset(32));
+ HInstruction* call_c1 = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_c1 = new (GetAllocator()) HGoto();
+ case1->AddInstruction(write_c1);
+ case1->AddInstruction(call_c1);
+ case1->AddInstruction(goto_c1);
+ call_c1->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_c2 = MakeIFieldSet(new_inst, c2, MemberOffset(32));
+ HInstruction* call_c2 = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_c2 = new (GetAllocator()) HGoto();
+ case2->AddInstruction(write_c2);
+ case2->AddInstruction(call_c2);
+ case2->AddInstruction(goto_c2);
+ call_c2->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_c3 = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* goto_c3 = new (GetAllocator()) HGoto();
+ case3->AddInstruction(write_c3);
+ case3->AddInstruction(goto_c3);
+
+ HInstruction* goto_preheader = new (GetAllocator()) HGoto();
+ loop_pre_header->AddInstruction(goto_preheader);
+
+ HInstruction* suspend_check_header = new (GetAllocator()) HSuspendCheck();
+ HInstruction* call_header = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* if_header = new (GetAllocator()) HIf(call_header);
+ loop_header->AddInstruction(suspend_check_header);
+ loop_header->AddInstruction(call_header);
+ loop_header->AddInstruction(if_header);
+ call_header->CopyEnvironmentFrom(cls->GetEnvironment());
+ suspend_check_header->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_loop_body = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* if_loop_body = new (GetAllocator()) HIf(call_loop_body);
+ loop_body->AddInstruction(call_loop_body);
+ loop_body->AddInstruction(if_loop_body);
+ call_loop_body->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
+ loop_if_left->AddInstruction(goto_loop_left);
+
+ HInstruction* write_loop_right = MakeIFieldSet(new_inst, c5, MemberOffset(32));
+ HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
+ loop_if_right->AddInstruction(write_loop_right);
+ loop_if_right->AddInstruction(goto_loop_right);
+
+ HInstruction* goto_loop_merge = new (GetAllocator()) HGoto();
+ loop_merge->AddInstruction(goto_loop_merge);
+
+ HInstruction* goto_critical_break = new (GetAllocator()) HGoto();
+ critical_break->AddInstruction(goto_critical_break);
+
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ EXPECT_INS_REMOVED(read_bottom) << *read_bottom;
+ ASSERT_TRUE(pred_get != nullptr);
+ HPhi* inst_return_phi = pred_get->GetTarget()->AsPhi();
+ ASSERT_TRUE(inst_return_phi != nullptr) << pred_get->GetTarget()->DumpWithArgs();
+ EXPECT_INS_EQ(inst_return_phi->InputAt(0),
+ FindSingleInstruction<HNewInstance>(graph_, case1->GetSinglePredecessor()));
+ EXPECT_INS_EQ(inst_return_phi->InputAt(1),
+ FindSingleInstruction<HNewInstance>(graph_, case2->GetSinglePredecessor()));
+ EXPECT_INS_EQ(inst_return_phi->InputAt(2), graph_->GetNullConstant());
+ HPhi* inst_value_phi = pred_get->GetDefaultValue()->AsPhi();
+ ASSERT_TRUE(inst_value_phi != nullptr) << pred_get->GetDefaultValue()->DumpWithArgs();
+ EXPECT_INS_EQ(inst_value_phi->InputAt(0), graph_->GetIntConstant(0));
+ EXPECT_INS_EQ(inst_value_phi->InputAt(1), graph_->GetIntConstant(0));
+ HPhi* loop_merge_phi = FindSingleInstruction<HPhi>(graph_, loop_merge);
+ ASSERT_TRUE(loop_merge_phi != nullptr);
+ HPhi* loop_header_phi = FindSingleInstruction<HPhi>(graph_, loop_header);
+ ASSERT_TRUE(loop_header_phi != nullptr);
+ EXPECT_INS_EQ(loop_header_phi->InputAt(0), c3);
+ EXPECT_INS_EQ(loop_header_phi->InputAt(1), loop_merge_phi);
+ EXPECT_INS_EQ(loop_merge_phi->InputAt(0), loop_header_phi);
+ EXPECT_INS_EQ(loop_merge_phi->InputAt(1), c5);
+ EXPECT_INS_EQ(inst_value_phi->InputAt(2), loop_header_phi);
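+ // Unlike PartialLoopPhis1 the loop is exited from the header, so the header
+ // phi (not the merge phi) flows out to the return.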
+ EXPECT_INS_RETAINED(write_c1) << *write_c1;
+ EXPECT_INS_RETAINED(write_c2) << *write_c2;
+ EXPECT_INS_REMOVED(write_c3) << *write_c3;
+ EXPECT_INS_REMOVED(write_loop_right) << *write_loop_right;
+}
+
+// // ENTRY
+// obj = new Obj();
+// obj.field = 3;
+// while (!test()) {
+// if (test2()) { } else { obj.field = 5; }
+// }
+// if (parameter_value) {
+// escape(obj);
+// }
+// EXIT
+// return obj.field
+TEST_F(LoadStoreEliminationTest, PartialLoopPhis3) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "loop_pre_header"},
+
+ {"loop_pre_header", "loop_header"},
+ {"loop_header", "escape_check"},
+ {"loop_header", "loop_body"},
+ {"loop_body", "loop_if_left"},
+ {"loop_body", "loop_if_right"},
+ {"loop_if_left", "loop_merge"},
+ {"loop_if_right", "loop_merge"},
+ {"loop_merge", "loop_header"},
+
+ {"escape_check", "escape"},
+ {"escape_check", "no_escape"},
+ {"no_escape", "breturn"},
+ {"escape", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(no_escape);
+ GET_BLOCK(escape);
+ GET_BLOCK(escape_check);
+
+ GET_BLOCK(loop_pre_header);
+ GET_BLOCK(loop_header);
+ GET_BLOCK(loop_body);
+ GET_BLOCK(loop_if_left);
+ GET_BLOCK(loop_if_right);
+ GET_BLOCK(loop_merge);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {no_escape, escape});
+ EnsurePredecessorOrder(loop_header, {loop_pre_header, loop_merge});
+ EnsurePredecessorOrder(loop_merge, {loop_if_left, loop_if_right});
+ CHECK_SUBROUTINE_FAILURE();
+ HInstruction* bool_val = MakeParam(DataType::Type::kBool);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c5 = graph_->GetIntConstant(5);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* entry_goto = new (GetAllocator()) HGoto();
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(entry_goto);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_pre_header = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* goto_preheader = new (GetAllocator()) HGoto();
+ loop_pre_header->AddInstruction(write_pre_header);
+ loop_pre_header->AddInstruction(goto_preheader);
+
+ HInstruction* suspend_check_header = new (GetAllocator()) HSuspendCheck();
+ HInstruction* call_header = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* if_header = new (GetAllocator()) HIf(call_header);
+ loop_header->AddInstruction(suspend_check_header);
+ loop_header->AddInstruction(call_header);
+ loop_header->AddInstruction(if_header);
+ call_header->CopyEnvironmentFrom(cls->GetEnvironment());
+ suspend_check_header->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_loop_body = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* if_loop_body = new (GetAllocator()) HIf(call_loop_body);
+ loop_body->AddInstruction(call_loop_body);
+ loop_body->AddInstruction(if_loop_body);
+ call_loop_body->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
+ loop_if_left->AddInstruction(goto_loop_left);
+
+ HInstruction* write_loop_right = MakeIFieldSet(new_inst, c5, MemberOffset(32));
+ HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
+ loop_if_right->AddInstruction(write_loop_right);
+ loop_if_right->AddInstruction(goto_loop_right);
+
+ HInstruction* goto_loop_merge = new (GetAllocator()) HGoto();
+ loop_merge->AddInstruction(goto_loop_merge);
+
+ HInstruction* if_esc_check = new (GetAllocator()) HIf(bool_val);
+ escape_check->AddInstruction(if_esc_check);
+
+ HInstruction* call_escape = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_escape = new (GetAllocator()) HGoto();
+ escape->AddInstruction(call_escape);
+ escape->AddInstruction(goto_escape);
+ call_escape->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_no_escape = new (GetAllocator()) HGoto();
+ no_escape->AddInstruction(goto_no_escape);
+
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ EXPECT_INS_REMOVED(read_bottom) << *read_bottom;
+ ASSERT_TRUE(pred_get != nullptr);
+ HPhi* inst_return_phi = pred_get->GetTarget()->AsPhi();
+ ASSERT_TRUE(inst_return_phi != nullptr) << pred_get->GetTarget()->DumpWithArgs();
+ EXPECT_INS_EQ(inst_return_phi->InputAt(0), graph_->GetNullConstant());
+ EXPECT_INS_EQ(inst_return_phi->InputAt(1),
+ FindSingleInstruction<HNewInstance>(graph_, escape->GetSinglePredecessor()));
+ HPhi* inst_value_phi = pred_get->GetDefaultValue()->AsPhi();
+ ASSERT_TRUE(inst_value_phi != nullptr) << pred_get->GetDefaultValue()->DumpWithArgs();
+ HPhi* loop_header_phi = FindSingleInstruction<HPhi>(graph_, loop_header);
+ HPhi* loop_merge_phi = FindSingleInstruction<HPhi>(graph_, loop_merge);
+ EXPECT_INS_EQ(inst_value_phi->InputAt(0), loop_header_phi);
+ EXPECT_INS_EQ(inst_value_phi->InputAt(1), graph_->GetIntConstant(0));
+ EXPECT_INS_EQ(loop_header_phi->InputAt(0), c3);
+ EXPECT_INS_EQ(loop_header_phi->InputAt(1), loop_merge_phi);
+ EXPECT_INS_EQ(loop_merge_phi->InputAt(0), loop_header_phi);
+ EXPECT_INS_EQ(loop_merge_phi->InputAt(1), c5);
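+ // The materialization block must initialize the new instance's field with
+ // the current loop value before the escape.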
+ HInstanceFieldSet* mat_set =
+ FindSingleInstruction<HInstanceFieldSet>(graph_, escape->GetSinglePredecessor());
+ ASSERT_NE(mat_set, nullptr);
+ EXPECT_INS_EQ(mat_set->InputAt(1), loop_header_phi);
+ EXPECT_INS_REMOVED(write_loop_right) << *write_loop_right;
+ EXPECT_INS_REMOVED(write_pre_header) << *write_pre_header;
+}
+
+// // ENTRY
+// obj = new Obj();
+// if (parameter_value) {
+// escape(obj);
+// }
+// obj.field = 3;
+// while (!test()) {
+// if (test2()) { } else { obj.field = 5; }
+// }
+// EXIT
+// return obj.field
+TEST_F(LoadStoreEliminationTest, PartialLoopPhis4) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "escape_check"},
+ {"escape_check", "escape"},
+ {"escape_check", "no_escape"},
+ {"no_escape", "loop_pre_header"},
+ {"escape", "loop_pre_header"},
+
+ {"loop_pre_header", "loop_header"},
+ {"loop_header", "breturn"},
+ {"loop_header", "loop_body"},
+ {"loop_body", "loop_if_left"},
+ {"loop_body", "loop_if_right"},
+ {"loop_if_left", "loop_merge"},
+ {"loop_if_right", "loop_merge"},
+ {"loop_merge", "loop_header"},
+
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(no_escape);
+ GET_BLOCK(escape);
+ GET_BLOCK(escape_check);
+
+ GET_BLOCK(loop_pre_header);
+ GET_BLOCK(loop_header);
+ GET_BLOCK(loop_body);
+ GET_BLOCK(loop_if_left);
+ GET_BLOCK(loop_if_right);
+ GET_BLOCK(loop_merge);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(loop_pre_header, {no_escape, escape});
+ EnsurePredecessorOrder(loop_header, {loop_pre_header, loop_merge});
+ EnsurePredecessorOrder(loop_merge, {loop_if_left, loop_if_right});
+ CHECK_SUBROUTINE_FAILURE();
+ HInstruction* bool_val = MakeParam(DataType::Type::kBool);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c5 = graph_->GetIntConstant(5);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* entry_goto = new (GetAllocator()) HGoto();
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(entry_goto);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* if_esc_check = new (GetAllocator()) HIf(bool_val);
+ escape_check->AddInstruction(if_esc_check);
+
+ HInstruction* call_escape = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_escape = new (GetAllocator()) HGoto();
+ escape->AddInstruction(call_escape);
+ escape->AddInstruction(goto_escape);
+ call_escape->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_no_escape = new (GetAllocator()) HGoto();
+ no_escape->AddInstruction(goto_no_escape);
+
+ HInstruction* write_pre_header = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* goto_preheader = new (GetAllocator()) HGoto();
+ loop_pre_header->AddInstruction(write_pre_header);
+ loop_pre_header->AddInstruction(goto_preheader);
+
+ HInstruction* suspend_check_header = new (GetAllocator()) HSuspendCheck();
+ HInstruction* call_header = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* if_header = new (GetAllocator()) HIf(call_header);
+ loop_header->AddInstruction(suspend_check_header);
+ loop_header->AddInstruction(call_header);
+ loop_header->AddInstruction(if_header);
+ call_header->CopyEnvironmentFrom(cls->GetEnvironment());
+ suspend_check_header->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_loop_body = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* if_loop_body = new (GetAllocator()) HIf(call_loop_body);
+ loop_body->AddInstruction(call_loop_body);
+ loop_body->AddInstruction(if_loop_body);
+ call_loop_body->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
+ loop_if_left->AddInstruction(goto_loop_left);
+
+ HInstruction* write_loop_right = MakeIFieldSet(new_inst, c5, MemberOffset(32));
+ HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
+ loop_if_right->AddInstruction(write_loop_right);
+ loop_if_right->AddInstruction(goto_loop_right);
+
+ HInstruction* goto_loop_merge = new (GetAllocator()) HGoto();
+ loop_merge->AddInstruction(goto_loop_merge);
+
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ EXPECT_INS_REMOVED(read_bottom) << *read_bottom;
+ ASSERT_TRUE(pred_get != nullptr);
+ HPhi* inst_return_phi = pred_get->GetTarget()->AsPhi();
+ ASSERT_TRUE(inst_return_phi != nullptr) << pred_get->GetTarget()->DumpWithArgs();
+ EXPECT_INS_EQ(inst_return_phi->InputAt(0), graph_->GetNullConstant());
+ EXPECT_INS_EQ(inst_return_phi->InputAt(1),
+ FindSingleInstruction<HNewInstance>(graph_, escape->GetSinglePredecessor()));
+ HPhi* inst_value_phi = pred_get->GetDefaultValue()->AsPhi();
+ ASSERT_TRUE(inst_value_phi != nullptr) << pred_get->GetDefaultValue()->DumpWithArgs();
+ HPhi* loop_header_phi = FindSingleInstruction<HPhi>(graph_, loop_header);
+ HPhi* loop_merge_phi = FindSingleInstruction<HPhi>(graph_, loop_merge);
+ EXPECT_INS_EQ(inst_value_phi, loop_header_phi);
+ EXPECT_INS_EQ(loop_header_phi->InputAt(0), c3);
+ EXPECT_INS_EQ(loop_header_phi->InputAt(1), loop_merge_phi);
+ EXPECT_INS_EQ(loop_merge_phi->InputAt(0), loop_header_phi);
+ EXPECT_INS_EQ(loop_merge_phi->InputAt(1), c5);
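+ // The escape may already have happened when these stores execute, so they
+ // must be kept as predicated sets that only write when the object was
+ // actually materialized.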
+ EXPECT_INS_RETAINED(write_loop_right) << *write_loop_right;
+ EXPECT_TRUE(write_loop_right->AsInstanceFieldSet()->GetIsPredicatedSet()) << *write_loop_right;
+ EXPECT_INS_RETAINED(write_pre_header) << *write_pre_header;
+ EXPECT_TRUE(write_pre_header->AsInstanceFieldSet()->GetIsPredicatedSet()) << *write_pre_header;
+}
+
+// // ENTRY
+// obj = new Obj();
+// obj.field = 3;
+// while (!test()) {
+// if (test2()) { } else { obj.field += 5; }
+// }
+// if (parameter_value) {
+// escape(obj);
+// }
+// EXIT
+// return obj.field
+TEST_F(LoadStoreEliminationTest, PartialLoopPhis5) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(/*handles=*/&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "loop_pre_header"},
+ {"loop_pre_header", "loop_header"},
+ {"loop_header", "escape_check"},
+ {"loop_header", "loop_body"},
+ {"loop_body", "loop_if_left"},
+ {"loop_body", "loop_if_right"},
+ {"loop_if_left", "loop_merge"},
+ {"loop_if_right", "loop_merge"},
+ {"loop_merge", "loop_header"},
+ {"escape_check", "escape"},
+ {"escape_check", "no_escape"},
+ {"no_escape", "breturn"},
+ {"escape", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(no_escape);
+ GET_BLOCK(escape);
+ GET_BLOCK(escape_check);
+
+ GET_BLOCK(loop_pre_header);
+ GET_BLOCK(loop_header);
+ GET_BLOCK(loop_body);
+ GET_BLOCK(loop_if_left);
+ GET_BLOCK(loop_if_right);
+ GET_BLOCK(loop_merge);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {no_escape, escape});
+ EnsurePredecessorOrder(loop_header, {loop_pre_header, loop_merge});
+ EnsurePredecessorOrder(loop_merge, {loop_if_left, loop_if_right});
+ CHECK_SUBROUTINE_FAILURE();
+ HInstruction* bool_val = MakeParam(DataType::Type::kBool);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c5 = graph_->GetIntConstant(5);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* entry_goto = new (GetAllocator()) HGoto();
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(entry_goto);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_pre_header = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* goto_preheader = new (GetAllocator()) HGoto();
+ loop_pre_header->AddInstruction(write_pre_header);
+ loop_pre_header->AddInstruction(goto_preheader);
+
+ HInstruction* suspend_check_header = new (GetAllocator()) HSuspendCheck();
+ HInstruction* call_header = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* if_header = new (GetAllocator()) HIf(call_header);
+ loop_header->AddInstruction(suspend_check_header);
+ loop_header->AddInstruction(call_header);
+ loop_header->AddInstruction(if_header);
+ call_header->CopyEnvironmentFrom(cls->GetEnvironment());
+ suspend_check_header->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_loop_body = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* if_loop_body = new (GetAllocator()) HIf(call_loop_body);
+ loop_body->AddInstruction(call_loop_body);
+ loop_body->AddInstruction(if_loop_body);
+ call_loop_body->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_loop_left = new (GetAllocator()) HGoto();
+ loop_if_left->AddInstruction(goto_loop_left);
+
+ HInstruction* read_loop_right = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* add_loop_right =
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, read_loop_right, c5);
+ HInstruction* write_loop_right = MakeIFieldSet(new_inst, add_loop_right, MemberOffset(32));
+ HInstruction* goto_loop_right = new (GetAllocator()) HGoto();
+ loop_if_right->AddInstruction(read_loop_right);
+ loop_if_right->AddInstruction(add_loop_right);
+ loop_if_right->AddInstruction(write_loop_right);
+ loop_if_right->AddInstruction(goto_loop_right);
+
+ HInstruction* goto_loop_merge = new (GetAllocator()) HGoto();
+ loop_merge->AddInstruction(goto_loop_merge);
+
+ HInstruction* if_esc_check = new (GetAllocator()) HIf(bool_val);
+ escape_check->AddInstruction(if_esc_check);
+
+ HInstruction* call_escape = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_escape = new (GetAllocator()) HGoto();
+ escape->AddInstruction(call_escape);
+ escape->AddInstruction(goto_escape);
+ call_escape->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* goto_no_escape = new (GetAllocator()) HGoto();
+ no_escape->AddInstruction(goto_no_escape);
+
+ HInstruction* read_bottom = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_bottom);
+ breturn->AddInstruction(read_bottom);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSEWithPartial();
+ LOG(INFO) << "Post LSE " << blks;
+
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ EXPECT_INS_REMOVED(read_bottom) << *read_bottom;
+ ASSERT_TRUE(pred_get != nullptr);
+ HPhi* inst_return_phi = pred_get->GetTarget()->AsPhi();
+ ASSERT_TRUE(inst_return_phi != nullptr) << pred_get->GetTarget()->DumpWithArgs();
+ EXPECT_INS_EQ(inst_return_phi->InputAt(0), graph_->GetNullConstant());
+ EXPECT_INS_EQ(inst_return_phi->InputAt(1),
+ FindSingleInstruction<HNewInstance>(graph_, escape->GetSinglePredecessor()));
+ HPhi* inst_value_phi = pred_get->GetDefaultValue()->AsPhi();
+ ASSERT_TRUE(inst_value_phi != nullptr) << pred_get->GetDefaultValue()->DumpWithArgs();
+ HPhi* loop_header_phi = FindSingleInstruction<HPhi>(graph_, loop_header);
+ HPhi* loop_merge_phi = FindSingleInstruction<HPhi>(graph_, loop_merge);
+ EXPECT_INS_EQ(inst_value_phi->InputAt(0), loop_header_phi);
+ EXPECT_INS_EQ(inst_value_phi->InputAt(1), graph_->GetIntConstant(0));
+ EXPECT_INS_EQ(loop_header_phi->InputAt(0), c3);
+ EXPECT_INS_EQ(loop_header_phi->InputAt(1), loop_merge_phi);
+ EXPECT_INS_EQ(loop_merge_phi->InputAt(0), loop_header_phi);
+ EXPECT_INS_EQ(loop_merge_phi->InputAt(1), add_loop_right);
+ EXPECT_INS_EQ(add_loop_right->InputAt(0), loop_header_phi);
+ EXPECT_INS_EQ(add_loop_right->InputAt(1), c5);
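+ // The field read feeding the add was eliminated; the add now consumes the
+ // loop header phi directly.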
+ HInstanceFieldSet* mat_set =
+ FindSingleInstruction<HInstanceFieldSet>(graph_, escape->GetSinglePredecessor());
+ ASSERT_NE(mat_set, nullptr);
+ EXPECT_INS_EQ(mat_set->InputAt(1), loop_header_phi);
+ EXPECT_INS_REMOVED(write_loop_right) << *write_loop_right;
+ EXPECT_INS_REMOVED(write_pre_header) << *write_pre_header;
+}
+
+ // TODO This should really be in an InstructionSimplifier gtest but (1) that
+ // doesn't exist and (2) we should move this simplification directly into the
+ // LSE pass since more information is available there.
+// // ENTRY
+// obj = new Obj();
+// obj.field = 3;
+// if (param) {
+// escape(obj);
+// } else {
+// obj.field = 10;
+// }
+// return obj.field;
+TEST_F(LoadStoreEliminationTest, SimplifyTest) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {left, right});
+
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c10 = graph_->GetIntConstant(10);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_start = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_start);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, { new_inst });
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_right = MakeIFieldSet(new_inst, c10, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(write_right);
+ right->AddInstruction(goto_right);
+
+ HInstruction* read_end = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
+ breturn->AddInstruction(read_end);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
PerformLSE();
- EXPECT_TRUE(IsRemoved(read_bottom));
- EXPECT_TRUE(IsRemoved(write_right));
- EXPECT_FALSE(IsRemoved(write_entry));
- EXPECT_FALSE(IsRemoved(write_left));
- EXPECT_FALSE(IsRemoved(call_left));
- EXPECT_FALSE(IsRemoved(call_entry));
+ // Run the code-simplifier too
+ LOG(INFO) << "Pre simplification " << blks;
+ InstructionSimplifier simp(graph_, /*codegen=*/nullptr);
+ simp.Run();
+
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_REMOVED(write_start);
+ EXPECT_INS_REMOVED(read_end);
+ EXPECT_INS_RETAINED(call_left);
+
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ ASSERT_NE(pred_get, nullptr);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue(), c10);
+}
+
+// TODO This should really be in an Instruction simplifier Gtest but (1) that
+// doesn't exist and (2) we should move this simplification directly into the
+// LSE pass, where more information is available.
+//
+// This checks that we don't replace phis when the replacement isn't valid at
+// that point (i.e. it doesn't dominate)
+// // ENTRY
+// obj = new Obj();
+// obj.field = 3;
+// if (param) {
+// escape(obj);
+// } else {
+// obj.field = noescape();
+// }
+// return obj.field;
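+//
+// Expected result (sketch, per the checks below): the default value must stay
+// a phi [0, noescape()] because the call result does not dominate the return
+// block, roughly
+//   return param ? <materialized obj>.field : phi(0, noescape());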
+TEST_F(LoadStoreEliminationTest, SimplifyTest2) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+ {"right", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {left, right});
+
+ HInstruction* bool_value = MakeParam(DataType::Type::kBool);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_start = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(bool_value);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_start);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_left = MakeInvoke(DataType::Type::kVoid, {new_inst});
+ HInstruction* goto_left = new (GetAllocator()) HGoto();
+ left->AddInstruction(call_left);
+ left->AddInstruction(goto_left);
+ call_left->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_right = MakeInvoke(DataType::Type::kInt32, {});
+ HInstruction* write_right = MakeIFieldSet(new_inst, call_right, MemberOffset(32));
+ HInstruction* goto_right = new (GetAllocator()) HGoto();
+ right->AddInstruction(call_right);
+ right->AddInstruction(write_right);
+ right->AddInstruction(goto_right);
+ call_right->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* read_end = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
+ breturn->AddInstruction(read_end);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSE();
+
+ // Run the code-simplifier too
+ LOG(INFO) << "Pre simplification " << blks;
+ InstructionSimplifier simp(graph_, /*codegen=*/nullptr);
+ simp.Run();
+
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(write_right);
+ EXPECT_INS_REMOVED(write_start);
+ EXPECT_INS_REMOVED(read_end);
+ EXPECT_INS_RETAINED(call_left);
+ EXPECT_INS_RETAINED(call_right);
+ EXPECT_EQ(call_right->GetBlock(), right);
+
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ ASSERT_NE(pred_get, nullptr);
+ EXPECT_TRUE(pred_get->GetDefaultValue()->IsPhi()) << pred_get->DumpWithArgs();
+ EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(0), graph_->GetIntConstant(0))
+ << pred_get->DumpWithArgs();
+ EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(1), call_right) << pred_get->DumpWithArgs();
+}
+
+// TODO This should really be in an Instruction simplifier Gtest but (1) that
+// doesn't exist and (2) we should move this simplification directly into the
+// LSE pass, where more information is available.
+//
+// This checks that we replace phis even when there are multiple replacements as
+// long as they are equal
+// // ENTRY
+// obj = new Obj();
+// obj.field = 3;
+// switch (param) {
+// case 1:
+// escape(obj);
+// break;
+// case 2:
+// obj.field = 10;
+// break;
+// case 3:
+// obj.field = 10;
+// break;
+// }
+// return obj.field;
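+//
+// Expected result (sketch, per the checks below): both non-escaping stores
+// write the same value, so the default-value phi folds to the constant 10,
+// roughly
+//   return (param == 1) ? <materialized obj>.field : 10;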
+TEST_F(LoadStoreEliminationTest, SimplifyTest3) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "case1"},
+ {"entry", "case2"},
+ {"entry", "case3"},
+ {"case1", "breturn"},
+ {"case2", "breturn"},
+ {"case3", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(case1);
+ GET_BLOCK(case2);
+ GET_BLOCK(case3);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {case1, case2, case3});
+
+ HInstruction* int_val = MakeParam(DataType::Type::kInt32);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c10 = graph_->GetIntConstant(10);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_start = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, int_val);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_start);
+ entry->AddInstruction(switch_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_case1 = MakeInvoke(DataType::Type::kVoid, {new_inst});
+ HInstruction* goto_case1 = new (GetAllocator()) HGoto();
+ case1->AddInstruction(call_case1);
+ case1->AddInstruction(goto_case1);
+ call_case1->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_case2 = MakeIFieldSet(new_inst, c10, MemberOffset(32));
+ HInstruction* goto_case2 = new (GetAllocator()) HGoto();
+ case2->AddInstruction(write_case2);
+ case2->AddInstruction(goto_case2);
+
+ HInstruction* write_case3 = MakeIFieldSet(new_inst, c10, MemberOffset(32));
+ HInstruction* goto_case3 = new (GetAllocator()) HGoto();
+ case3->AddInstruction(write_case3);
+ case3->AddInstruction(goto_case3);
+
+ HInstruction* read_end = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
+ breturn->AddInstruction(read_end);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSE();
+
+ // Run the code-simplifier too
+ LOG(INFO) << "Pre simplification " << blks;
+ InstructionSimplifier simp(graph_, /*codegen=*/nullptr);
+ simp.Run();
+
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(write_case2);
+ EXPECT_INS_REMOVED(write_case3);
+ EXPECT_INS_REMOVED(write_start);
+ EXPECT_INS_REMOVED(read_end);
+ EXPECT_INS_RETAINED(call_case1);
+
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ ASSERT_NE(pred_get, nullptr);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue(), c10)
+ << pred_get->DumpWithArgs();
+}
+
+// TODO This should really be in an Instruction simplifier Gtest but (1) that
+// doesn't exist and (2) we should move this simplification directly into the
+// LSE pass, where more information is available.
+//
+// This checks that we don't replace phis even when there are multiple
+// replacements if they are not equal
+// // ENTRY
+// obj = new Obj();
+// obj.field = 3;
+// switch (param) {
+// case 1:
+// escape(obj);
+// break;
+// case 2:
+// obj.field = 10;
+// break;
+// case 3:
+// obj.field = 20;
+// break;
+// }
+// return obj.field;
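+//
+// Expected result (sketch, per the checks below): the non-escaping stores
+// write different values (10 vs 20), so the default value must stay a
+// phi [0, 10, 20].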
+TEST_F(LoadStoreEliminationTest, SimplifyTest4) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("entry",
+ "exit",
+ {{"entry", "case1"},
+ {"entry", "case2"},
+ {"entry", "case3"},
+ {"case1", "breturn"},
+ {"case2", "breturn"},
+ {"case3", "breturn"},
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(case1);
+ GET_BLOCK(case2);
+ GET_BLOCK(case3);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {case1, case2, case3});
+
+ HInstruction* int_val = MakeParam(DataType::Type::kInt32);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+ HInstruction* c10 = graph_->GetIntConstant(10);
+ HInstruction* c20 = graph_->GetIntConstant(20);
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_start = MakeIFieldSet(new_inst, c3, MemberOffset(32));
+ HInstruction* switch_inst = new (GetAllocator()) HPackedSwitch(0, 2, int_val);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_start);
+ entry->AddInstruction(switch_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* call_case1 = MakeInvoke(DataType::Type::kVoid, {new_inst});
+ HInstruction* goto_case1 = new (GetAllocator()) HGoto();
+ case1->AddInstruction(call_case1);
+ case1->AddInstruction(goto_case1);
+ call_case1->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* write_case2 = MakeIFieldSet(new_inst, c10, MemberOffset(32));
+ HInstruction* goto_case2 = new (GetAllocator()) HGoto();
+ case2->AddInstruction(write_case2);
+ case2->AddInstruction(goto_case2);
+
+ HInstruction* write_case3 = MakeIFieldSet(new_inst, c20, MemberOffset(32));
+ HInstruction* goto_case3 = new (GetAllocator()) HGoto();
+ case3->AddInstruction(write_case3);
+ case3->AddInstruction(goto_case3);
+
+ HInstruction* read_end = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
+ breturn->AddInstruction(read_end);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSE();
+
+ // Run the code-simplifier too
+ LOG(INFO) << "Pre simplification " << blks;
+ InstructionSimplifier simp(graph_, /*codegen=*/nullptr);
+ simp.Run();
+
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_INS_REMOVED(write_case2);
+ EXPECT_INS_REMOVED(write_case3);
+ EXPECT_INS_REMOVED(write_start);
+ EXPECT_INS_REMOVED(read_end);
+ EXPECT_INS_RETAINED(call_case1);
+
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ ASSERT_NE(pred_get, nullptr);
+ EXPECT_TRUE(pred_get->GetDefaultValue()->IsPhi())
+ << pred_get->DumpWithArgs();
+ EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(0), graph_->GetIntConstant(0));
+ EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(1), c10);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(2), c20);
+}
+
+// Make sure that irreducible loops don't screw up Partial LSE. We can't pull
+// phis through them, so we need to treat them as escapes.
+// TODO We should be able to do better than this, but it needs some research.
+// // ENTRY
+// obj = new Obj();
+// obj.foo = 11;
+// if (param1) {
+// } else {
+//   // irreducible loop here. NB the obj doesn't actually escape
+// obj.foo = 33;
+// if (param2) {
+// goto inner;
+// } else {
+// while (test()) {
+// if (test()) {
+// obj.foo = 66;
+// } else {
+// }
+// inner:
+// }
+// }
+// }
+// return obj.foo;
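+//
+// Expected result (sketch, per the checks below): the irreducible loop is
+// treated as an escape, so the object is materialized on the path into the
+// "right" branch; the store inside the loop is retained while the initial
+// store and the final read are eliminated.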
+TEST_F(LoadStoreEliminationTest, PartialIrreducibleLoop) {
+ VariableSizedHandleScope vshs(Thread::Current());
+ CreateGraph(&vshs);
+ AdjacencyListGraph blks(SetupFromAdjacencyList("start",
+ "exit",
+ {{"start", "entry"},
+ {"entry", "left"},
+ {"entry", "right"},
+ {"left", "breturn"},
+
+ {"right", "right_crit_break_loop"},
+ {"right_crit_break_loop", "loop_header"},
+ {"right", "right_crit_break_end"},
+ {"right_crit_break_end", "loop_end"},
+
+ {"loop_header", "loop_body"},
+ {"loop_body", "loop_left"},
+ {"loop_body", "loop_right"},
+ {"loop_left", "loop_end"},
+ {"loop_right", "loop_end"},
+ {"loop_end", "loop_header"},
+ {"loop_header", "loop_header_crit_break"},
+ {"loop_header_crit_break", "breturn"},
+
+ {"breturn", "exit"}}));
+#define GET_BLOCK(name) HBasicBlock* name = blks.Get(#name)
+ GET_BLOCK(start);
+ GET_BLOCK(entry);
+ GET_BLOCK(exit);
+ GET_BLOCK(breturn);
+ GET_BLOCK(left);
+ GET_BLOCK(right);
+ GET_BLOCK(right_crit_break_end);
+ GET_BLOCK(right_crit_break_loop);
+ GET_BLOCK(loop_header);
+ GET_BLOCK(loop_header_crit_break);
+ GET_BLOCK(loop_body);
+ GET_BLOCK(loop_left);
+ GET_BLOCK(loop_right);
+ GET_BLOCK(loop_end);
+#undef GET_BLOCK
+ EnsurePredecessorOrder(breturn, {left, loop_header_crit_break});
+ HInstruction* c11 = graph_->GetIntConstant(11);
+ HInstruction* c33 = graph_->GetIntConstant(33);
+ HInstruction* c66 = graph_->GetIntConstant(66);
+ HInstruction* param1 = MakeParam(DataType::Type::kBool);
+ HInstruction* param2 = MakeParam(DataType::Type::kBool);
+
+ HInstruction* suspend = new (GetAllocator()) HSuspendCheck();
+ HInstruction* start_goto = new (GetAllocator()) HGoto();
+ start->AddInstruction(suspend);
+ start->AddInstruction(start_goto);
+ ManuallyBuildEnvFor(suspend, {});
+
+ HInstruction* cls = MakeClassLoad();
+ HInstruction* new_inst = MakeNewInstance(cls);
+ HInstruction* write_start = MakeIFieldSet(new_inst, c11, MemberOffset(32));
+ HInstruction* if_inst = new (GetAllocator()) HIf(param1);
+ entry->AddInstruction(cls);
+ entry->AddInstruction(new_inst);
+ entry->AddInstruction(write_start);
+ entry->AddInstruction(if_inst);
+ ManuallyBuildEnvFor(cls, {});
+ new_inst->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ left->AddInstruction(new (GetAllocator()) HGoto());
+
+ right->AddInstruction(MakeIFieldSet(new_inst, c33, MemberOffset(32)));
+ right->AddInstruction(new (GetAllocator()) HIf(param2));
+
+ right_crit_break_end->AddInstruction(new (GetAllocator()) HGoto());
+ right_crit_break_loop->AddInstruction(new (GetAllocator()) HGoto());
+
+ HInstruction* header_suspend = new (GetAllocator()) HSuspendCheck();
+ HInstruction* header_invoke = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* header_if = new (GetAllocator()) HIf(header_invoke);
+ loop_header->AddInstruction(header_suspend);
+ loop_header->AddInstruction(header_invoke);
+ loop_header->AddInstruction(header_if);
+ header_suspend->CopyEnvironmentFrom(cls->GetEnvironment());
+ header_invoke->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* body_invoke = MakeInvoke(DataType::Type::kBool, {});
+ HInstruction* body_if = new (GetAllocator()) HIf(body_invoke);
+ loop_body->AddInstruction(body_invoke);
+ loop_body->AddInstruction(body_if);
+ body_invoke->CopyEnvironmentFrom(cls->GetEnvironment());
+
+ HInstruction* left_set = MakeIFieldSet(new_inst, c66, MemberOffset(32));
+  HInstruction* left_goto = new (GetAllocator()) HGoto();
+ loop_left->AddInstruction(left_set);
+ loop_left->AddInstruction(left_goto);
+
+ loop_right->AddInstruction(new (GetAllocator()) HGoto());
+
+ loop_end->AddInstruction(new (GetAllocator()) HGoto());
+
+ HInstruction* read_end = MakeIFieldGet(new_inst, DataType::Type::kInt32, MemberOffset(32));
+ HInstruction* return_exit = new (GetAllocator()) HReturn(read_end);
+ breturn->AddInstruction(read_end);
+ breturn->AddInstruction(return_exit);
+
+ SetupExit(exit);
+
+ // PerformLSE expects this to be empty.
+ graph_->ClearDominanceInformation();
+ LOG(INFO) << "Pre LSE " << blks;
+ PerformLSE();
+ LOG(INFO) << "Post LSE " << blks;
+
+ EXPECT_TRUE(loop_header->IsLoopHeader());
+ EXPECT_TRUE(loop_header->GetLoopInformation()->IsIrreducible());
+
+ EXPECT_INS_RETAINED(left_set);
+ EXPECT_INS_REMOVED(write_start);
+ EXPECT_INS_REMOVED(read_end);
+
+ HPredicatedInstanceFieldGet* pred_get =
+ FindSingleInstruction<HPredicatedInstanceFieldGet>(graph_, breturn);
+ ASSERT_NE(pred_get, nullptr);
+ ASSERT_TRUE(pred_get->GetDefaultValue()->IsPhi()) << pred_get->DumpWithArgs();
+ EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(0), c11);
+ EXPECT_INS_EQ(pred_get->GetDefaultValue()->InputAt(1), graph_->GetIntConstant(0));
+ ASSERT_TRUE(pred_get->GetTarget()->IsPhi()) << pred_get->DumpWithArgs();
+ EXPECT_INS_EQ(pred_get->GetTarget()->InputAt(0), graph_->GetNullConstant());
+ HNewInstance* mat = FindSingleInstruction<HNewInstance>(graph_, right->GetSinglePredecessor());
+ ASSERT_NE(mat, nullptr);
+ EXPECT_INS_EQ(pred_get->GetTarget()->InputAt(1), mat);
}
} // namespace art
diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc
index 7850517..a776c37 100644
--- a/compiler/optimizing/loop_analysis.cc
+++ b/compiler/optimizing/loop_analysis.cc
@@ -214,6 +214,9 @@
return 3;
case HInstruction::InstructionKind::kIf:
return 2;
+ case HInstruction::InstructionKind::kPredicatedInstanceFieldGet:
+ // test + cond-jump + IFieldGet
+ return 4;
case HInstruction::InstructionKind::kInstanceFieldGet:
return 2;
case HInstruction::InstructionKind::kInstanceFieldSet:
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 73db7e5..9e0f515 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -72,6 +72,7 @@
class HPhi;
class HSuspendCheck;
class HTryBoundary;
+class FieldInfo;
class LiveInterval;
class LocationSummary;
class SlowPathCode;
@@ -1097,6 +1098,10 @@
return predecessors_;
}
+ size_t GetNumberOfPredecessors() const {
+ return GetPredecessors().size();
+ }
+
const ArenaVector<HBasicBlock*>& GetSuccessors() const {
return successors_;
}
@@ -1439,6 +1444,8 @@
friend class HGraph;
friend class HInstruction;
+ // Allow manual control of the ordering of predecessors/successors
+ friend class OptimizingUnitTestHelper;
DISALLOW_COPY_AND_ASSIGN(HBasicBlock);
};
@@ -1503,6 +1510,7 @@
M(If, Instruction) \
M(InstanceFieldGet, Instruction) \
M(InstanceFieldSet, Instruction) \
+ M(PredicatedInstanceFieldGet, Instruction) \
M(InstanceOf, Instruction) \
M(IntConstant, Constant) \
M(IntermediateAddress, Instruction) \
@@ -1680,8 +1688,7 @@
H##type& operator=(const H##type&) = delete; \
public:
-#define DEFAULT_COPY_CONSTRUCTOR(type) \
- explicit H##type(const H##type& other) = default;
+#define DEFAULT_COPY_CONSTRUCTOR(type) H##type(const H##type& other) = default;
template <typename T>
class HUseListNode : public ArenaObject<kArenaAllocUseListNode>,
@@ -2105,6 +2112,23 @@
return GetParent() != nullptr;
}
+ class EnvInputSelector {
+ public:
+ explicit EnvInputSelector(const HEnvironment* e) : env_(e) {}
+ HInstruction* operator()(size_t s) const {
+ return env_->GetInstructionAt(s);
+ }
+ private:
+ const HEnvironment* env_;
+ };
+
+ using HConstEnvInputRef = TransformIterator<CountIter, EnvInputSelector>;
+ IterationRange<HConstEnvInputRef> GetEnvInputs() const {
+ IterationRange<CountIter> range(Range(Size()));
+ return MakeIterationRange(MakeTransformIterator(range.begin(), EnvInputSelector(this)),
+ MakeTransformIterator(range.end(), EnvInputSelector(this)));
+ }
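+
+  // Hypothetical usage sketch (Visit is a placeholder): iterate the
+  // instructions recorded in one environment without manual indexing, e.g.
+  //   for (HInstruction* input : env->GetEnvInputs()) {
+  //     Visit(input);
+  //   }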
+
private:
ArenaVector<HUserRecord<HEnvironment*>> vregs_;
ArenaVector<Location> locations_;
@@ -2122,6 +2146,40 @@
std::ostream& operator<<(std::ostream& os, const HInstruction& rhs);
+// Iterates over the chain of environments of an instruction, following
+// HEnvironment::GetParent().
+class HEnvironmentIterator : public ValueObject,
+ public std::iterator<std::forward_iterator_tag, HEnvironment*> {
+ public:
+ explicit HEnvironmentIterator(HEnvironment* cur) : cur_(cur) {}
+
+ HEnvironment* operator*() const {
+ return cur_;
+ }
+
+ HEnvironmentIterator& operator++() {
+ DCHECK(cur_ != nullptr);
+ cur_ = cur_->GetParent();
+ return *this;
+ }
+
+ HEnvironmentIterator operator++(int) {
+ HEnvironmentIterator prev(*this);
+ ++(*this);
+ return prev;
+ }
+
+ bool operator==(const HEnvironmentIterator& other) const {
+ return other.cur_ == cur_;
+ }
+
+ bool operator!=(const HEnvironmentIterator& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ HEnvironment* cur_;
+};
+
class HInstruction : public ArenaObject<kArenaAllocInstruction> {
public:
#define DECLARE_KIND(type, super) k##type,
@@ -2240,6 +2298,10 @@
// Does the instruction always throw an exception unconditionally?
virtual bool AlwaysThrows() const { return false; }
+ // Will this instruction only cause async exceptions if it causes any at all?
+ virtual bool OnlyThrowsAsyncExceptions() const {
+ return false;
+ }
bool CanThrowIntoCatchBlock() const { return CanThrow() && block_->IsTryBlock(); }
@@ -2361,6 +2423,10 @@
bool HasEnvironment() const { return environment_ != nullptr; }
HEnvironment* GetEnvironment() const { return environment_; }
+ IterationRange<HEnvironmentIterator> GetAllEnvironments() const {
+ return MakeIterationRange(HEnvironmentIterator(GetEnvironment()),
+ HEnvironmentIterator(nullptr));
+ }
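+  // Hypothetical usage sketch (ProcessEnv is a placeholder): walk the chain
+  // of inlined environments, innermost first, e.g.
+  //   for (HEnvironment* env : instr->GetAllEnvironments()) {
+  //     ProcessEnv(env);
+  //   }
+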
// Set the `environment_` field. Raw because this method does not
// update the uses lists.
void SetRawEnvironment(HEnvironment* environment) {
@@ -2461,6 +2527,17 @@
UNREACHABLE();
}
+ virtual bool IsFieldAccess() const {
+ return false;
+ }
+
+ virtual const FieldInfo& GetFieldInfo() const {
+ CHECK(IsFieldAccess()) << "Only callable on field accessors not " << DebugName() << " "
+ << *this;
+ LOG(FATAL) << "Must be overridden by field accessors. Not implemented by " << *this;
+ UNREACHABLE();
+ }
+
// Return whether instruction can be cloned (copied).
virtual bool IsClonable() const { return false; }
@@ -2696,12 +2773,16 @@
friend class HGraph;
friend class HInstructionList;
};
+
std::ostream& operator<<(std::ostream& os, HInstruction::InstructionKind rhs);
std::ostream& operator<<(std::ostream& os, const HInstruction::NoArgsDump rhs);
std::ostream& operator<<(std::ostream& os, const HInstruction::ArgsDump rhs);
std::ostream& operator<<(std::ostream& os, const HUseList<HInstruction*>& lst);
std::ostream& operator<<(std::ostream& os, const HUseList<HEnvironment*>& lst);
+// Forward declarations for friends
+template <typename InnerIter> struct HSTLInstructionIterator;
+
// Iterates over the instructions, while preserving the next instruction
// in case the current instruction gets removed from the list by the user
// of this iterator.
@@ -2720,10 +2801,12 @@
}
private:
+ HInstructionIterator() : instruction_(nullptr), next_(nullptr) {}
+
HInstruction* instruction_;
HInstruction* next_;
- DISALLOW_COPY_AND_ASSIGN(HInstructionIterator);
+ friend struct HSTLInstructionIterator<HInstructionIterator>;
};
// Iterates over the instructions without saving the next instruction,
@@ -2742,9 +2825,11 @@
}
private:
+ HInstructionIteratorHandleChanges() : instruction_(nullptr) {}
+
HInstruction* instruction_;
- DISALLOW_COPY_AND_ASSIGN(HInstructionIteratorHandleChanges);
+ friend struct HSTLInstructionIterator<HInstructionIteratorHandleChanges>;
};
@@ -2763,12 +2848,63 @@
}
private:
+ HBackwardInstructionIterator() : instruction_(nullptr), next_(nullptr) {}
+
HInstruction* instruction_;
HInstruction* next_;
- DISALLOW_COPY_AND_ASSIGN(HBackwardInstructionIterator);
+ friend struct HSTLInstructionIterator<HBackwardInstructionIterator>;
};
+template <typename InnerIter>
+struct HSTLInstructionIterator : public ValueObject,
+ public std::iterator<std::forward_iterator_tag, HInstruction*> {
+ public:
+ static_assert(std::is_same_v<InnerIter, HBackwardInstructionIterator> ||
+ std::is_same_v<InnerIter, HInstructionIterator> ||
+ std::is_same_v<InnerIter, HInstructionIteratorHandleChanges>,
+ "Unknown wrapped iterator!");
+
+ explicit HSTLInstructionIterator(InnerIter inner) : inner_(inner) {}
+ HInstruction* operator*() const {
+ DCHECK(inner_.Current() != nullptr);
+ return inner_.Current();
+ }
+
+ HSTLInstructionIterator<InnerIter>& operator++() {
+ DCHECK(*this != HSTLInstructionIterator<InnerIter>::EndIter());
+ inner_.Advance();
+ return *this;
+ }
+
+ HSTLInstructionIterator<InnerIter> operator++(int) {
+ HSTLInstructionIterator<InnerIter> prev(*this);
+ ++(*this);
+ return prev;
+ }
+
+ bool operator==(const HSTLInstructionIterator<InnerIter>& other) const {
+ return inner_.Current() == other.inner_.Current();
+ }
+
+ bool operator!=(const HSTLInstructionIterator<InnerIter>& other) const {
+ return !(*this == other);
+ }
+
+ static HSTLInstructionIterator<InnerIter> EndIter() {
+ return HSTLInstructionIterator<InnerIter>(InnerIter());
+ }
+
+ private:
+ InnerIter inner_;
+};
+
+template <typename InnerIter>
+IterationRange<HSTLInstructionIterator<InnerIter>> MakeSTLInstructionIteratorRange(InnerIter iter) {
+ return MakeIterationRange(HSTLInstructionIterator<InnerIter>(iter),
+ HSTLInstructionIterator<InnerIter>::EndIter());
+}
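+
+// Hypothetical usage sketch (MaybeRemove is a placeholder): range-based
+// iteration over a block's instructions that tolerates removal of the
+// current instruction:
+//   for (HInstruction* ins : MakeSTLInstructionIteratorRange(
+//            HInstructionIterator(block->GetInstructions()))) {
+//     MaybeRemove(ins);
+//   }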
+
class HVariableInputSizeInstruction : public HInstruction {
public:
using HInstruction::GetInputRecords; // Keep the const version visible.
@@ -4345,11 +4481,16 @@
dex_file_(dex_file),
entrypoint_(entrypoint) {
SetPackedFlag<kFlagFinalizable>(finalizable);
+ SetPackedFlag<kFlagPartialMaterialization>(false);
SetRawInputAt(0, cls);
}
bool IsClonable() const override { return true; }
+ void SetPartialMaterialization() {
+ SetPackedFlag<kFlagPartialMaterialization>(true);
+ }
+
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
@@ -4358,6 +4499,9 @@
// Can throw errors when out-of-memory or if it's not instantiable/accessible.
bool CanThrow() const override { return true; }
+ bool OnlyThrowsAsyncExceptions() const override {
+ return !IsFinalizable() && !NeedsChecks();
+ }
bool NeedsChecks() const {
return entrypoint_ == kQuickAllocObjectWithChecks;
@@ -4367,6 +4511,10 @@
bool CanBeNull() const override { return false; }
+ bool IsPartialMaterialization() const {
+ return GetPackedFlag<kFlagPartialMaterialization>();
+ }
+
QuickEntrypointEnum GetEntrypoint() const { return entrypoint_; }
void SetEntrypoint(QuickEntrypointEnum entrypoint) {
@@ -4391,7 +4539,8 @@
private:
static constexpr size_t kFlagFinalizable = kNumberOfGenericPackedBits;
- static constexpr size_t kNumberOfNewInstancePackedBits = kFlagFinalizable + 1;
+ static constexpr size_t kFlagPartialMaterialization = kFlagFinalizable + 1;
+ static constexpr size_t kNumberOfNewInstancePackedBits = kFlagPartialMaterialization + 1;
static_assert(kNumberOfNewInstancePackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
@@ -5965,6 +6114,23 @@
const DexFile& GetDexFile() const { return dex_file_; }
bool IsVolatile() const { return is_volatile_; }
+ bool Equals(const FieldInfo& other) const {
+ return field_ == other.field_ &&
+ field_offset_ == other.field_offset_ &&
+ field_type_ == other.field_type_ &&
+ is_volatile_ == other.is_volatile_ &&
+ index_ == other.index_ &&
+ declaring_class_def_index_ == other.declaring_class_def_index_ &&
+ &dex_file_ == &other.dex_file_;
+ }
+
+ std::ostream& Dump(std::ostream& os) const {
+ os << field_ << ", off: " << field_offset_ << ", type: " << field_type_
+ << ", volatile: " << std::boolalpha << is_volatile_ << ", index_: " << std::dec << index_
+ << ", declaring_class: " << declaring_class_def_index_ << ", dex: " << dex_file_;
+ return os;
+ }
+
private:
ArtField* const field_;
const MemberOffset field_offset_;
@@ -5975,6 +6141,14 @@
const DexFile& dex_file_;
};
+inline bool operator==(const FieldInfo& a, const FieldInfo& b) {
+ return a.Equals(b);
+}
+
+inline std::ostream& operator<<(std::ostream& os, const FieldInfo& a) {
+ return a.Dump(os);
+}
+
class HInstanceFieldGet final : public HExpression<1> {
public:
HInstanceFieldGet(HInstruction* value,
@@ -6016,7 +6190,8 @@
return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
}
- const FieldInfo& GetFieldInfo() const { return field_info_; }
+ bool IsFieldAccess() const override { return true; }
+ const FieldInfo& GetFieldInfo() const override { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
@@ -6037,6 +6212,96 @@
const FieldInfo field_info_;
};
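+
+// A field get that only performs the read when its target object is non-null;
+// otherwise it evaluates to its default-value input. Rough semantics:
+//   result = (target != null) ? target.field : default_value
+// Partial LSE emits this to load from objects that are materialized only on
+// some execution paths.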
+class HPredicatedInstanceFieldGet final : public HExpression<2> {
+ public:
+ HPredicatedInstanceFieldGet(HInstanceFieldGet* orig,
+ HInstruction* target,
+ HInstruction* default_val)
+ : HExpression(kPredicatedInstanceFieldGet,
+ orig->GetFieldType(),
+ orig->GetSideEffects(),
+ orig->GetDexPc()),
+ field_info_(orig->GetFieldInfo()) {
+ // NB Default-val is at 0 so we can avoid doing a move.
+ SetRawInputAt(1, target);
+ SetRawInputAt(0, default_val);
+ }
+
+ HPredicatedInstanceFieldGet(HInstruction* value,
+ ArtField* field,
+ HInstruction* default_value,
+ DataType::Type field_type,
+ MemberOffset field_offset,
+ bool is_volatile,
+ uint32_t field_idx,
+ uint16_t declaring_class_def_index,
+ const DexFile& dex_file,
+ uint32_t dex_pc)
+ : HExpression(kPredicatedInstanceFieldGet,
+ field_type,
+ SideEffects::FieldReadOfType(field_type, is_volatile),
+ dex_pc),
+ field_info_(field,
+ field_offset,
+ field_type,
+ is_volatile,
+ field_idx,
+ declaring_class_def_index,
+ dex_file) {
+    // Match the accessors below: default value at input 0, target at input 1.
+    SetRawInputAt(0, default_value);
+    SetRawInputAt(1, value);
+ }
+
+ bool IsClonable() const override {
+ return true;
+ }
+ bool CanBeMoved() const override {
+ return !IsVolatile();
+ }
+
+ HInstruction* GetDefaultValue() const {
+ return InputAt(0);
+ }
+ HInstruction* GetTarget() const {
+ return InputAt(1);
+ }
+
+ bool InstructionDataEquals(const HInstruction* other) const override {
+ const HPredicatedInstanceFieldGet* other_get = other->AsPredicatedInstanceFieldGet();
+ return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue() &&
+ GetDefaultValue() == other_get->GetDefaultValue();
+ }
+
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
+ return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
+ }
+
+ size_t ComputeHashCode() const override {
+ return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
+ }
+
+ bool IsFieldAccess() const override { return true; }
+ const FieldInfo& GetFieldInfo() const override { return field_info_; }
+ MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
+ DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
+ bool IsVolatile() const { return field_info_.IsVolatile(); }
+
+ void SetType(DataType::Type new_type) {
+ DCHECK(DataType::IsIntegralType(GetType()));
+ DCHECK(DataType::IsIntegralType(new_type));
+ DCHECK_EQ(DataType::Size(GetType()), DataType::Size(new_type));
+ SetPackedField<TypeField>(new_type);
+ }
+
+ DECLARE_INSTRUCTION(PredicatedInstanceFieldGet);
+
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(PredicatedInstanceFieldGet);
+
+ private:
+ const FieldInfo field_info_;
+};
+
class HInstanceFieldSet final : public HExpression<2> {
public:
HInstanceFieldSet(HInstruction* object,
@@ -6060,6 +6325,7 @@
declaring_class_def_index,
dex_file) {
SetPackedFlag<kFlagValueCanBeNull>(true);
+ SetPackedFlag<kFlagIsPredicatedSet>(false);
SetRawInputAt(0, object);
SetRawInputAt(1, value);
}
@@ -6070,13 +6336,16 @@
return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
}
- const FieldInfo& GetFieldInfo() const { return field_info_; }
+ bool IsFieldAccess() const override { return true; }
+ const FieldInfo& GetFieldInfo() const override { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
HInstruction* GetValue() const { return InputAt(1); }
bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
+ bool GetIsPredicatedSet() const { return GetPackedFlag<kFlagIsPredicatedSet>(); }
+ void SetIsPredicatedSet(bool value = true) { SetPackedFlag<kFlagIsPredicatedSet>(value); }
DECLARE_INSTRUCTION(InstanceFieldSet);
@@ -6085,7 +6354,8 @@
private:
static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits;
- static constexpr size_t kNumberOfInstanceFieldSetPackedBits = kFlagValueCanBeNull + 1;
+ static constexpr size_t kFlagIsPredicatedSet = kFlagValueCanBeNull + 1;
+ static constexpr size_t kNumberOfInstanceFieldSetPackedBits = kFlagIsPredicatedSet + 1;
static_assert(kNumberOfInstanceFieldSetPackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
@@ -7016,7 +7286,8 @@
return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
}
- const FieldInfo& GetFieldInfo() const { return field_info_; }
+ bool IsFieldAccess() const override { return true; }
+ const FieldInfo& GetFieldInfo() const override { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
@@ -7065,7 +7336,8 @@
}
bool IsClonable() const override { return true; }
- const FieldInfo& GetFieldInfo() const { return field_info_; }
+ bool IsFieldAccess() const override { return true; }
+ const FieldInfo& GetFieldInfo() const override { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
@@ -7984,7 +8256,7 @@
DCHECK(!destination.OverlapsWith(move.GetDestination()))
<< "Overlapped destination for two moves in a parallel move: "
<< move.GetSource() << " ==> " << move.GetDestination() << " and "
- << source << " ==> " << destination;
+ << source << " ==> " << destination << " for " << *instruction;
}
}
moves_.emplace_back(source, destination, type, instruction);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index ac241aa..8cd34cf 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -194,7 +194,9 @@
GraphChecker checker(graph_, codegen_);
last_seen_graph_size_ = checker.Run(pass_change, last_seen_graph_size_);
if (!checker.IsValid()) {
- LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<GraphChecker>(checker);
+ LOG(FATAL_WITHOUT_ABORT) << "Error after " << pass_name << "(" << graph_->PrettyMethod()
+ << "): " << *graph_;
+ LOG(FATAL) << "(" << pass_name << "): " << Dumpable<GraphChecker>(checker);
}
}
}
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 4322eb7..a2f71cf 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -113,6 +113,9 @@
kNonPartialLoadRemoved,
kPartialLSEPossible,
kPartialStoreRemoved,
+ kPartialAllocationMoved,
+ kPredicatedLoadAdded,
+ kPredicatedStoreAdded,
kLastStat
};
std::ostream& operator<<(std::ostream& os, MethodCompilationStat rhs);
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 89b606d..cf97c41 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -18,8 +18,10 @@
#define ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
#include <memory>
+#include <string_view>
#include <vector>
+#include "base/indenter.h"
#include "base/malloc_arena_pool.h"
#include "base/scoped_arena_allocator.h"
#include "builder.h"
@@ -30,7 +32,9 @@
#include "dex/standard_dex_file.h"
#include "driver/dex_compilation_unit.h"
#include "graph_checker.h"
+#include "gtest/gtest.h"
#include "handle_scope-inl.h"
+#include "handle_scope.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "nodes.h"
@@ -38,8 +42,6 @@
#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
-#include "gtest/gtest.h"
-
namespace art {
#define NUM_INSTRUCTIONS(...) \
@@ -183,8 +185,8 @@
}
}
- void InitGraph() {
- CreateGraph();
+ void InitGraph(VariableSizedHandleScope* handles = nullptr) {
+ CreateGraph(handles);
entry_block_ = AddNewBlock();
return_block_ = AddNewBlock();
exit_block_ = AddNewBlock();
@@ -246,6 +248,48 @@
return environment;
}
+ void EnsurePredecessorOrder(HBasicBlock* target, std::initializer_list<HBasicBlock*> preds) {
+ // Make sure the given preds and block predecessors have the same blocks.
+ BitVector bv(preds.size(), false, Allocator::GetMallocAllocator());
+ auto preds_and_idx = ZipCount(MakeIterationRange(target->GetPredecessors()));
+ bool correct_preds = preds.size() == target->GetPredecessors().size() &&
+ std::all_of(preds.begin(), preds.end(), [&](HBasicBlock* pred) {
+ return std::any_of(preds_and_idx.begin(),
+ preds_and_idx.end(),
+ // Make sure every target predecessor is used only
+ // once.
+ [&](std::pair<HBasicBlock*, uint32_t> cur) {
+ if (cur.first == pred && !bv.IsBitSet(cur.second)) {
+ bv.SetBit(cur.second);
+ return true;
+ } else {
+ return false;
+ }
+ });
+ }) &&
+ bv.NumSetBits() == preds.size();
+ auto dump_list = [](auto it) {
+ std::ostringstream oss;
+ oss << "[";
+ bool first = true;
+ for (HBasicBlock* b : it) {
+ if (!first) {
+ oss << ", ";
+ }
+ first = false;
+ oss << b->GetBlockId();
+ }
+ oss << "]";
+ return oss.str();
+ };
+ ASSERT_TRUE(correct_preds) << "Predecessors of " << target->GetBlockId() << " are "
+ << dump_list(target->GetPredecessors()) << " not "
+ << dump_list(preds);
+ if (correct_preds) {
+ std::copy(preds.begin(), preds.end(), target->predecessors_.begin());
+ }
+ }
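+  // Tests call this (e.g. EnsurePredecessorOrder(breturn, {left, right})) so
+  // that expectations tied to predecessor order, such as phi inputs, are
+  // checked against a deterministic ordering.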
+
protected:
bool CheckGraph(HGraph* graph, bool check_ref_type_info, std::ostream& oss) {
GraphChecker checker(graph);
@@ -342,12 +386,34 @@
AdjacencyListGraph& operator=(AdjacencyListGraph&&) = default;
AdjacencyListGraph& operator=(const AdjacencyListGraph&) = default;
+ std::ostream& Dump(std::ostream& os) const {
+ struct Namer : public BlockNamer {
+ public:
+ explicit Namer(const AdjacencyListGraph& alg) : BlockNamer(), alg_(alg) {}
+ std::ostream& PrintName(std::ostream& os, HBasicBlock* blk) const override {
+ if (alg_.HasBlock(blk)) {
+ return os << alg_.GetName(blk) << " (" << blk->GetBlockId() << ")";
+ } else {
+ return os << "<Unnamed B" << blk->GetBlockId() << ">";
+ }
+ }
+
+ const AdjacencyListGraph& alg_;
+ };
+ Namer namer(*this);
+ return graph_->Dump(os, namer);
+ }
+
private:
HGraph* graph_;
SafeMap<const std::string_view, HBasicBlock*> name_to_block_;
SafeMap<const HBasicBlock*, const std::string_view> block_to_name_;
};
+inline std::ostream& operator<<(std::ostream& oss, const AdjacencyListGraph& alg) {
+ return alg.Dump(oss);
+}
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index d5edc3d..c2f3d0e 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -268,6 +268,10 @@
return false;
}
+ if (user->IsNewInstance() && user->AsNewInstance()->IsPartialMaterialization()) {
+ return false;
+ }
+
// Now do a thorough environment check that this is really coming from the same instruction in
// the same inlined graph. Unfortunately, we have to go through the whole environment chain.
HEnvironment* user_environment = user->GetEnvironment();
@@ -296,8 +300,8 @@
if (kIsDebugBuild) {
for (HInstruction* between = input->GetNext(); between != user; between = between->GetNext()) {
CHECK(between != nullptr); // User must be after input in the same block.
- CHECK(!between->CanThrow());
- CHECK(!between->HasSideEffects());
+ CHECK(!between->CanThrow()) << *between << " User: " << *user;
+ CHECK(!between->HasSideEffects()) << *between << " User: " << *user;
}
}
return true;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 953329d..1b2f71f 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -67,6 +67,7 @@
void VisitLoadException(HLoadException* instr) override;
void VisitNewArray(HNewArray* instr) override;
void VisitParameterValue(HParameterValue* instr) override;
+ void VisitPredicatedInstanceFieldGet(HPredicatedInstanceFieldGet* instr) override;
void VisitInstanceFieldGet(HInstanceFieldGet* instr) override;
void VisitStaticFieldGet(HStaticFieldGet* instr) override;
void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) override;
@@ -313,10 +314,8 @@
return;
}
- HInstanceFieldGet* field_get = (load_class == input_one)
- ? input_two->AsInstanceFieldGet()
- : input_one->AsInstanceFieldGet();
- if (field_get == nullptr) {
+ HInstruction* field_get = (load_class == input_one) ? input_two : input_one;
+ if (!field_get->IsInstanceFieldGet() && !field_get->IsPredicatedInstanceFieldGet()) {
return;
}
HInstruction* receiver = field_get->InputAt(0);
@@ -624,6 +623,11 @@
SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
}
+void ReferenceTypePropagation::RTPVisitor::VisitPredicatedInstanceFieldGet(
+ HPredicatedInstanceFieldGet* instr) {
+ UpdateFieldAccessTypeInfo(instr, instr->GetFieldInfo());
+}
+
void ReferenceTypePropagation::RTPVisitor::VisitInstanceFieldGet(HInstanceFieldGet* instr) {
UpdateFieldAccessTypeInfo(instr, instr->GetFieldInfo());
}
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index c1891de..7140e24 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -14,13 +14,14 @@
* limitations under the License.
*/
-#include <string>
-
#include "scheduler.h"
+#include <string>
+
#include "base/scoped_arena_allocator.h"
#include "base/scoped_arena_containers.h"
#include "data_type-inl.h"
+#include "optimizing/load_store_analysis.h"
#include "prepare_for_register_allocation.h"
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -107,6 +108,7 @@
static bool IsInstanceFieldAccess(const HInstruction* instruction) {
return instruction->IsInstanceFieldGet() ||
instruction->IsInstanceFieldSet() ||
+ instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsUnresolvedInstanceFieldGet() ||
instruction->IsUnresolvedInstanceFieldSet();
}
@@ -121,6 +123,7 @@
static bool IsResolvedFieldAccess(const HInstruction* instruction) {
return instruction->IsInstanceFieldGet() ||
instruction->IsInstanceFieldSet() ||
+ instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsStaticFieldGet() ||
instruction->IsStaticFieldSet();
}
@@ -137,18 +140,7 @@
}
static const FieldInfo* GetFieldInfo(const HInstruction* instruction) {
- if (instruction->IsInstanceFieldGet()) {
- return &instruction->AsInstanceFieldGet()->GetFieldInfo();
- } else if (instruction->IsInstanceFieldSet()) {
- return &instruction->AsInstanceFieldSet()->GetFieldInfo();
- } else if (instruction->IsStaticFieldGet()) {
- return &instruction->AsStaticFieldGet()->GetFieldInfo();
- } else if (instruction->IsStaticFieldSet()) {
- return &instruction->AsStaticFieldSet()->GetFieldInfo();
- } else {
- LOG(FATAL) << "Unexpected field access type";
- UNREACHABLE();
- }
+ return &instruction->GetFieldInfo();
}
size_t SideEffectDependencyAnalysis::MemoryDependencyAnalysis::FieldAccessHeapLocation(
@@ -560,7 +552,7 @@
// should run the analysis or not.
const HeapLocationCollector* heap_location_collector = nullptr;
ScopedArenaAllocator allocator(graph->GetArenaStack());
- LoadStoreAnalysis lsa(graph, /*stats=*/nullptr, &allocator, /*for_elimination=*/false);
+ LoadStoreAnalysis lsa(graph, /*stats=*/nullptr, &allocator, LoadStoreAnalysisType::kBasic);
if (!only_optimize_loop_blocks_ || graph->HasLoops()) {
lsa.Run();
heap_location_collector = &lsa.GetHeapLocationCollector();
@@ -730,35 +722,37 @@
// TODO: Some of the instructions above may be safe to schedule (maybe as
// scheduling barriers).
return instruction->IsArrayGet() ||
- instruction->IsArraySet() ||
- instruction->IsArrayLength() ||
- instruction->IsBoundType() ||
- instruction->IsBoundsCheck() ||
- instruction->IsCheckCast() ||
- instruction->IsClassTableGet() ||
- instruction->IsCurrentMethod() ||
- instruction->IsDivZeroCheck() ||
- (instruction->IsInstanceFieldGet() && !instruction->AsInstanceFieldGet()->IsVolatile()) ||
- (instruction->IsInstanceFieldSet() && !instruction->AsInstanceFieldSet()->IsVolatile()) ||
- instruction->IsInstanceOf() ||
- instruction->IsInvokeInterface() ||
- instruction->IsInvokeStaticOrDirect() ||
- instruction->IsInvokeUnresolved() ||
- instruction->IsInvokeVirtual() ||
- instruction->IsLoadString() ||
- instruction->IsNewArray() ||
- instruction->IsNewInstance() ||
- instruction->IsNullCheck() ||
- instruction->IsPackedSwitch() ||
- instruction->IsParameterValue() ||
- instruction->IsPhi() ||
- instruction->IsReturn() ||
- instruction->IsReturnVoid() ||
- instruction->IsSelect() ||
- (instruction->IsStaticFieldGet() && !instruction->AsStaticFieldGet()->IsVolatile()) ||
- (instruction->IsStaticFieldSet() && !instruction->AsStaticFieldSet()->IsVolatile()) ||
- instruction->IsSuspendCheck() ||
- instruction->IsTypeConversion();
+ instruction->IsArraySet() ||
+ instruction->IsArrayLength() ||
+ instruction->IsBoundType() ||
+ instruction->IsBoundsCheck() ||
+ instruction->IsCheckCast() ||
+ instruction->IsClassTableGet() ||
+ instruction->IsCurrentMethod() ||
+ instruction->IsDivZeroCheck() ||
+ (instruction->IsInstanceFieldGet() && !instruction->AsInstanceFieldGet()->IsVolatile()) ||
+ (instruction->IsPredicatedInstanceFieldGet() &&
+ !instruction->AsPredicatedInstanceFieldGet()->IsVolatile()) ||
+ (instruction->IsInstanceFieldSet() && !instruction->AsInstanceFieldSet()->IsVolatile()) ||
+ instruction->IsInstanceOf() ||
+ instruction->IsInvokeInterface() ||
+ instruction->IsInvokeStaticOrDirect() ||
+ instruction->IsInvokeUnresolved() ||
+ instruction->IsInvokeVirtual() ||
+ instruction->IsLoadString() ||
+ instruction->IsNewArray() ||
+ instruction->IsNewInstance() ||
+ instruction->IsNullCheck() ||
+ instruction->IsPackedSwitch() ||
+ instruction->IsParameterValue() ||
+ instruction->IsPhi() ||
+ instruction->IsReturn() ||
+ instruction->IsReturnVoid() ||
+ instruction->IsSelect() ||
+ (instruction->IsStaticFieldGet() && !instruction->AsStaticFieldGet()->IsVolatile()) ||
+ (instruction->IsStaticFieldSet() && !instruction->AsStaticFieldSet()->IsVolatile()) ||
+ instruction->IsSuspendCheck() ||
+ instruction->IsTypeConversion();
}
bool HScheduler::IsSchedulable(const HBasicBlock* block) const {
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index 858a555..f9004d8 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -853,6 +853,11 @@
}
}
+void SchedulingLatencyVisitorARM::VisitPredicatedInstanceFieldGet(
+ HPredicatedInstanceFieldGet* instruction) {
+ HandleFieldGetLatencies(instruction, instruction->GetFieldInfo());
+}
+
void SchedulingLatencyVisitorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGetLatencies(instruction, instruction->GetFieldInfo());
}
@@ -913,7 +918,9 @@
void SchedulingLatencyVisitorARM::HandleFieldGetLatencies(HInstruction* instruction,
const FieldInfo& field_info) {
- DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
+ DCHECK(instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsPredicatedInstanceFieldGet());
DCHECK(codegen_ != nullptr);
bool is_volatile = field_info.IsVolatile();
DataType::Type field_type = field_info.GetFieldType();
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index 4c7a3bb..d11222d 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -61,36 +61,37 @@
// We add a second unused parameter to be able to use this macro like the others
// defined in `nodes.h`.
-#define FOR_EACH_SCHEDULED_ARM_INSTRUCTION(M) \
- M(ArrayGet , unused) \
- M(ArrayLength , unused) \
- M(ArraySet , unused) \
- M(Add , unused) \
- M(Sub , unused) \
- M(And , unused) \
- M(Or , unused) \
- M(Ror , unused) \
- M(Xor , unused) \
- M(Shl , unused) \
- M(Shr , unused) \
- M(UShr , unused) \
- M(Mul , unused) \
- M(Div , unused) \
- M(Condition , unused) \
- M(Compare , unused) \
- M(BoundsCheck , unused) \
- M(InstanceFieldGet , unused) \
- M(InstanceFieldSet , unused) \
- M(InstanceOf , unused) \
- M(Invoke , unused) \
- M(LoadString , unused) \
- M(NewArray , unused) \
- M(NewInstance , unused) \
- M(Rem , unused) \
- M(StaticFieldGet , unused) \
- M(StaticFieldSet , unused) \
- M(SuspendCheck , unused) \
- M(TypeConversion , unused)
+#define FOR_EACH_SCHEDULED_ARM_INSTRUCTION(M) \
+ M(ArrayGet, unused) \
+ M(ArrayLength, unused) \
+ M(ArraySet, unused) \
+ M(Add, unused) \
+ M(Sub, unused) \
+ M(And, unused) \
+ M(Or, unused) \
+ M(Ror, unused) \
+ M(Xor, unused) \
+ M(Shl, unused) \
+ M(Shr, unused) \
+ M(UShr, unused) \
+ M(Mul, unused) \
+ M(Div, unused) \
+ M(Condition, unused) \
+ M(Compare, unused) \
+ M(BoundsCheck, unused) \
+ M(PredicatedInstanceFieldGet, unused) \
+ M(InstanceFieldGet, unused) \
+ M(InstanceFieldSet, unused) \
+ M(InstanceOf, unused) \
+ M(Invoke, unused) \
+ M(LoadString, unused) \
+ M(NewArray, unused) \
+ M(NewInstance, unused) \
+ M(Rem, unused) \
+ M(StaticFieldGet, unused) \
+ M(StaticFieldSet, unused) \
+ M(SuspendCheck, unused) \
+ M(TypeConversion, unused)
#define FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(M) \
M(BitwiseNegatedRight, unused) \
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index c166a46..a1cc202 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -274,7 +274,7 @@
}
HeapLocationCollector heap_location_collector(
- graph_, GetScopedAllocator(), /*for_partial_elimination=*/false);
+ graph_, GetScopedAllocator(), LoadStoreAnalysisType::kBasic);
heap_location_collector.VisitBasicBlock(entry);
heap_location_collector.BuildAliasingMatrix();
TestSchedulingGraph scheduling_graph(GetScopedAllocator(), &heap_location_collector);