Diffstat (limited to 'compiler/optimizing')
19 files changed, 345 insertions, 256 deletions
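The code generator hunks that follow switch every back end from kQuickAllocObjectWithAccessCheck, which received the type index and the caller's ArtMethod*, to kQuickAllocObjectWithChecks, which receives the resolved mirror::Class* directly. That is visible in the CheckEntrypointTypes<> template arguments changing from <void*, uint32_t, ArtMethod*> to <void*, mirror::Class*>. As a hedged sketch of what that compile-time check amounts to (the function name below is an assumption for illustration; the real runtime entrypoint also receives the current Thread*):

    // Illustrative only: CheckEntrypointTypes<> in ART statically asserts that
    // the C++ signature registered for a quick entrypoint matches what the
    // code generator emits for the call.
    #include <type_traits>

    namespace art { namespace mirror { class Class; } }

    // Assumed name/signature for this sketch, not the actual runtime symbol.
    extern "C" void* artAllocObjectFromCodeWithChecks(art::mirror::Class* klass);

    static_assert(std::is_same<decltype(&artAllocObjectFromCodeWithChecks),
                               void* (*)(art::mirror::Class*)>::value,
                  "WithChecks takes the resolved class, not (type_idx, ArtMethod*)");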
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 8a7f6d3a33..1dd526f404 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -3936,7 +3936,6 @@ void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) { } else { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } locations->SetOut(Location::RegisterLocation(R0)); } @@ -3954,7 +3953,7 @@ void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) { codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); } } diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 5c33fe1a7d..240e39df4b 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -4738,7 +4738,6 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) { locations->AddTemp(LocationFrom(kArtMethodRegister)); } else { locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); } locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); } @@ -4756,7 +4755,7 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); } } diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index 00ad3e34b7..cf4d94deea 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -3948,7 +3948,6 @@ void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) { } else { InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); } locations->SetOut(LocationFrom(r0)); } @@ -3970,7 +3969,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); } } diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index 01e0dac33e..29f8b2aa3c 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -5900,7 +5900,6 @@ void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) { 
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); } else { locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); } @@ -5917,7 +5916,7 @@ void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) { codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); } } diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 36690c0569..dd3f0fee5a 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -3841,7 +3841,6 @@ void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) { locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); } else { locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); } @@ -3859,7 +3858,7 @@ void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); } } diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 0abe85540c..786bc50345 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -4150,7 +4150,6 @@ void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) { } else { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } } @@ -4166,7 +4165,7 @@ void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) { codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); DCHECK(!codegen_->IsLeafMethod()); } } diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 903844fcdb..06b48c489c 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -4038,7 +4038,6 @@ void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) { locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); } else { locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } 
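Across all six back ends the deleted SetInAt(1, ...) line is the same mechanical fallout: the allocation call now has a single runtime argument (the class), so only the first argument register of the calling convention is claimed. The arity change itself lives in nodes.h, where HNewInstance shrinks from HExpression<2> to HExpression<1>. A minimal model of that relationship, not ART code:

    // Minimal model (types simplified) of the HNewInstance arity change:
    // dropping the HCurrentMethod input turns HExpression<2> into
    // HExpression<1>, which is why the LocationsBuilders above stop
    // reserving a second argument register.
    #include <array>
    #include <cstddef>

    template <size_t N>
    struct HExpressionModel {
      std::array<void*, N> inputs_{};  // fixed-arity input records
      void SetRawInputAt(size_t i, void* in) { inputs_[i] = in; }
    };

    struct HNewInstanceModel : HExpressionModel<1> {  // was HExpressionModel<2>
      explicit HNewInstanceModel(void* cls) {
        SetRawInputAt(0, cls);  // no SetRawInputAt(1, current_method) anymore
      }
    };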
locations->SetOut(Location::RegisterLocation(RAX)); } @@ -4055,7 +4054,7 @@ void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction) codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); DCHECK(!codegen_->IsLeafMethod()); } } diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc index 879b4ce59e..e3f3df0ff5 100644 --- a/compiler/optimizing/codegen_test.cc +++ b/compiler/optimizing/codegen_test.cc @@ -15,6 +15,7 @@ */ #include <functional> +#include <memory> #include "arch/instruction_set.h" #include "arch/arm/instruction_set_features_arm.h" @@ -299,8 +300,8 @@ static void RunCode(CodegenTargetConfig target_config, bool has_result, Expected expected) { CompilerOptions compiler_options; - CodeGenerator* codegen = target_config.CreateCodeGenerator(graph, compiler_options); - RunCode(codegen, graph, hook_before_codegen, has_result, expected); + std::unique_ptr<CodeGenerator> codegen(target_config.CreateCodeGenerator(graph, compiler_options)); + RunCode(codegen.get(), graph, hook_before_codegen, has_result, expected); } #ifdef ART_ENABLE_CODEGEN_arm diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc index 437d35ccb7..f8d37bd714 100644 --- a/compiler/optimizing/gvn_test.cc +++ b/compiler/optimizing/gvn_test.cc @@ -28,7 +28,6 @@ class GVNTest : public CommonCompilerTest {}; TEST_F(GVNTest, LocalFieldElimination) { ArenaPool pool; ArenaAllocator allocator(&pool); - ScopedNullHandle<mirror::DexCache> dex_cache; HGraph* graph = CreateGraph(&allocator); HBasicBlock* entry = new (&allocator) HBasicBlock(graph); @@ -45,53 +44,53 @@ TEST_F(GVNTest, LocalFieldElimination) { entry->AddSuccessor(block); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* to_remove = block->GetLastInstruction(); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimNot, MemberOffset(43), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* different_offset = block->GetLastInstruction(); // Kill the value. 
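Aside from the entrypoint work, the codegen_test.cc hunk above plugs a per-call leak: RunCode took ownership of a raw CodeGenerator* and never deleted it, so every test invocation leaked one. A hedged sketch of the pattern, with types simplified and names assumed:

    #include <memory>

    struct CodeGenerator { /* elided */ };

    CodeGenerator* CreateCodeGenerator() { return new CodeGenerator(); }  // caller owns

    void RunCode(CodeGenerator* codegen) { /* uses codegen, does not own it */ }

    void RunCodeOwningWrapper() {
      std::unique_ptr<CodeGenerator> codegen(CreateCodeGenerator());
      RunCode(codegen.get());  // generator is freed when the wrapper returns
    }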
block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* use_after_kill = block->GetLastInstruction(); block->AddInstruction(new (&allocator) HExit()); @@ -113,7 +112,6 @@ TEST_F(GVNTest, LocalFieldElimination) { TEST_F(GVNTest, GlobalFieldElimination) { ArenaPool pool; ArenaAllocator allocator(&pool); - ScopedNullHandle<mirror::DexCache> dex_cache; HGraph* graph = CreateGraph(&allocator); HBasicBlock* entry = new (&allocator) HBasicBlock(graph); @@ -129,13 +127,13 @@ TEST_F(GVNTest, GlobalFieldElimination) { graph->AddBlock(block); entry->AddSuccessor(block); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction())); @@ -152,33 +150,33 @@ TEST_F(GVNTest, GlobalFieldElimination) { else_->AddSuccessor(join); then->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); then->AddInstruction(new (&allocator) HGoto()); else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); else_->AddInstruction(new (&allocator) HGoto()); join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); join->AddInstruction(new (&allocator) HExit()); @@ -196,7 +194,6 @@ TEST_F(GVNTest, GlobalFieldElimination) { TEST_F(GVNTest, LoopFieldElimination) { ArenaPool pool; ArenaAllocator allocator(&pool); - ScopedNullHandle<mirror::DexCache> dex_cache; HGraph* graph = CreateGraph(&allocator); HBasicBlock* entry = new (&allocator) HBasicBlock(graph); @@ -213,13 +210,13 @@ TEST_F(GVNTest, LoopFieldElimination) { graph->AddBlock(block); entry->AddSuccessor(block); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); block->AddInstruction(new (&allocator) HGoto()); @@ -236,13 +233,13 @@ TEST_F(GVNTest, LoopFieldElimination) { loop_body->AddSuccessor(loop_header); loop_header->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction(); loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction())); @@ -251,35 +248,35 @@ TEST_F(GVNTest, LoopFieldElimination) { // and the body to be GVN'ed. 
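The churn in gvn_test.cc here (and in licm_test.cc and register_allocator_test.cc below) follows from the nodes.h change: field nodes now take an ArtField* as an extra leading argument, and FieldInfo caches that pointer instead of a Handle<mirror::DexCache> — per the new comment, "to avoid requiring the mutator lock every time we need it". Tests pass nullptr because their field accesses are synthetic, with no backing field. Simplified model of the new FieldInfo shape (fields trimmed for the sketch):

    #include <cstdint>

    class ArtField;  // opaque here

    class FieldInfo {
     public:
      FieldInfo(ArtField* field, uint32_t offset, bool is_volatile)
          : field_(field), offset_(offset), is_volatile_(is_volatile) {}
      ArtField* GetField() const { return field_; }  // may be nullptr in tests
      uint32_t GetFieldOffset() const { return offset_; }
      bool IsVolatile() const { return is_volatile_; }
     private:
      ArtField* const field_;     // cached at graph-construction time
      const uint32_t offset_;
      const bool is_volatile_;
    };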
loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* field_set = loop_body->GetLastInstruction(); loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction(); loop_body->AddInstruction(new (&allocator) HGoto()); exit->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* field_get_in_exit = exit->GetLastInstruction(); exit->AddInstruction(new (&allocator) HExit()); @@ -319,7 +316,6 @@ TEST_F(GVNTest, LoopFieldElimination) { TEST_F(GVNTest, LoopSideEffects) { ArenaPool pool; ArenaAllocator allocator(&pool); - ScopedNullHandle<mirror::DexCache> dex_cache; static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC(); @@ -376,13 +372,13 @@ TEST_F(GVNTest, LoopSideEffects) { // Make one block with a side effect. entry->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); SideEffectsAnalysis side_effects(graph); @@ -401,13 +397,13 @@ TEST_F(GVNTest, LoopSideEffects) { outer_loop_body->InsertInstructionBefore( new (&allocator) HInstanceFieldSet(parameter, parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0), outer_loop_body->GetLastInstruction()); @@ -427,13 +423,13 @@ TEST_F(GVNTest, LoopSideEffects) { inner_loop_body->InsertInstructionBefore( new (&allocator) HInstanceFieldSet(parameter, parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0), inner_loop_body->GetLastInstruction()); diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 3b83e95071..c970e5cbba 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -429,13 +429,13 @@ HInstanceFieldGet* HInliner::BuildGetReceiverClass(ClassLinker* class_linker, DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_"); HInstanceFieldGet* result = new (graph_->GetArena()) HInstanceFieldGet( receiver, + field, Primitive::kPrimNot, field->GetOffset(), field->IsVolatile(), field->GetDexFieldIndex(), field->GetDeclaringClass()->GetDexClassDefIndex(), *field->GetDexFile(), - handles_->NewHandle(field->GetDexCache()), dex_pc); // The class of a field is effectively final, and does not have any memory dependencies. result->SetSideEffects(SideEffects::None()); @@ -618,6 +618,9 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction, } else { one_target_inlined = true; + VLOG(compiler) << "Polymorphic call to " << ArtMethod::PrettyMethod(resolved_method) + << " has inlined " << ArtMethod::PrettyMethod(method); + // If we have inlined all targets before, and this receiver is the last seen, // we deoptimize instead of keeping the original invoke instruction. 
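The inliner change just below, and the instruction simplifier and reference type propagation changes later in this diff, all lean on the same lookup idiom: java.lang.Object's first instance field is the hidden shadow$_klass_ field holding the object's class, so GetInstanceField(0) on the Object class root yields the ArtField* that an HInstanceFieldGet can be compared against. Extracted as a helper for clarity (this is the idiom from the diff, not a standalone program — it needs the ART runtime headers and the mutator lock):

    ArtField* GetShadowKlassField() REQUIRES_SHARED(Locks::mutator_lock_) {
      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
      ArtField* field =
          class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0);
      // The class of an object is effectively final, hence the hard check.
      DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
      return field;
    }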
bool deoptimize = all_targets_inlined && @@ -655,6 +658,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction, << " of its targets could be inlined"; return false; } + MaybeRecordStat(kInlinedPolymorphicCall); // Run type propagation to get the guards typed. @@ -1161,13 +1165,13 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex DCHECK(resolved_field != nullptr); HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet( obj, + resolved_field, resolved_field->GetTypeAsPrimitiveType(), resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, resolved_field->GetDeclaringClass()->GetDexClassDefIndex(), *dex_cache->GetDexFile(), - dex_cache, // Read barrier generates a runtime call in slow path and we need a valid // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537. /* dex_pc */ 0); @@ -1190,13 +1194,13 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(Handle<mirror::DexCache> dex HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet( obj, value, + resolved_field, resolved_field->GetTypeAsPrimitiveType(), resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, resolved_field->GetDeclaringClass()->GetDexClassDefIndex(), *dex_cache->GetDexFile(), - dex_cache, // Read barrier generates a runtime call in slow path and we need a valid // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537. /* dex_pc */ 0); @@ -1424,15 +1428,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, return false; } - if (current->IsNewInstance() && - (current->AsNewInstance()->GetEntrypoint() == kQuickAllocObjectWithAccessCheck)) { - VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index) - << " could not be inlined because it is using an entrypoint" - << " with access checks"; - // Allocation entrypoint does not handle inlined frames. - return false; - } - if (current->IsNewArray() && (current->AsNewArray()->GetEntrypoint() == kQuickAllocArrayWithAccessCheck)) { VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index) @@ -1579,6 +1574,13 @@ bool HInliner::ReturnTypeMoreSpecific(HInvoke* invoke_instruction, /* declared_can_be_null */ true, return_replacement)) { return true; + } else if (return_replacement->IsInstanceFieldGet()) { + HInstanceFieldGet* field_get = return_replacement->AsInstanceFieldGet(); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + if (field_get->GetFieldInfo().GetField() == + class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0)) { + return true; + } } } else if (return_replacement->IsInstanceOf()) { // Inlining InstanceOf into an If may put a tighter bound on reference types. diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc index af8e2c8a7c..009d549547 100644 --- a/compiler/optimizing/instruction_builder.cc +++ b/compiler/optimizing/instruction_builder.cc @@ -917,11 +917,11 @@ bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t d bool finalizable; bool needs_access_check = NeedsAccessCheck(type_index, dex_cache, &finalizable); - // Only the non-resolved entrypoint handles the finalizable class case. If we + // Only the access check entrypoint handles the finalizable class case. If we // need access checks, then we haven't resolved the method and the class may // again be finalizable. QuickEntrypointEnum entrypoint = (finalizable || needs_access_check) - ? 
kQuickAllocObject + ? kQuickAllocObjectWithChecks : kQuickAllocObjectInitialized; if (outer_dex_cache.Get() != dex_cache.Get()) { @@ -946,7 +946,6 @@ bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t d AppendInstruction(new (arena_) HNewInstance( cls, - graph_->GetCurrentMethod(), dex_pc, type_index, *dex_compilation_unit_->GetDexFile(), @@ -1235,13 +1234,13 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex(); field_set = new (arena_) HInstanceFieldSet(object, value, + resolved_field, field_type, resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, class_def_index, *dex_file_, - dex_compilation_unit_->GetDexCache(), dex_pc); } AppendInstruction(field_set); @@ -1256,13 +1255,13 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio } else { uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex(); field_get = new (arena_) HInstanceFieldGet(object, + resolved_field, field_type, resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, class_def_index, *dex_file_, - dex_compilation_unit_->GetDexCache(), dex_pc); } AppendInstruction(field_get); @@ -1311,9 +1310,9 @@ bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) c } void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction, - uint32_t dex_pc, - bool is_put, - Primitive::Type field_type) { + uint32_t dex_pc, + bool is_put, + Primitive::Type field_type) { uint32_t source_or_dest_reg = instruction.VRegA_21c(); uint16_t field_index = instruction.VRegB_21c(); @@ -1400,23 +1399,23 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction, DCHECK_EQ(HPhi::ToPhiType(value->GetType()), HPhi::ToPhiType(field_type)); AppendInstruction(new (arena_) HStaticFieldSet(cls, value, + resolved_field, field_type, resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, class_def_index, *dex_file_, - dex_cache_, dex_pc)); } else { AppendInstruction(new (arena_) HStaticFieldGet(cls, + resolved_field, field_type, resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, class_def_index, *dex_file_, - dex_cache_, dex_pc)); UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction()); } diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index 439e3b66db..911bfb9cc6 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -1118,7 +1118,66 @@ void InstructionSimplifierVisitor::VisitAboveOrEqual(HAboveOrEqual* condition) { VisitCondition(condition); } +// Recognize the following pattern: +// obj.getClass() ==/!= Foo.class +// And replace it with a constant value if the type of `obj` is statically known. +static bool RecognizeAndSimplifyClassCheck(HCondition* condition) { + HInstruction* input_one = condition->InputAt(0); + HInstruction* input_two = condition->InputAt(1); + HLoadClass* load_class = input_one->IsLoadClass() + ? input_one->AsLoadClass() + : input_two->AsLoadClass(); + if (load_class == nullptr) { + return false; + } + + ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); + if (!class_rti.IsValid()) { + // Unresolved class. + return false; + } + + HInstanceFieldGet* field_get = (load_class == input_one) + ? 
input_two->AsInstanceFieldGet() + : input_one->AsInstanceFieldGet(); + if (field_get == nullptr) { + return false; + } + + HInstruction* receiver = field_get->InputAt(0); + ReferenceTypeInfo receiver_type = receiver->GetReferenceTypeInfo(); + if (!receiver_type.IsExact()) { + return false; + } + + { + ScopedObjectAccess soa(Thread::Current()); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0); + DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_"); + if (field_get->GetFieldInfo().GetField() != field) { + return false; + } + + // We can replace the compare. + int value = 0; + if (receiver_type.IsEqual(class_rti)) { + value = condition->IsEqual() ? 1 : 0; + } else { + value = condition->IsNotEqual() ? 1 : 0; + } + condition->ReplaceWith(condition->GetBlock()->GetGraph()->GetIntConstant(value)); + return true; + } +} + void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) { + if (condition->IsEqual() || condition->IsNotEqual()) { + if (RecognizeAndSimplifyClassCheck(condition)) { + return; + } + } + // Reverse condition if left is constant. Our code generators prefer constant // on the right hand side. if (condition->GetLeft()->IsConstant() && !condition->GetRight()->IsConstant()) { @@ -1843,11 +1902,11 @@ void InstructionSimplifierVisitor::SimplifyStringCharAt(HInvoke* invoke) { // so create the HArrayLength, HBoundsCheck and HArrayGet. HArrayLength* length = new (arena) HArrayLength(str, dex_pc, /* is_string_length */ true); invoke->GetBlock()->InsertInstructionBefore(length, invoke); - HBoundsCheck* bounds_check = - new (arena) HBoundsCheck(index, length, dex_pc, invoke->GetDexMethodIndex()); + HBoundsCheck* bounds_check = new (arena) HBoundsCheck( + index, length, dex_pc, invoke->GetDexMethodIndex()); invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke); - HArrayGet* array_get = - new (arena) HArrayGet(str, index, Primitive::kPrimChar, dex_pc, /* is_string_char_at */ true); + HArrayGet* array_get = new (arena) HArrayGet( + str, bounds_check, Primitive::kPrimChar, dex_pc, /* is_string_char_at */ true); invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get); bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment()); GetGraph()->SetHasBoundsChecks(true); diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc index 8c34dc6a86..5bcfa4c98b 100644 --- a/compiler/optimizing/licm_test.cc +++ b/compiler/optimizing/licm_test.cc @@ -111,20 +111,19 @@ TEST_F(LICMTest, FieldHoisting) { BuildLoop(); // Populate the loop with instructions: set/get field with different types. 
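Note the genuine bug fix buried in the SimplifyStringCharAt hunk above: the synthesized HArrayGet previously consumed the raw index, so nothing tied the array access to the HBoundsCheck; it now consumes bounds_check, whose result value is the checked index. A runnable analogue of why the access must consume the checked value:

    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    // Analogue of HArrayLength + HBoundsCheck + HArrayGet: validate the index,
    // then have the element access consume the *checked* value, not the raw one,
    // so the access can never be reordered ahead of (or survive without) the check.
    char CheckedCharAt(const std::vector<char>& chars, int index) {
      if (index < 0 || static_cast<std::size_t>(index) >= chars.size()) {
        throw std::out_of_range("StringIndexOutOfBoundsException analogue");
      }
      int checked_index = index;   // result of the "bounds check" node
      return chars[checked_index]; // the "array get" uses the checked value
    }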
- ScopedNullHandle<mirror::DexCache> dex_cache; HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_, + nullptr, Primitive::kPrimLong, MemberOffset(10), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), - dex_cache, 0); loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction()); HInstruction* set_field = new (&allocator_) HInstanceFieldSet( - parameter_, int_constant_, Primitive::kPrimInt, MemberOffset(20), - false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), dex_cache, 0); + parameter_, int_constant_, nullptr, Primitive::kPrimInt, MemberOffset(20), + false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), 0); loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction()); EXPECT_EQ(get_field->GetBlock(), loop_body_); @@ -140,24 +139,24 @@ TEST_F(LICMTest, NoFieldHoisting) { // Populate the loop with instructions: set/get field with same types. ScopedNullHandle<mirror::DexCache> dex_cache; HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_, + nullptr, Primitive::kPrimLong, MemberOffset(10), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), - dex_cache, 0); loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction()); HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_, get_field, + nullptr, Primitive::kPrimLong, MemberOffset(10), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), - dex_cache, 0); loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction()); diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index afa17cefa2..db1b277990 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -171,6 +171,7 @@ class HInstructionList : public ValueObject { friend class HGraph; friend class HInstruction; friend class HInstructionIterator; + friend class HInstructionIteratorHandleChanges; friend class HBackwardInstructionIterator; DISALLOW_COPY_AND_ASSIGN(HInstructionList); @@ -2312,6 +2313,9 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { }; std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs); +// Iterates over the instructions, while preserving the next instruction +// in case the current instruction gets removed from the list by the user +// of this iterator. class HInstructionIterator : public ValueObject { public: explicit HInstructionIterator(const HInstructionList& instructions) @@ -2333,6 +2337,28 @@ class HInstructionIterator : public ValueObject { DISALLOW_COPY_AND_ASSIGN(HInstructionIterator); }; +// Iterates over the instructions without saving the next instruction, +// therefore handling changes in the graph potentially made by the user +// of this iterator. 
+class HInstructionIteratorHandleChanges : public ValueObject { + public: + explicit HInstructionIteratorHandleChanges(const HInstructionList& instructions) + : instruction_(instructions.first_instruction_) { + } + + bool Done() const { return instruction_ == nullptr; } + HInstruction* Current() const { return instruction_; } + void Advance() { + instruction_ = instruction_->GetNext(); + } + + private: + HInstruction* instruction_; + + DISALLOW_COPY_AND_ASSIGN(HInstructionIteratorHandleChanges); +}; + + class HBackwardInstructionIterator : public ValueObject { public: explicit HBackwardInstructionIterator(const HInstructionList& instructions) @@ -3748,10 +3774,9 @@ class HCompare FINAL : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HCompare); }; -class HNewInstance FINAL : public HExpression<2> { +class HNewInstance FINAL : public HExpression<1> { public: HNewInstance(HInstruction* cls, - HCurrentMethod* current_method, uint32_t dex_pc, dex::TypeIndex type_index, const DexFile& dex_file, @@ -3765,7 +3790,6 @@ class HNewInstance FINAL : public HExpression<2> { SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check); SetPackedFlag<kFlagFinalizable>(finalizable); SetRawInputAt(0, cls); - SetRawInputAt(1, current_method); } dex::TypeIndex GetTypeIndex() const { return type_index_; } @@ -5056,60 +5080,62 @@ class HNullCheck FINAL : public HExpression<1> { DISALLOW_COPY_AND_ASSIGN(HNullCheck); }; +// Embeds an ArtField and all the information required by the compiler. We cache +// that information to avoid requiring the mutator lock every time we need it. class FieldInfo : public ValueObject { public: - FieldInfo(MemberOffset field_offset, + FieldInfo(ArtField* field, + MemberOffset field_offset, Primitive::Type field_type, bool is_volatile, uint32_t index, uint16_t declaring_class_def_index, - const DexFile& dex_file, - Handle<mirror::DexCache> dex_cache) - : field_offset_(field_offset), + const DexFile& dex_file) + : field_(field), + field_offset_(field_offset), field_type_(field_type), is_volatile_(is_volatile), index_(index), declaring_class_def_index_(declaring_class_def_index), - dex_file_(dex_file), - dex_cache_(dex_cache) {} + dex_file_(dex_file) {} + ArtField* GetField() const { return field_; } MemberOffset GetFieldOffset() const { return field_offset_; } Primitive::Type GetFieldType() const { return field_type_; } uint32_t GetFieldIndex() const { return index_; } uint16_t GetDeclaringClassDefIndex() const { return declaring_class_def_index_;} const DexFile& GetDexFile() const { return dex_file_; } bool IsVolatile() const { return is_volatile_; } - Handle<mirror::DexCache> GetDexCache() const { return dex_cache_; } private: + ArtField* const field_; const MemberOffset field_offset_; const Primitive::Type field_type_; const bool is_volatile_; const uint32_t index_; const uint16_t declaring_class_def_index_; const DexFile& dex_file_; - const Handle<mirror::DexCache> dex_cache_; }; class HInstanceFieldGet FINAL : public HExpression<1> { public: HInstanceFieldGet(HInstruction* value, + ArtField* field, Primitive::Type field_type, MemberOffset field_offset, bool is_volatile, uint32_t field_idx, uint16_t declaring_class_def_index, const DexFile& dex_file, - Handle<mirror::DexCache> dex_cache, uint32_t dex_pc) : HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc), - field_info_(field_offset, + field_info_(field, + field_offset, field_type, is_volatile, field_idx, declaring_class_def_index, - dex_file, - dex_cache) { + dex_file) { SetRawInputAt(0, 
value); } @@ -5145,22 +5171,22 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> { public: HInstanceFieldSet(HInstruction* object, HInstruction* value, + ArtField* field, Primitive::Type field_type, MemberOffset field_offset, bool is_volatile, uint32_t field_idx, uint16_t declaring_class_def_index, const DexFile& dex_file, - Handle<mirror::DexCache> dex_cache, uint32_t dex_pc) : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc), - field_info_(field_offset, + field_info_(field, + field_offset, field_type, is_volatile, field_idx, declaring_class_def_index, - dex_file, - dex_cache) { + dex_file) { SetPackedFlag<kFlagValueCanBeNull>(true); SetRawInputAt(0, object); SetRawInputAt(1, value); @@ -5762,7 +5788,6 @@ class HLoadString FINAL : public HInstruction { : HInstruction(SideEffectsForArchRuntimeCalls(), dex_pc), special_input_(HUserRecord<HInstruction*>(current_method)), string_index_(string_index) { - SetPackedFlag<kFlagIsInDexCache>(false); SetPackedField<LoadKindField>(LoadKind::kDexCacheViaMethod); load_data_.dex_file_ = &dex_file; } @@ -5789,7 +5814,6 @@ class HLoadString FINAL : public HInstruction { const DexFile& GetDexFile() const; dex::StringIndex GetStringIndex() const { - DCHECK(HasStringReference(GetLoadKind()) || /* For slow paths. */ !IsInDexCache()); return string_index_; } @@ -5814,7 +5838,7 @@ class HLoadString FINAL : public HInstruction { load_kind == LoadKind::kJitTableAddress) { return false; } - return !IsInDexCache(); + return true; } bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { @@ -5828,15 +5852,6 @@ class HLoadString FINAL : public HInstruction { return SideEffects::CanTriggerGC(); } - bool IsInDexCache() const { return GetPackedFlag<kFlagIsInDexCache>(); } - - void MarkInDexCache() { - SetPackedFlag<kFlagIsInDexCache>(true); - DCHECK(!NeedsEnvironment()); - RemoveEnvironment(); - SetSideEffects(SideEffects::None()); - } - void AddSpecialInput(HInstruction* special_input); using HInstruction::GetInputRecords; // Keep the const version visible. 
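With the IsInDexCache flag deleted in the HLoadString hunk above, the packed LoadKind field slides down to start at the first free generic bit. A small sketch of that packing arithmetic; the concrete bit counts below are assumptions for illustration, not ART's actual values:

    #include <cstddef>

    constexpr size_t kNumberOfGenericPackedBits = 2;              // assumed
    constexpr size_t kFieldLoadKind = kNumberOfGenericPackedBits; // was kFlagIsInDexCache + 1
    constexpr size_t kFieldLoadKindSize = 3;                      // MinimumBitsToStore(kLast), assumed
    constexpr size_t kNumberOfLoadStringPackedBits = kFieldLoadKind + kFieldLoadKindSize;
    static_assert(kNumberOfLoadStringPackedBits <= 32,
                  "all packed flags must fit the instruction's packed field");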
@@ -5852,8 +5867,7 @@ class HLoadString FINAL : public HInstruction { DECLARE_INSTRUCTION(LoadString); private: - static constexpr size_t kFlagIsInDexCache = kNumberOfGenericPackedBits; - static constexpr size_t kFieldLoadKind = kFlagIsInDexCache + 1; + static constexpr size_t kFieldLoadKind = kNumberOfGenericPackedBits; static constexpr size_t kFieldLoadKindSize = MinimumBitsToStore(static_cast<size_t>(LoadKind::kLast)); static constexpr size_t kNumberOfLoadStringPackedBits = kFieldLoadKind + kFieldLoadKindSize; @@ -5949,22 +5963,22 @@ class HClinitCheck FINAL : public HExpression<1> { class HStaticFieldGet FINAL : public HExpression<1> { public: HStaticFieldGet(HInstruction* cls, + ArtField* field, Primitive::Type field_type, MemberOffset field_offset, bool is_volatile, uint32_t field_idx, uint16_t declaring_class_def_index, const DexFile& dex_file, - Handle<mirror::DexCache> dex_cache, uint32_t dex_pc) : HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc), - field_info_(field_offset, + field_info_(field, + field_offset, field_type, is_volatile, field_idx, declaring_class_def_index, - dex_file, - dex_cache) { + dex_file) { SetRawInputAt(0, cls); } @@ -5997,22 +6011,22 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> { public: HStaticFieldSet(HInstruction* cls, HInstruction* value, + ArtField* field, Primitive::Type field_type, MemberOffset field_offset, bool is_volatile, uint32_t field_idx, uint16_t declaring_class_def_index, const DexFile& dex_file, - Handle<mirror::DexCache> dex_cache, uint32_t dex_pc) : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc), - field_info_(field_offset, + field_info_(field, + field_offset, field_type, is_volatile, field_idx, declaring_class_def_index, - dex_file, - dex_cache) { + dex_file) { SetPackedFlag<kFlagValueCanBeNull>(true); SetRawInputAt(0, cls); SetRawInputAt(1, value); diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc index f9ac3a0f72..db7c1fbb06 100644 --- a/compiler/optimizing/prepare_for_register_allocation.cc +++ b/compiler/optimizing/prepare_for_register_allocation.cc @@ -134,39 +134,6 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) { } } -void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) { - HLoadClass* load_class = instruction->InputAt(0)->AsLoadClass(); - const bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse(); - // Change the entrypoint to kQuickAllocObject if either: - // - the class is finalizable (only kQuickAllocObject handles finalizable classes), - // - the class needs access checks (we do not know if it's finalizable), - // - or the load class has only one use. - if (instruction->IsFinalizable() || has_only_one_use || load_class->NeedsAccessCheck()) { - instruction->SetEntrypoint(kQuickAllocObject); - instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex().index_), 0); - if (has_only_one_use) { - // We've just removed the only use of the HLoadClass. Since we don't run DCE after this pass, - // do it manually if possible. - if (!load_class->CanThrow()) { - // If the load class can not throw, it has no side effects and can be removed if there is - // only one use. 
- load_class->GetBlock()->RemoveInstruction(load_class); - } else if (!instruction->GetEnvironment()->IsFromInlinedInvoke() && - CanMoveClinitCheck(load_class, instruction)) { - // The allocation entry point that deals with access checks does not work with inlined - // methods, so we need to check whether this allocation comes from an inlined method. - // We also need to make the same check as for moving clinit check, whether the HLoadClass - // has the clinit check responsibility or not (HLoadClass can throw anyway). - // If it needed access checks, we delegate the access check to the allocation. - if (load_class->NeedsAccessCheck()) { - instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck); - } - load_class->GetBlock()->RemoveInstruction(load_class); - } - } - } -} - bool PrepareForRegisterAllocation::CanEmitConditionAt(HCondition* condition, HInstruction* user) const { if (condition->GetNext() != user) { diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h index a6791482a7..c128227654 100644 --- a/compiler/optimizing/prepare_for_register_allocation.h +++ b/compiler/optimizing/prepare_for_register_allocation.h @@ -44,7 +44,6 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor { void VisitClinitCheck(HClinitCheck* check) OVERRIDE; void VisitCondition(HCondition* condition) OVERRIDE; void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE; - void VisitNewInstance(HNewInstance* instruction) OVERRIDE; bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const; bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const; diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index 33b3875e3b..f8a4469712 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -76,6 +76,7 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor { worklist_(worklist), is_first_run_(is_first_run) {} + void VisitDeoptimize(HDeoptimize* deopt) OVERRIDE; void VisitNewInstance(HNewInstance* new_instance) OVERRIDE; void VisitLoadClass(HLoadClass* load_class) OVERRIDE; void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE; @@ -151,38 +152,6 @@ void ReferenceTypePropagation::Visit(HInstruction* instruction) { instruction->Accept(&visitor); } -void ReferenceTypePropagation::Run() { - worklist_.reserve(kDefaultWorklistSize); - - // To properly propagate type info we need to visit in the dominator-based order. - // Reverse post order guarantees a node's dominators are visited first. - // We take advantage of this order in `VisitBasicBlock`. - for (HBasicBlock* block : graph_->GetReversePostOrder()) { - VisitBasicBlock(block); - } - - ProcessWorklist(); - ValidateTypes(); -} - -void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) { - RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_); - // Handle Phis first as there might be instructions in the same block who depend on them. - for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { - VisitPhi(it.Current()->AsPhi()); - } - - // Handle instructions. - for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - instr->Accept(&visitor); - } - - // Add extra nodes to bound types. 
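The Run/VisitBasicBlock bodies removed here are reinstated further down, with one change: instruction iteration switches to the new HInstructionIteratorHandleChanges. The reason is that RTP's new BoundTypeForClassCheck can insert an HBoundType immediately after the instruction being visited; the plain iterator caches the next pointer before visiting and would silently skip the inserted node. A minimal singly-linked-list analogue of the two iterators:

    struct Node { Node* next = nullptr; };

    struct CachingIter {   // analogue of HInstructionIterator
      Node* cur;
      Node* next;
      explicit CachingIter(Node* n) : cur(n), next(n ? n->next : nullptr) {}
      void Advance() { cur = next; next = cur ? cur->next : nullptr; }
      // A node inserted right after `cur` during visitation is skipped.
    };

    struct LiveIter {      // analogue of HInstructionIteratorHandleChanges
      Node* cur;
      explicit LiveIter(Node* n) : cur(n) {}
      void Advance() { cur = cur->next; }  // sees nodes inserted after `cur`
    };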
- BoundTypeForIfNotNull(block); - BoundTypeForIfInstanceOf(block); -} - // Check if we should create a bound type for the given object at the specified // position. Because of inlining and the fact we run RTP more than once and we // might have a HBoundType already. If we do, we should not create a new one. @@ -225,6 +194,153 @@ static bool ShouldCreateBoundType(HInstruction* position, return false; } +// Helper method to bound the type of `receiver` for all instructions dominated +// by `start_block`, or `start_instruction` if `start_block` is null. The new +// bound type will have its upper bound be `class_rti`. +static void BoundTypeIn(HInstruction* receiver, + HBasicBlock* start_block, + HInstruction* start_instruction, + const ReferenceTypeInfo& class_rti) { + // We only need to bound the type if we have uses in the relevant block. + // So start with null and create the HBoundType lazily, only if it's needed. + HBoundType* bound_type = nullptr; + DCHECK(!receiver->IsLoadClass()) << "We should not replace HLoadClass instructions"; + const HUseList<HInstruction*>& uses = receiver->GetUses(); + for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) { + HInstruction* user = it->GetUser(); + size_t index = it->GetIndex(); + // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput(). + ++it; + bool dominates = (start_instruction != nullptr) + ? start_instruction->StrictlyDominates(user) + : start_block->Dominates(user->GetBlock()); + if (!dominates) { + continue; + } + if (bound_type == nullptr) { + ScopedObjectAccess soa(Thread::Current()); + HInstruction* insert_point = (start_instruction != nullptr) + ? start_instruction->GetNext() + : start_block->GetFirstInstruction(); + if (ShouldCreateBoundType( + insert_point, receiver, class_rti, start_instruction, start_block)) { + bound_type = new (receiver->GetBlock()->GetGraph()->GetArena()) HBoundType(receiver); + bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false); + start_block->InsertInstructionBefore(bound_type, insert_point); + // To comply with the RTP algorithm, don't type the bound type just yet, it will + // be handled in RTPVisitor::VisitBoundType. + } else { + // We already have a bound type on the position we would need to insert + // the new one. The existing bound type should dominate all the users + // (dchecked) so there's no need to continue. + break; + } + } + user->ReplaceInput(bound_type, index); + } + // If the receiver is a null check, also bound the type of the actual + // receiver. + if (receiver->IsNullCheck()) { + BoundTypeIn(receiver->InputAt(0), start_block, start_instruction, class_rti); + } +} + +// Recognize the patterns: +// if (obj.shadow$_klass_ == Foo.class) ... +// deoptimize if (obj.shadow$_klass_ == Foo.class) +static void BoundTypeForClassCheck(HInstruction* check) { + if (!check->IsIf() && !check->IsDeoptimize()) { + return; + } + HInstruction* compare = check->InputAt(0); + if (!compare->IsEqual() && !compare->IsNotEqual()) { + return; + } + HInstruction* input_one = compare->InputAt(0); + HInstruction* input_two = compare->InputAt(1); + HLoadClass* load_class = input_one->IsLoadClass() + ? input_one->AsLoadClass() + : input_two->AsLoadClass(); + if (load_class == nullptr) { + return; + } + + ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); + if (!class_rti.IsValid()) { + // We have loaded an unresolved class. Don't bother bounding the type. + return; + } + + HInstanceFieldGet* field_get = (load_class == input_one) + ? 
input_two->AsInstanceFieldGet() + : input_one->AsInstanceFieldGet(); + if (field_get == nullptr) { + return; + } + HInstruction* receiver = field_get->InputAt(0); + ReferenceTypeInfo receiver_type = receiver->GetReferenceTypeInfo(); + if (receiver_type.IsExact()) { + // If we already know the receiver type, don't bother updating its users. + return; + } + + { + ScopedObjectAccess soa(Thread::Current()); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0); + DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_"); + if (field_get->GetFieldInfo().GetField() != field) { + return; + } + } + + if (check->IsIf()) { + HBasicBlock* trueBlock = check->IsEqual() + ? check->AsIf()->IfTrueSuccessor() + : check->AsIf()->IfFalseSuccessor(); + BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti); + } else { + DCHECK(check->IsDeoptimize()); + if (check->IsEqual()) { + BoundTypeIn(receiver, check->GetBlock(), check, class_rti); + } + } +} + +void ReferenceTypePropagation::Run() { + worklist_.reserve(kDefaultWorklistSize); + + // To properly propagate type info we need to visit in the dominator-based order. + // Reverse post order guarantees a node's dominators are visited first. + // We take advantage of this order in `VisitBasicBlock`. + for (HBasicBlock* block : graph_->GetReversePostOrder()) { + VisitBasicBlock(block); + } + + ProcessWorklist(); + ValidateTypes(); +} + +void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) { + RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_); + // Handle Phis first as there might be instructions in the same block who depend on them. + for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { + VisitPhi(it.Current()->AsPhi()); + } + + // Handle instructions. Since RTP may add HBoundType instructions just after the + // last visited instruction, use `HInstructionIteratorHandleChanges` iterator. + for (HInstructionIteratorHandleChanges it(block->GetInstructions()); !it.Done(); it.Advance()) { + HInstruction* instr = it.Current(); + instr->Accept(&visitor); + } + + // Add extra nodes to bound types. + BoundTypeForIfNotNull(block); + BoundTypeForIfInstanceOf(block); + BoundTypeForClassCheck(block->GetLastInstruction()); +} + void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) { HIf* ifInstruction = block->GetLastInstruction()->AsIf(); if (ifInstruction == nullptr) { @@ -254,40 +370,14 @@ void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) { // We only need to bound the type if we have uses in the relevant block. // So start with null and create the HBoundType lazily, only if it's needed. - HBoundType* bound_type = nullptr; HBasicBlock* notNullBlock = ifInput->IsNotEqual() ? ifInstruction->IfTrueSuccessor() : ifInstruction->IfFalseSuccessor(); - const HUseList<HInstruction*>& uses = obj->GetUses(); - for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) { - HInstruction* user = it->GetUser(); - size_t index = it->GetIndex(); - // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput(). 
- ++it; - if (notNullBlock->Dominates(user->GetBlock())) { - if (bound_type == nullptr) { - ScopedObjectAccess soa(Thread::Current()); - HInstruction* insert_point = notNullBlock->GetFirstInstruction(); - ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create( - handle_cache_.GetObjectClassHandle(), /* is_exact */ false); - if (ShouldCreateBoundType(insert_point, obj, object_rti, nullptr, notNullBlock)) { - bound_type = new (graph_->GetArena()) HBoundType(obj); - bound_type->SetUpperBound(object_rti, /* bound_can_be_null */ false); - if (obj->GetReferenceTypeInfo().IsValid()) { - bound_type->SetReferenceTypeInfo(obj->GetReferenceTypeInfo()); - } - notNullBlock->InsertInstructionBefore(bound_type, insert_point); - } else { - // We already have a bound type on the position we would need to insert - // the new one. The existing bound type should dominate all the users - // (dchecked) so there's no need to continue. - break; - } - } - user->ReplaceInput(bound_type, index); - } - } + ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create( + handle_cache_.GetObjectClassHandle(), /* is_exact */ false); + + BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti); } // Returns true if one of the patterns below has been recognized. If so, the @@ -378,15 +468,10 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) { HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass(); ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); - { - if (!class_rti.IsValid()) { - // He have loaded an unresolved class. Don't bother bounding the type. - return; - } + if (!class_rti.IsValid()) { + // He have loaded an unresolved class. Don't bother bounding the type. + return; } - // We only need to bound the type if we have uses in the relevant block. - // So start with null and create the HBoundType lazily, only if it's needed. - HBoundType* bound_type = nullptr; HInstruction* obj = instanceOf->InputAt(0); if (obj->GetReferenceTypeInfo().IsExact() && !obj->IsPhi()) { @@ -398,33 +483,14 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) { // input. return; } - DCHECK(!obj->IsLoadClass()) << "We should not replace HLoadClass instructions"; - const HUseList<HInstruction*>& uses = obj->GetUses(); - for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) { - HInstruction* user = it->GetUser(); - size_t index = it->GetIndex(); - // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput(). - ++it; - if (instanceOfTrueBlock->Dominates(user->GetBlock())) { - if (bound_type == nullptr) { - ScopedObjectAccess soa(Thread::Current()); - HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction(); - if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) { - bound_type = new (graph_->GetArena()) HBoundType(obj); - bool is_exact = class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes(); - bound_type->SetUpperBound(ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), is_exact), - /* InstanceOf fails for null. */ false); - instanceOfTrueBlock->InsertInstructionBefore(bound_type, insert_point); - } else { - // We already have a bound type on the position we would need to insert - // the new one. The existing bound type should dominate all the users - // (dchecked) so there's no need to continue. 
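The two hand-rolled loops removed in this file (for if-not-null and for instanceof) and the new BoundTypeIn helper share one shape: walk the receiver's uses, and materialize the HBoundType lazily, only once a use dominated by the guarded region is found — advancing the use iterator before ReplaceInput invalidates it. A compact analogue of that lazy-materialization pattern, with ART's graph types replaced by plain values:

    #include <cstddef>
    #include <memory>
    #include <vector>

    struct BoundValue { int narrowed; };  // plays the role of HBoundType

    void NarrowDominatedUses(std::vector<int*>& uses,
                             const std::vector<bool>& dominated,
                             int narrowed) {
      std::unique_ptr<BoundValue> bound;  // created lazily, at most once
      for (std::size_t i = 0; i < uses.size(); ++i) {
        if (!dominated[i]) {
          continue;  // use not guarded by the check; leave it alone
        }
        if (bound == nullptr) {
          bound.reset(new BoundValue{narrowed});
        }
        *uses[i] = bound->narrowed;  // analogue of user->ReplaceInput(bound_type, index)
      }
    }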
- break; - } - } - user->ReplaceInput(bound_type, index); + + { + ScopedObjectAccess soa(Thread::Current()); + if (!class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) { + class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false); } } + BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction */ nullptr, class_rti); } void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* instr, @@ -464,6 +530,10 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst } } +void ReferenceTypePropagation::RTPVisitor::VisitDeoptimize(HDeoptimize* instr) { + BoundTypeForClassCheck(instr); +} + void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr, dex::TypeIndex type_idx, const DexFile& dex_file, @@ -515,16 +585,9 @@ void ReferenceTypePropagation::RTPVisitor::UpdateFieldAccessTypeInfo(HInstructio ScopedObjectAccess soa(Thread::Current()); ObjPtr<mirror::Class> klass; - // The field index is unknown only during tests. - if (info.GetFieldIndex() != kUnknownFieldIndex) { - ClassLinker* cl = Runtime::Current()->GetClassLinker(); - ArtField* field = cl->GetResolvedField(info.GetFieldIndex(), - MakeObjPtr(info.GetDexCache().Get())); - // TODO: There are certain cases where we can't resolve the field. - // b/21914925 is open to keep track of a repro case for this issue. - if (field != nullptr) { - klass = field->GetType<false>(); - } + // The field is unknown only during tests. + if (info.GetField() != nullptr) { + klass = info.GetField()->GetType<false>(); } SetClassAsTypeInfo(instr, klass, /* is_exact */ false); diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc index 559f40923b..2227872f76 100644 --- a/compiler/optimizing/register_allocator_test.cc +++ b/compiler/optimizing/register_allocator_test.cc @@ -492,7 +492,6 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, HInstruction** input2) { HGraph* graph = CreateGraph(allocator); HBasicBlock* entry = new (allocator) HBasicBlock(graph); - ScopedNullHandle<mirror::DexCache> dex_cache; graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* parameter = new (allocator) HParameterValue( @@ -504,13 +503,13 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, entry->AddSuccessor(block); HInstruction* test = new (allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(22), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0); block->AddInstruction(test); block->AddInstruction(new (allocator) HIf(test)); @@ -531,22 +530,22 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, *phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt); join->AddPhi(*phi); *input1 = new (allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimInt, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0); *input2 = new (allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimInt, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0); then->AddInstruction(*input1); else_->AddInstruction(*input2); @@ -654,7 +653,6 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator, HInstruction** field, HInstruction** ret) { HGraph* graph = CreateGraph(allocator); - ScopedNullHandle<mirror::DexCache> dex_cache; HBasicBlock* entry = new (allocator) HBasicBlock(graph); 
graph->AddBlock(entry); graph->SetEntryBlock(entry); @@ -667,13 +665,13 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator, entry->AddSuccessor(block); *field = new (allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimInt, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0); block->AddInstruction(*field); *ret = new (allocator) HReturn(*field); diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc index 9fdeccfa1a..ca26c30dcf 100644 --- a/compiler/optimizing/sharpening.cc +++ b/compiler/optimizing/sharpening.cc @@ -270,7 +270,6 @@ void HSharpening::SharpenClass(HLoadClass* load_class, void HSharpening::ProcessLoadString(HLoadString* load_string) { DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kDexCacheViaMethod); - DCHECK(!load_string->IsInDexCache()); const DexFile& dex_file = load_string->GetDexFile(); dex::StringIndex string_index = load_string->GetStringIndex();
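Finally, caching the ArtField* pays off in reference_type_propagation.cc above: UpdateFieldAccessTypeInfo no longer re-resolves the field through the dex cache (the old path that could fail, tracked by b/21914925) and instead reads the type straight off the cached field. The replacement logic, extracted as a helper for clarity — this assumes ART headers and the mutator lock, it is not a standalone program:

    ObjPtr<mirror::Class> GetFieldTypeOrNull(const FieldInfo& info)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      // The field is nullptr only in compiler tests that fabricate field accesses.
      if (info.GetField() == nullptr) {
        return nullptr;
      }
      return info.GetField()->GetType</* kResolve */ false>();
    }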