author      2023-12-17 19:34:50 +0000
committer   2023-12-19 13:33:14 +0000
commit      b7223542c7ee3f667822fb0eff3020b8123601af (patch)
tree        11049423af057e5054be8d90a455110f7e43fb64 /compiler
parent      e828df143f5eefedfede026d8913b20725f528a0 (diff)
Reland "Move the construction of ProfilingInfo in the compiler."
This reverts commit 9fedb9f473fd77f31285203f5baa9533b8e21ce6.
Reason for reland:
- Use CodeGenerator::IsImplementedIntrinsic in IsInlineCacheUseful to
  match the inliner's behavior.
- Address some missing type-propagation opportunities in aosp/2880687.
- Be robust when an inline cache is missing.
Test: test.py
Change-Id: Ib6e4a624174d6891a0fd425af88a9c16e09afa99
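
For orientation before the patch body: the reland hinges on the new ProfilingInfoBuilder pass deciding, per virtual/interface call site, whether an inline cache is worth allocating at all. The predicate below is condensed from optimizing/profiling_info_builder.cc in this patch (two early-outs merged into one if, comments added for this summary); it is a reading aid, not a verbatim excerpt.

bool ProfilingInfoBuilder::IsInlineCacheUseful(HInvoke* invoke, CodeGenerator* codegen) {
  DCHECK(invoke->IsInvokeVirtual() || invoke->IsInvokeInterface());
  // An implemented intrinsic has a known target, so no cache is needed. Using
  // CodeGenerator::IsImplementedIntrinsic here matches the inliner (reland fix 1).
  if (codegen->IsImplementedIntrinsic(invoke)) {
    return false;
  }
  // Inline caches are only collected by baseline JIT-compiled code.
  if (!invoke->GetBlock()->GetGraph()->IsCompilingBaseline() ||
      Runtime::Current()->IsAotCompiler()) {
    return false;
  }
  // An exact receiver type or a final target can never become polymorphic.
  if (invoke->InputAt(0)->GetReferenceTypeInfo().IsExact()) {
    return false;
  }
  if (invoke->GetResolvedMethod() != nullptr) {
    ScopedObjectAccess soa(Thread::Current());
    if (invoke->GetResolvedMethod()->IsFinal() ||
        invoke->GetResolvedMethod()->GetDeclaringClass()->IsFinal()) {
      return false;
    }
  }
  return true;
}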
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.bp                              |  1
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc      | 35
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc   | 40
-rw-r--r--  compiler/optimizing/code_generator_riscv64.cc    | 50
-rw-r--r--  compiler/optimizing/code_generator_x86.cc        | 46
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc     | 34
-rw-r--r--  compiler/optimizing/inliner.cc                   | 12
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc       | 18
-rw-r--r--  compiler/optimizing/profiling_info_builder.cc    | 90
-rw-r--r--  compiler/optimizing/profiling_info_builder.h     | 64
10 files changed, 290 insertions, 100 deletions
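
The five MaybeGenerateInlineCacheCheck hunks below (arm64, arm, riscv64, x86, x86-64) all follow the same shape and differ only in the assembly they emit. Here is an architecture-neutral sketch of that shared shape, with the per-backend assembly collapsed into one illustrative placeholder call (CodeGeneratorFoo and EmitInlineCacheCheck are not names in this patch):

void CodeGeneratorFoo::MaybeGenerateInlineCacheCheck(HInstruction* instruction, Register klass) {
  if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) {
    ProfilingInfo* info = GetGraph()->GetProfilingInfo();
    DCHECK(info != nullptr);
    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
    if (cache != nullptr) {
      // Load the cache address, compare the receiver's class against the first
      // cached class (fast path for a monomorphic cache), and otherwise call
      // art_quick_update_inline_cache.
      EmitInlineCacheCheck(cache, klass);  // placeholder for the per-backend assembly
    } else {
      // Unexpected, but compilation is not guaranteed to be stable across JIT
      // runs, so only warn.
      ScopedObjectAccess soa(Thread::Current());
      LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod();
    }
  }
}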
diff --git a/compiler/Android.bp b/compiler/Android.bp index 4427cd0f22..e0c1744ce3 100644 --- a/compiler/Android.bp +++ b/compiler/Android.bp @@ -177,6 +177,7 @@ art_cc_defaults { "optimizing/optimizing_compiler.cc", "optimizing/parallel_move_resolver.cc", "optimizing/prepare_for_register_allocation.cc", + "optimizing/profiling_info_builder.cc", "optimizing/reference_type_propagation.cc", "optimizing/register_allocation_resolver.cc", "optimizing/register_allocator.cc", diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 4f5db1f1c2..9027976165 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -45,6 +45,7 @@ #include "offsets.h" #include "optimizing/common_arm64.h" #include "optimizing/nodes.h" +#include "profiling_info_builder.h" #include "thread.h" #include "trace.h" #include "utils/arm64/assembler_arm64.h" @@ -4593,24 +4594,26 @@ void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) { void CodeGeneratorARM64::MaybeGenerateInlineCacheCheck(HInstruction* instruction, Register klass) { DCHECK_EQ(klass.GetCode(), 0u); - // We know the destination of an intrinsic, so no need to record inline - // caches. - if (!instruction->GetLocations()->Intrinsified() && - GetGraph()->IsCompilingBaseline() && - !Runtime::Current()->IsAotCompiler()) { - DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke()); + if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) { ProfilingInfo* info = GetGraph()->GetProfilingInfo(); DCHECK(info != nullptr); - InlineCache* cache = info->GetInlineCache(instruction->GetDexPc()); - uint64_t address = reinterpret_cast64<uint64_t>(cache); - vixl::aarch64::Label done; - __ Mov(x8, address); - __ Ldr(w9, MemOperand(x8, InlineCache::ClassesOffset().Int32Value())); - // Fast path for a monomorphic cache. - __ Cmp(klass.W(), w9); - __ B(eq, &done); - InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc()); - __ Bind(&done); + InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke()); + if (cache != nullptr) { + uint64_t address = reinterpret_cast64<uint64_t>(cache); + vixl::aarch64::Label done; + __ Mov(x8, address); + __ Ldr(w9, MemOperand(x8, InlineCache::ClassesOffset().Int32Value())); + // Fast path for a monomorphic cache. + __ Cmp(klass.W(), w9); + __ B(eq, &done); + InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc()); + __ Bind(&done); + } else { + // This is unexpected, but we don't guarantee stable compilation across + // JIT runs so just warn about it. 
+ ScopedObjectAccess soa(Thread::Current()); + LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod(); + } } } diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index dacf034327..00c14b0b46 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -39,6 +39,7 @@ #include "mirror/array-inl.h" #include "mirror/class-inl.h" #include "mirror/var_handle.h" +#include "profiling_info_builder.h" #include "scoped_thread_state_change-inl.h" #include "thread.h" #include "trace.h" @@ -3684,26 +3685,27 @@ void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) { void CodeGeneratorARMVIXL::MaybeGenerateInlineCacheCheck(HInstruction* instruction, vixl32::Register klass) { DCHECK_EQ(r0.GetCode(), klass.GetCode()); - // We know the destination of an intrinsic, so no need to record inline - // caches. - if (!instruction->GetLocations()->Intrinsified() && - GetGraph()->IsCompilingBaseline() && - !Runtime::Current()->IsAotCompiler()) { - DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke()); + if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) { ProfilingInfo* info = GetGraph()->GetProfilingInfo(); - DCHECK(info != nullptr); - InlineCache* cache = info->GetInlineCache(instruction->GetDexPc()); - uint32_t address = reinterpret_cast32<uint32_t>(cache); - vixl32::Label done; - UseScratchRegisterScope temps(GetVIXLAssembler()); - temps.Exclude(ip); - __ Mov(r4, address); - __ Ldr(ip, MemOperand(r4, InlineCache::ClassesOffset().Int32Value())); - // Fast path for a monomorphic cache. - __ Cmp(klass, ip); - __ B(eq, &done, /* is_far_target= */ false); - InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc()); - __ Bind(&done); + InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke()); + if (cache != nullptr) { + uint32_t address = reinterpret_cast32<uint32_t>(cache); + vixl32::Label done; + UseScratchRegisterScope temps(GetVIXLAssembler()); + temps.Exclude(ip); + __ Mov(r4, address); + __ Ldr(ip, MemOperand(r4, InlineCache::ClassesOffset().Int32Value())); + // Fast path for a monomorphic cache. + __ Cmp(klass, ip); + __ B(eq, &done, /* is_far_target= */ false); + InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc()); + __ Bind(&done); + } else { + // This is unexpected, but we don't guarantee stable compilation across + // JIT runs so just warn about it. + ScopedObjectAccess soa(Thread::Current()); + LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod(); + } } } diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc index 1e1dd4ab3b..182c1d4d05 100644 --- a/compiler/optimizing/code_generator_riscv64.cc +++ b/compiler/optimizing/code_generator_riscv64.cc @@ -34,6 +34,7 @@ #include "linker/linker_patch.h" #include "mirror/class-inl.h" #include "optimizing/nodes.h" +#include "optimizing/profiling_info_builder.h" #include "runtime.h" #include "scoped_thread_state_change-inl.h" #include "stack_map_stream.h" @@ -6720,32 +6721,35 @@ void CodeGeneratorRISCV64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* inv void CodeGeneratorRISCV64::MaybeGenerateInlineCacheCheck(HInstruction* instruction, XRegister klass) { - // We know the destination of an intrinsic, so no need to record inline caches. 
- if (!instruction->GetLocations()->Intrinsified() && - GetGraph()->IsCompilingBaseline() && - !Runtime::Current()->IsAotCompiler()) { - DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke()); + if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) { ProfilingInfo* info = GetGraph()->GetProfilingInfo(); DCHECK(info != nullptr); - InlineCache* cache = info->GetInlineCache(instruction->GetDexPc()); - uint64_t address = reinterpret_cast64<uint64_t>(cache); - Riscv64Label done; - // The `art_quick_update_inline_cache` expects the inline cache in T5. - XRegister ic_reg = T5; - ScratchRegisterScope srs(GetAssembler()); - DCHECK_EQ(srs.AvailableXRegisters(), 2u); - srs.ExcludeXRegister(ic_reg); - DCHECK_EQ(srs.AvailableXRegisters(), 1u); - __ LoadConst64(ic_reg, address); - { - ScratchRegisterScope srs2(GetAssembler()); - XRegister tmp = srs2.AllocateXRegister(); - __ Loadd(tmp, ic_reg, InlineCache::ClassesOffset().Int32Value()); - // Fast path for a monomorphic cache. - __ Beq(klass, tmp, &done); + InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke()); + if (cache != nullptr) { + uint64_t address = reinterpret_cast64<uint64_t>(cache); + Riscv64Label done; + // The `art_quick_update_inline_cache` expects the inline cache in T5. + XRegister ic_reg = T5; + ScratchRegisterScope srs(GetAssembler()); + DCHECK_EQ(srs.AvailableXRegisters(), 2u); + srs.ExcludeXRegister(ic_reg); + DCHECK_EQ(srs.AvailableXRegisters(), 1u); + __ LoadConst64(ic_reg, address); + { + ScratchRegisterScope srs2(GetAssembler()); + XRegister tmp = srs2.AllocateXRegister(); + __ Loadd(tmp, ic_reg, InlineCache::ClassesOffset().Int32Value()); + // Fast path for a monomorphic cache. + __ Beq(klass, tmp, &done); + } + InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc()); + __ Bind(&done); + } else { + // This is unexpected, but we don't guarantee stable compilation across + // JIT runs so just warn about it. + ScopedObjectAccess soa(Thread::Current()); + LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod(); } - InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc()); - __ Bind(&done); } } diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index a923e578c0..71db5c99af 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -37,6 +37,7 @@ #include "mirror/class-inl.h" #include "mirror/var_handle.h" #include "optimizing/nodes.h" +#include "profiling_info_builder.h" #include "scoped_thread_state_change-inl.h" #include "thread.h" #include "trace.h" @@ -2798,7 +2799,7 @@ void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) { HandleInvoke(invoke); - if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) { + if (ProfilingInfoBuilder::IsInlineCacheUseful(invoke, codegen_)) { // Add one temporary for inline cache update. invoke->GetLocations()->AddTemp(Location::RegisterLocation(EBP)); } @@ -2826,7 +2827,7 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) { // Add the hidden argument. invoke->GetLocations()->AddTemp(Location::FpuRegisterLocation(XMM7)); - if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) { + if (ProfilingInfoBuilder::IsInlineCacheUseful(invoke, codegen_)) { // Add one temporary for inline cache update. 
invoke->GetLocations()->AddTemp(Location::RegisterLocation(EBP)); } @@ -2844,29 +2845,30 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) { void CodeGeneratorX86::MaybeGenerateInlineCacheCheck(HInstruction* instruction, Register klass) { DCHECK_EQ(EAX, klass); - // We know the destination of an intrinsic, so no need to record inline - // caches (also the intrinsic location builder doesn't request an additional - // temporary). - if (!instruction->GetLocations()->Intrinsified() && - GetGraph()->IsCompilingBaseline() && - !Runtime::Current()->IsAotCompiler()) { - DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke()); + if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) { ProfilingInfo* info = GetGraph()->GetProfilingInfo(); DCHECK(info != nullptr); - InlineCache* cache = info->GetInlineCache(instruction->GetDexPc()); - uint32_t address = reinterpret_cast32<uint32_t>(cache); - if (kIsDebugBuild) { - uint32_t temp_index = instruction->GetLocations()->GetTempCount() - 1u; - CHECK_EQ(EBP, instruction->GetLocations()->GetTemp(temp_index).AsRegister<Register>()); + InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke()); + if (cache != nullptr) { + uint32_t address = reinterpret_cast32<uint32_t>(cache); + if (kIsDebugBuild) { + uint32_t temp_index = instruction->GetLocations()->GetTempCount() - 1u; + CHECK_EQ(EBP, instruction->GetLocations()->GetTemp(temp_index).AsRegister<Register>()); + } + Register temp = EBP; + NearLabel done; + __ movl(temp, Immediate(address)); + // Fast path for a monomorphic cache. + __ cmpl(klass, Address(temp, InlineCache::ClassesOffset().Int32Value())); + __ j(kEqual, &done); + GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(kQuickUpdateInlineCache).Int32Value()); + __ Bind(&done); + } else { + // This is unexpected, but we don't guarantee stable compilation across + // JIT runs so just warn about it. + ScopedObjectAccess soa(Thread::Current()); + LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod(); } - Register temp = EBP; - NearLabel done; - __ movl(temp, Immediate(address)); - // Fast path for a monomorphic cache. - __ cmpl(klass, Address(temp, InlineCache::ClassesOffset().Int32Value())); - __ j(kEqual, &done); - GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(kQuickUpdateInlineCache).Int32Value()); - __ Bind(&done); } } diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 24cb0c30b7..81ffa9876c 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -38,6 +38,7 @@ #include "mirror/object_reference.h" #include "mirror/var_handle.h" #include "optimizing/nodes.h" +#include "profiling_info_builder.h" #include "scoped_thread_state_change-inl.h" #include "thread.h" #include "trace.h" @@ -3094,23 +3095,26 @@ void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) { void CodeGeneratorX86_64::MaybeGenerateInlineCacheCheck(HInstruction* instruction, CpuRegister klass) { DCHECK_EQ(RDI, klass.AsRegister()); - // We know the destination of an intrinsic, so no need to record inline - // caches. 
- if (!instruction->GetLocations()->Intrinsified() && - GetGraph()->IsCompilingBaseline() && - !Runtime::Current()->IsAotCompiler()) { + if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) { ProfilingInfo* info = GetGraph()->GetProfilingInfo(); DCHECK(info != nullptr); - InlineCache* cache = info->GetInlineCache(instruction->GetDexPc()); - uint64_t address = reinterpret_cast64<uint64_t>(cache); - NearLabel done; - __ movq(CpuRegister(TMP), Immediate(address)); - // Fast path for a monomorphic cache. - __ cmpl(Address(CpuRegister(TMP), InlineCache::ClassesOffset().Int32Value()), klass); - __ j(kEqual, &done); - GenerateInvokeRuntime( - GetThreadOffset<kX86_64PointerSize>(kQuickUpdateInlineCache).Int32Value()); - __ Bind(&done); + InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke()); + if (cache != nullptr) { + uint64_t address = reinterpret_cast64<uint64_t>(cache); + NearLabel done; + __ movq(CpuRegister(TMP), Immediate(address)); + // Fast path for a monomorphic cache. + __ cmpl(Address(CpuRegister(TMP), InlineCache::ClassesOffset().Int32Value()), klass); + __ j(kEqual, &done); + GenerateInvokeRuntime( + GetThreadOffset<kX86_64PointerSize>(kQuickUpdateInlineCache).Int32Value()); + __ Bind(&done); + } else { + // This is unexpected, but we don't guarantee stable compilation across + // JIT runs so just warn about it. + ScopedObjectAccess soa(Thread::Current()); + LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod(); + } } } diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 66a536d33b..37fa318403 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -674,9 +674,15 @@ HInliner::InlineCacheType HInliner::GetInlineCacheJIT( return kInlineCacheNoData; } - Runtime::Current()->GetJit()->GetCodeCache()->CopyInlineCacheInto( - *profiling_info->GetInlineCache(invoke_instruction->GetDexPc()), - classes); + InlineCache* cache = profiling_info->GetInlineCache(invoke_instruction->GetDexPc()); + if (cache == nullptr) { + // This shouldn't happen, but we don't guarantee that method resolution + // between baseline compilation and optimizing compilation is identical. Be robust, + // warn about it, and return that we don't have any inline cache data. 
+ LOG(WARNING) << "No inline cache found for " << caller->PrettyMethod(); + return kInlineCacheNoData; + } + Runtime::Current()->GetJit()->GetCodeCache()->CopyInlineCacheInto(*cache, classes); return GetInlineCacheType(*classes); } diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 0069a20a26..d458462226 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -53,6 +53,7 @@ #include "oat_quick_method_header.h" #include "optimizing/write_barrier_elimination.h" #include "prepare_for_register_allocation.h" +#include "profiling_info_builder.h" #include "reference_type_propagation.h" #include "register_allocator_linear_scan.h" #include "select_generator.h" @@ -835,8 +836,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator, jit::Jit* jit = Runtime::Current()->GetJit(); if (jit != nullptr) { ProfilingInfo* info = jit->GetCodeCache()->GetProfilingInfo(method, Thread::Current()); - DCHECK_IMPLIES(compilation_kind == CompilationKind::kBaseline, info != nullptr) - << "Compiling a method baseline should always have a ProfilingInfo"; graph->SetProfilingInfo(info); } @@ -920,6 +919,21 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator, &pass_observer, regalloc_strategy, compilation_stats_.get()); + // If we are compiling baseline and we haven't created a profiling info for + // this method already, do it now. + if (jit != nullptr && + compilation_kind == CompilationKind::kBaseline && + graph->GetProfilingInfo() == nullptr) { + ProfilingInfoBuilder( + graph, codegen->GetCompilerOptions(), codegen.get(), compilation_stats_.get()).Run(); + // We expect a profiling info to be created and attached to the graph. + // However, we may have run out of memory trying to create it, so in this + // case just abort the compilation. + if (graph->GetProfilingInfo() == nullptr) { + MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit); + return nullptr; + } + } codegen->Compile(); pass_observer.DumpDisassembly(); diff --git a/compiler/optimizing/profiling_info_builder.cc b/compiler/optimizing/profiling_info_builder.cc new file mode 100644 index 0000000000..7888753830 --- /dev/null +++ b/compiler/optimizing/profiling_info_builder.cc @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "profiling_info_builder.h" + +#include "art_method-inl.h" +#include "code_generator.h" +#include "driver/compiler_options.h" +#include "dex/code_item_accessors-inl.h" +#include "jit/profiling_info.h" +#include "optimizing_compiler_stats.h" +#include "scoped_thread_state_change-inl.h" + +namespace art HIDDEN { + +void ProfilingInfoBuilder::Run() { + DCHECK_EQ(GetGraph()->GetProfilingInfo(), nullptr); + // Order does not matter. + for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) { + // No need to visit the phis. 
+ for (HInstructionIteratorHandleChanges inst_it(block->GetInstructions()); !inst_it.Done(); + inst_it.Advance()) { + inst_it.Current()->Accept(this); + } + } + + ScopedObjectAccess soa(Thread::Current()); + GetGraph()->SetProfilingInfo( + ProfilingInfo::Create(soa.Self(), GetGraph()->GetArtMethod(), inline_caches_)); +} + +void ProfilingInfoBuilder::HandleInvoke(HInvoke* invoke) { + DCHECK(!invoke->GetEnvironment()->IsFromInlinedInvoke()); + if (IsInlineCacheUseful(invoke, codegen_)) { + inline_caches_.push_back(invoke->GetDexPc()); + } +} + +void ProfilingInfoBuilder::VisitInvokeInterface(HInvokeInterface* invoke) { + HandleInvoke(invoke); +} + +void ProfilingInfoBuilder::VisitInvokeVirtual(HInvokeVirtual* invoke) { + HandleInvoke(invoke); +} + +bool ProfilingInfoBuilder::IsInlineCacheUseful(HInvoke* invoke, CodeGenerator* codegen) { + DCHECK(invoke->IsInvokeVirtual() || invoke->IsInvokeInterface()); + if (codegen->IsImplementedIntrinsic(invoke)) { + return false; + } + if (!invoke->GetBlock()->GetGraph()->IsCompilingBaseline()) { + return false; + } + if (Runtime::Current()->IsAotCompiler()) { + return false; + } + if (invoke->InputAt(0)->GetReferenceTypeInfo().IsExact()) { + return false; + } + if (invoke->GetResolvedMethod() != nullptr) { + ScopedObjectAccess soa(Thread::Current()); + if (invoke->GetResolvedMethod()->IsFinal() || + invoke->GetResolvedMethod()->GetDeclaringClass()->IsFinal()) { + return false; + } + } + return true; +} + +InlineCache* ProfilingInfoBuilder::GetInlineCache(ProfilingInfo* info, HInvoke* instruction) { + DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke()); + ScopedObjectAccess soa(Thread::Current()); + return info->GetInlineCache(instruction->GetDexPc()); +} + +} // namespace art diff --git a/compiler/optimizing/profiling_info_builder.h b/compiler/optimizing/profiling_info_builder.h new file mode 100644 index 0000000000..2185b0eed3 --- /dev/null +++ b/compiler/optimizing/profiling_info_builder.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_OPTIMIZING_PROFILING_INFO_BUILDER_H_ +#define ART_COMPILER_OPTIMIZING_PROFILING_INFO_BUILDER_H_ + +#include "base/macros.h" +#include "nodes.h" + +namespace art HIDDEN { + +class CodeGenerator; +class CompilerOptions; +class InlineCache; +class ProfilingInfo; + +class ProfilingInfoBuilder : public HGraphDelegateVisitor { + public: + ProfilingInfoBuilder(HGraph* graph, + const CompilerOptions& compiler_options, + CodeGenerator* codegen, + OptimizingCompilerStats* stats = nullptr) + : HGraphDelegateVisitor(graph, stats), + codegen_(codegen), + compiler_options_(compiler_options) {} + + void Run(); + + static constexpr const char* kProfilingInfoBuilderPassName = + "profiling_info_builder"; + + static InlineCache* GetInlineCache(ProfilingInfo* info, HInvoke* invoke); + static bool IsInlineCacheUseful(HInvoke* invoke, CodeGenerator* codegen); + + private: + void VisitInvokeVirtual(HInvokeVirtual* invoke) override; + void VisitInvokeInterface(HInvokeInterface* invoke) override; + + void HandleInvoke(HInvoke* invoke); + + CodeGenerator* codegen_; + [[maybe_unused]] const CompilerOptions& compiler_options_; + std::vector<uint32_t> inline_caches_; + + DISALLOW_COPY_AND_ASSIGN(ProfilingInfoBuilder); +}; + +} // namespace art + + +#endif // ART_COMPILER_OPTIMIZING_PROFILING_INFO_BUILDER_H_ |
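
Taken together with the optimizing_compiler.cc hunk above, the flow for a baseline JIT compile is: if the method has no ProfilingInfo yet, run ProfilingInfoBuilder right before code generation, and abort the compilation if the allocation fails. Condensed from the patch (names match the hunk; only surrounding code is elided):

// In OptimizingCompiler::TryCompile(), after register allocation:
if (jit != nullptr &&
    compilation_kind == CompilationKind::kBaseline &&
    graph->GetProfilingInfo() == nullptr) {
  ProfilingInfoBuilder(
      graph, codegen->GetCompilerOptions(), codegen.get(), compilation_stats_.get()).Run();
  // Creating the ProfilingInfo can fail under memory pressure; in that case
  // give up on this compilation attempt.
  if (graph->GetProfilingInfo() == nullptr) {
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
    return nullptr;
  }
}
codegen->Compile();

The upshot is that a ProfilingInfo is now allocated by the compiler itself and only contains caches for call sites the builder considered useful, which is why both the inliner and the code generators above now tolerate a missing cache instead of assuming one exists for every invoke dex pc.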