author      2023-12-13 17:27:59 +0000
committer   2023-12-14 16:45:27 +0000
commit      d4faa43efb17e75fc99930f798103b29553a6a5e (patch)
tree        6aa3c6728a7a7687fb5953e8def0954bb1156811 /compiler/optimizing
parent      d0a15c3de2867b2f566831307da1cd51b5957a62 (diff)
Move the construction of ProfilingInfo in the compiler.
This reduces the number of inline caches when the compiler can
statically determine that the call target is fixed.
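
For reference, a minimal standalone sketch of that selection rule (hypothetical
simplified types, not ART's real classes; the actual logic is
ProfilingInfoBuilder::IsInlineCacheUseful in the diff below):

// Simplified model: an inline cache is only worth recording for a baseline
// JIT compilation of a virtual/interface call whose target is not fixed.
#include <cstdint>
#include <vector>

struct Invoke {
  bool is_virtual_or_interface;   // Only these call kinds are profiled.
  bool is_intrinsic;              // Intrinsics have a known destination.
  bool receiver_type_is_exact;    // Exact receiver type => fixed target.
  bool method_or_class_is_final;  // Final method/class => fixed target.
  uint32_t dex_pc;
};

bool NeedsInlineCache(const Invoke& invoke, bool compiling_baseline_jit) {
  if (!compiling_baseline_jit) return false;
  if (!invoke.is_virtual_or_interface) return false;
  if (invoke.is_intrinsic) return false;
  if (invoke.receiver_type_is_exact) return false;
  if (invoke.method_or_class_is_final) return false;
  return true;
}

// The builder records one dex pc per useful call site and creates the
// ProfilingInfo from that list, instead of profiling every virtual call.
std::vector<uint32_t> CollectInlineCachePcs(const std::vector<Invoke>& invokes,
                                            bool compiling_baseline_jit) {
  std::vector<uint32_t> pcs;
  for (const Invoke& invoke : invokes) {
    if (NeedsInlineCache(invoke, compiling_baseline_jit)) {
      pcs.push_back(invoke.dex_pc);
    }
  }
  return pcs;
}

In other words, inline caches are now allocated only for baseline JIT
compilations of virtual/interface calls whose target cannot be determined
statically.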
The removal of some inline cache profiling also improves the performance
of Compose scrolling. Jank data for 20 seconds, averaged over 50 runs:
- For Go Mokey:
  - Before: ~485 frames drawn / ~17.02% janky frames
  - After: ~525 frames drawn / ~14.64% janky frames
- For Pixel 8 Pro:
  - Before: ~2433 frames drawn / 1.02% janky frames
  - After: ~2443 frames drawn / 0.91% janky frames
Test: test.py
Change-Id: Ide4fab058d55b65b66dcf10e835f05877e71b7fc
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--   compiler/optimizing/code_generator_arm64.cc      |  35
-rw-r--r--   compiler/optimizing/code_generator_arm_vixl.cc    |  40
-rw-r--r--   compiler/optimizing/code_generator_riscv64.cc     |  50
-rw-r--r--   compiler/optimizing/code_generator_x86.cc         |  46
-rw-r--r--   compiler/optimizing/code_generator_x86_64.cc      |  34
-rw-r--r--   compiler/optimizing/optimizing_compiler.cc        |  17
-rw-r--r--   compiler/optimizing/profiling_info_builder.cc     |  89
-rw-r--r--   compiler/optimizing/profiling_info_builder.h      |  60
8 files changed, 274 insertions, 97 deletions
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4f5db1f1c2..4be0542236 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -45,6 +45,7 @@
 #include "offsets.h"
 #include "optimizing/common_arm64.h"
 #include "optimizing/nodes.h"
+#include "profiling_info_builder.h"
 #include "thread.h"
 #include "trace.h"
 #include "utils/arm64/assembler_arm64.h"
@@ -4593,24 +4594,26 @@ void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
 void CodeGeneratorARM64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
                                                        Register klass) {
   DCHECK_EQ(klass.GetCode(), 0u);
-  // We know the destination of an intrinsic, so no need to record inline
-  // caches.
-  if (!instruction->GetLocations()->Intrinsified() &&
-      GetGraph()->IsCompilingBaseline() &&
-      !Runtime::Current()->IsAotCompiler()) {
-    DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+  if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke())) {
     ProfilingInfo* info = GetGraph()->GetProfilingInfo();
     DCHECK(info != nullptr);
-    InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
-    uint64_t address = reinterpret_cast64<uint64_t>(cache);
-    vixl::aarch64::Label done;
-    __ Mov(x8, address);
-    __ Ldr(w9, MemOperand(x8, InlineCache::ClassesOffset().Int32Value()));
-    // Fast path for a monomorphic cache.
-    __ Cmp(klass.W(), w9);
-    __ B(eq, &done);
-    InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
-    __ Bind(&done);
+    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
+    if (cache != nullptr) {
+      uint64_t address = reinterpret_cast64<uint64_t>(cache);
+      vixl::aarch64::Label done;
+      __ Mov(x8, address);
+      __ Ldr(w9, MemOperand(x8, InlineCache::ClassesOffset().Int32Value()));
+      // Fast path for a monomorphic cache.
+      __ Cmp(klass.W(), w9);
+      __ B(eq, &done);
+      InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
+      __ Bind(&done);
+    } else {
+      // This is unexpected, but we don't guarantee stable compilation across
+      // JIT runs so just warn about it.
+      ScopedObjectAccess soa(Thread::Current());
+      LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod();
+    }
   }
 }
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index dacf034327..627c9bcf0b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -39,6 +39,7 @@
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/var_handle.h"
+#include "profiling_info_builder.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread.h"
 #include "trace.h"
@@ -3684,26 +3685,27 @@ void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
 void CodeGeneratorARMVIXL::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
                                                          vixl32::Register klass) {
   DCHECK_EQ(r0.GetCode(), klass.GetCode());
-  // We know the destination of an intrinsic, so no need to record inline
-  // caches.
-  if (!instruction->GetLocations()->Intrinsified() &&
-      GetGraph()->IsCompilingBaseline() &&
-      !Runtime::Current()->IsAotCompiler()) {
-    DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+  if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke())) {
     ProfilingInfo* info = GetGraph()->GetProfilingInfo();
-    DCHECK(info != nullptr);
-    InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
-    uint32_t address = reinterpret_cast32<uint32_t>(cache);
-    vixl32::Label done;
-    UseScratchRegisterScope temps(GetVIXLAssembler());
-    temps.Exclude(ip);
-    __ Mov(r4, address);
-    __ Ldr(ip, MemOperand(r4, InlineCache::ClassesOffset().Int32Value()));
-    // Fast path for a monomorphic cache.
-    __ Cmp(klass, ip);
-    __ B(eq, &done, /* is_far_target= */ false);
-    InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
-    __ Bind(&done);
+    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
+    if (cache != nullptr) {
+      uint32_t address = reinterpret_cast32<uint32_t>(cache);
+      vixl32::Label done;
+      UseScratchRegisterScope temps(GetVIXLAssembler());
+      temps.Exclude(ip);
+      __ Mov(r4, address);
+      __ Ldr(ip, MemOperand(r4, InlineCache::ClassesOffset().Int32Value()));
+      // Fast path for a monomorphic cache.
+      __ Cmp(klass, ip);
+      __ B(eq, &done, /* is_far_target= */ false);
+      InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
+      __ Bind(&done);
+    } else {
+      // This is unexpected, but we don't guarantee stable compilation across
+      // JIT runs so just warn about it.
+      ScopedObjectAccess soa(Thread::Current());
+      LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod();
+    }
   }
 }
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 1e1dd4ab3b..f5b16d43d1 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -34,6 +34,7 @@
 #include "linker/linker_patch.h"
 #include "mirror/class-inl.h"
 #include "optimizing/nodes.h"
+#include "optimizing/profiling_info_builder.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack_map_stream.h"
@@ -6720,32 +6721,35 @@ void CodeGeneratorRISCV64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* inv
 void CodeGeneratorRISCV64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
                                                          XRegister klass) {
-  // We know the destination of an intrinsic, so no need to record inline caches.
-  if (!instruction->GetLocations()->Intrinsified() &&
-      GetGraph()->IsCompilingBaseline() &&
-      !Runtime::Current()->IsAotCompiler()) {
-    DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+  if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke())) {
     ProfilingInfo* info = GetGraph()->GetProfilingInfo();
     DCHECK(info != nullptr);
-    InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
-    uint64_t address = reinterpret_cast64<uint64_t>(cache);
-    Riscv64Label done;
-    // The `art_quick_update_inline_cache` expects the inline cache in T5.
-    XRegister ic_reg = T5;
-    ScratchRegisterScope srs(GetAssembler());
-    DCHECK_EQ(srs.AvailableXRegisters(), 2u);
-    srs.ExcludeXRegister(ic_reg);
-    DCHECK_EQ(srs.AvailableXRegisters(), 1u);
-    __ LoadConst64(ic_reg, address);
-    {
-      ScratchRegisterScope srs2(GetAssembler());
-      XRegister tmp = srs2.AllocateXRegister();
-      __ Loadd(tmp, ic_reg, InlineCache::ClassesOffset().Int32Value());
-      // Fast path for a monomorphic cache.
-      __ Beq(klass, tmp, &done);
+    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
+    if (cache != nullptr) {
+      uint64_t address = reinterpret_cast64<uint64_t>(cache);
+      Riscv64Label done;
+      // The `art_quick_update_inline_cache` expects the inline cache in T5.
+      XRegister ic_reg = T5;
+      ScratchRegisterScope srs(GetAssembler());
+      DCHECK_EQ(srs.AvailableXRegisters(), 2u);
+      srs.ExcludeXRegister(ic_reg);
+      DCHECK_EQ(srs.AvailableXRegisters(), 1u);
+      __ LoadConst64(ic_reg, address);
+      {
+        ScratchRegisterScope srs2(GetAssembler());
+        XRegister tmp = srs2.AllocateXRegister();
+        __ Loadd(tmp, ic_reg, InlineCache::ClassesOffset().Int32Value());
+        // Fast path for a monomorphic cache.
+        __ Beq(klass, tmp, &done);
+      }
+      InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
+      __ Bind(&done);
+    } else {
+      // This is unexpected, but we don't guarantee stable compilation across
+      // JIT runs so just warn about it.
+      ScopedObjectAccess soa(Thread::Current());
+      LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod();
     }
-    InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
-    __ Bind(&done);
   }
 }
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a923e578c0..f07860031d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -37,6 +37,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/var_handle.h"
 #include "optimizing/nodes.h"
+#include "profiling_info_builder.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread.h"
 #include "trace.h"
@@ -2798,7 +2799,7 @@ void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   HandleInvoke(invoke);
-  if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+  if (ProfilingInfoBuilder::IsInlineCacheUseful(invoke)) {
     // Add one temporary for inline cache update.
     invoke->GetLocations()->AddTemp(Location::RegisterLocation(EBP));
   }
@@ -2826,7 +2827,7 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
   // Add the hidden argument.
   invoke->GetLocations()->AddTemp(Location::FpuRegisterLocation(XMM7));
-  if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+  if (ProfilingInfoBuilder::IsInlineCacheUseful(invoke)) {
     // Add one temporary for inline cache update.
     invoke->GetLocations()->AddTemp(Location::RegisterLocation(EBP));
   }
@@ -2844,29 +2845,30 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
 void CodeGeneratorX86::MaybeGenerateInlineCacheCheck(HInstruction* instruction, Register klass) {
   DCHECK_EQ(EAX, klass);
-  // We know the destination of an intrinsic, so no need to record inline
-  // caches (also the intrinsic location builder doesn't request an additional
-  // temporary).
-  if (!instruction->GetLocations()->Intrinsified() &&
-      GetGraph()->IsCompilingBaseline() &&
-      !Runtime::Current()->IsAotCompiler()) {
-    DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+  if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke())) {
     ProfilingInfo* info = GetGraph()->GetProfilingInfo();
     DCHECK(info != nullptr);
-    InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
-    uint32_t address = reinterpret_cast32<uint32_t>(cache);
-    if (kIsDebugBuild) {
-      uint32_t temp_index = instruction->GetLocations()->GetTempCount() - 1u;
-      CHECK_EQ(EBP, instruction->GetLocations()->GetTemp(temp_index).AsRegister<Register>());
+    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
+    if (cache != nullptr) {
+      uint32_t address = reinterpret_cast32<uint32_t>(cache);
+      if (kIsDebugBuild) {
+        uint32_t temp_index = instruction->GetLocations()->GetTempCount() - 1u;
+        CHECK_EQ(EBP, instruction->GetLocations()->GetTemp(temp_index).AsRegister<Register>());
+      }
+      Register temp = EBP;
+      NearLabel done;
+      __ movl(temp, Immediate(address));
+      // Fast path for a monomorphic cache.
+      __ cmpl(klass, Address(temp, InlineCache::ClassesOffset().Int32Value()));
+      __ j(kEqual, &done);
+      GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(kQuickUpdateInlineCache).Int32Value());
+      __ Bind(&done);
+    } else {
+      // This is unexpected, but we don't guarantee stable compilation across
+      // JIT runs so just warn about it.
+      ScopedObjectAccess soa(Thread::Current());
+      LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod();
     }
-    Register temp = EBP;
-    NearLabel done;
-    __ movl(temp, Immediate(address));
-    // Fast path for a monomorphic cache.
-    __ cmpl(klass, Address(temp, InlineCache::ClassesOffset().Int32Value()));
-    __ j(kEqual, &done);
-    GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(kQuickUpdateInlineCache).Int32Value());
-    __ Bind(&done);
   }
 }
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 24cb0c30b7..c777258201 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -38,6 +38,7 @@
 #include "mirror/object_reference.h"
 #include "mirror/var_handle.h"
 #include "optimizing/nodes.h"
+#include "profiling_info_builder.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread.h"
 #include "trace.h"
@@ -3094,23 +3095,26 @@ void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
 void CodeGeneratorX86_64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
                                                         CpuRegister klass) {
   DCHECK_EQ(RDI, klass.AsRegister());
-  // We know the destination of an intrinsic, so no need to record inline
-  // caches.
-  if (!instruction->GetLocations()->Intrinsified() &&
-      GetGraph()->IsCompilingBaseline() &&
-      !Runtime::Current()->IsAotCompiler()) {
+  if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke())) {
     ProfilingInfo* info = GetGraph()->GetProfilingInfo();
     DCHECK(info != nullptr);
-    InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
-    uint64_t address = reinterpret_cast64<uint64_t>(cache);
-    NearLabel done;
-    __ movq(CpuRegister(TMP), Immediate(address));
-    // Fast path for a monomorphic cache.
-    __ cmpl(Address(CpuRegister(TMP), InlineCache::ClassesOffset().Int32Value()), klass);
-    __ j(kEqual, &done);
-    GenerateInvokeRuntime(
-        GetThreadOffset<kX86_64PointerSize>(kQuickUpdateInlineCache).Int32Value());
-    __ Bind(&done);
+    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
+    if (cache != nullptr) {
+      uint64_t address = reinterpret_cast64<uint64_t>(cache);
+      NearLabel done;
+      __ movq(CpuRegister(TMP), Immediate(address));
+      // Fast path for a monomorphic cache.
+      __ cmpl(Address(CpuRegister(TMP), InlineCache::ClassesOffset().Int32Value()), klass);
+      __ j(kEqual, &done);
+      GenerateInvokeRuntime(
+          GetThreadOffset<kX86_64PointerSize>(kQuickUpdateInlineCache).Int32Value());
+      __ Bind(&done);
+    } else {
+      // This is unexpected, but we don't guarantee stable compilation across
+      // JIT runs so just warn about it.
+      ScopedObjectAccess soa(Thread::Current());
+      LOG(WARNING) << "Missing inline cache for " << GetGraph()->GetArtMethod()->PrettyMethod();
+    }
   }
 }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 0069a20a26..2886e731b5 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -53,6 +53,7 @@
 #include "oat_quick_method_header.h"
 #include "optimizing/write_barrier_elimination.h"
 #include "prepare_for_register_allocation.h"
+#include "profiling_info_builder.h"
 #include "reference_type_propagation.h"
 #include "register_allocator_linear_scan.h"
 #include "select_generator.h"
@@ -835,8 +836,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
   jit::Jit* jit = Runtime::Current()->GetJit();
   if (jit != nullptr) {
     ProfilingInfo* info = jit->GetCodeCache()->GetProfilingInfo(method, Thread::Current());
-    DCHECK_IMPLIES(compilation_kind == CompilationKind::kBaseline, info != nullptr)
-        << "Compiling a method baseline should always have a ProfilingInfo";
     graph->SetProfilingInfo(info);
   }
@@ -920,6 +919,20 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
               &pass_observer,
               regalloc_strategy,
               compilation_stats_.get());
+  // If we are compiling baseline and we haven't created a profiling info for
+  // this method already, do it now.
+  if (jit != nullptr &&
+      compilation_kind == CompilationKind::kBaseline &&
+      graph->GetProfilingInfo() == nullptr) {
+    ProfilingInfoBuilder(graph, codegen->GetCompilerOptions(), compilation_stats_.get()).Run();
+    // We expect a profiling info to be created and attached to the graph.
+    // However, we may have run out of memory trying to create it, so in this
+    // case just abort the compilation.
+    if (graph->GetProfilingInfo() == nullptr) {
+      MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
+      return nullptr;
+    }
+  }
   codegen->Compile();
   pass_observer.DumpDisassembly();
diff --git a/compiler/optimizing/profiling_info_builder.cc b/compiler/optimizing/profiling_info_builder.cc
new file mode 100644
index 0000000000..7e8cdb1454
--- /dev/null
+++ b/compiler/optimizing/profiling_info_builder.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "profiling_info_builder.h"
+
+#include "art_method-inl.h"
+#include "driver/compiler_options.h"
+#include "dex/code_item_accessors-inl.h"
+#include "jit/profiling_info.h"
+#include "optimizing_compiler_stats.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art HIDDEN {
+
+void ProfilingInfoBuilder::Run() {
+  DCHECK_EQ(GetGraph()->GetProfilingInfo(), nullptr);
+  // Order does not matter.
+  for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
+    // No need to visit the phis.
+    for (HInstructionIteratorHandleChanges inst_it(block->GetInstructions()); !inst_it.Done();
+         inst_it.Advance()) {
+      inst_it.Current()->Accept(this);
+    }
+  }
+
+  ScopedObjectAccess soa(Thread::Current());
+  GetGraph()->SetProfilingInfo(
+      ProfilingInfo::Create(soa.Self(), GetGraph()->GetArtMethod(), inline_caches_));
+}
+
+void ProfilingInfoBuilder::HandleInvoke(HInvoke* invoke) {
+  DCHECK(!invoke->GetEnvironment()->IsFromInlinedInvoke());
+  if (IsInlineCacheUseful(invoke)) {
+    inline_caches_.push_back(invoke->GetDexPc());
+  }
+}
+
+void ProfilingInfoBuilder::VisitInvokeInterface(HInvokeInterface* invoke) {
+  HandleInvoke(invoke);
+}
+
+void ProfilingInfoBuilder::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+  HandleInvoke(invoke);
+}
+
+bool ProfilingInfoBuilder::IsInlineCacheUseful(HInvoke* invoke) {
+  DCHECK(invoke->IsInvokeVirtual() || invoke->IsInvokeInterface());
+  if (invoke->IsIntrinsic()) {
+    return false;
+  }
+  if (!invoke->GetBlock()->GetGraph()->IsCompilingBaseline()) {
+    return false;
+  }
+  if (Runtime::Current()->IsAotCompiler()) {
+    return false;
+  }
+  if (invoke->InputAt(0)->GetReferenceTypeInfo().IsExact()) {
+    return false;
+  }
+  if (invoke->GetResolvedMethod() != nullptr) {
+    ScopedObjectAccess soa(Thread::Current());
+    if (invoke->GetResolvedMethod()->IsFinal() ||
+        invoke->GetResolvedMethod()->GetDeclaringClass()->IsFinal()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+InlineCache* ProfilingInfoBuilder::GetInlineCache(ProfilingInfo* info, HInvoke* instruction) {
+  DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+  ScopedObjectAccess soa(Thread::Current());
+  return info->GetInlineCache(instruction->GetDexPc());
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/profiling_info_builder.h b/compiler/optimizing/profiling_info_builder.h
new file mode 100644
index 0000000000..315b7de418
--- /dev/null
+++ b/compiler/optimizing/profiling_info_builder.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_PROFILING_INFO_BUILDER_H_
+#define ART_COMPILER_OPTIMIZING_PROFILING_INFO_BUILDER_H_
+
+#include "base/macros.h"
+#include "nodes.h"
+
+namespace art HIDDEN {
+
+class CompilerOptions;
+class InlineCache;
+class ProfilingInfo;
+
+class ProfilingInfoBuilder : public HGraphDelegateVisitor {
+ public:
+  ProfilingInfoBuilder(HGraph* graph,
+                       const CompilerOptions& compiler_options,
+                       OptimizingCompilerStats* stats = nullptr)
+      : HGraphDelegateVisitor(graph, stats),
+        compiler_options_(compiler_options) {}
+
+  void Run();
+
+  static constexpr const char* kProfilingInfoBuilderPassName =
+      "profiling_info_builder";
+
+  static InlineCache* GetInlineCache(ProfilingInfo* info, HInvoke* invoke);
+  static bool IsInlineCacheUseful(HInvoke* invoke);
+
+ private:
+  void VisitInvokeVirtual(HInvokeVirtual* invoke) override;
+  void VisitInvokeInterface(HInvokeInterface* invoke) override;
+
+  void HandleInvoke(HInvoke* invoke);
+
+  [[maybe_unused]] const CompilerOptions& compiler_options_;
+  std::vector<uint32_t> inline_caches_;
+
+  DISALLOW_COPY_AND_ASSIGN(ProfilingInfoBuilder);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_PROFILING_INFO_BUILDER_H_