Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc       9
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc    9
-rw-r--r--  compiler/optimizing/code_generator_riscv64.cc     9
-rw-r--r--  compiler/optimizing/code_generator_x86.cc         9
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc      9
-rw-r--r--  compiler/optimizing/graph_visualizer.cc           3
-rw-r--r--  compiler/optimizing/inliner.cc                   68
-rw-r--r--  compiler/optimizing/inliner.h                    12
-rw-r--r--  compiler/optimizing/optimization.cc               1
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc       36
-rw-r--r--  compiler/optimizing/profiling_info_builder.cc    59
-rw-r--r--  compiler/optimizing/profiling_info_builder.h     10
12 files changed, 44 insertions(+), 190 deletions(-)
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index b90974ab80..9027976165 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1298,11 +1298,7 @@ void CodeGeneratorARM64::MaybeIncrementHotness(bool is_frame_entry) {
__ Bind(&done);
}
- if (GetGraph()->IsCompilingBaseline() &&
- is_frame_entry &&
- !Runtime::Current()->IsAotCompiler()) {
- // Note the slow path doesn't save SIMD registers, so if we were to
- // call it on loop back edge, we would need to fix this.
+ if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
@@ -4601,8 +4597,7 @@ void CodeGeneratorARM64::MaybeGenerateInlineCacheCheck(HInstruction* instruction
if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
- InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(
- info, GetCompilerOptions(), instruction->AsInvoke());
+ InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
if (cache != nullptr) {
uint64_t address = reinterpret_cast64<uint64_t>(cache);
vixl::aarch64::Label done;
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 3e01e2fd17..00c14b0b46 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2299,11 +2299,7 @@ void CodeGeneratorARMVIXL::MaybeIncrementHotness(bool is_frame_entry) {
}
}
- if (GetGraph()->IsCompilingBaseline() &&
- is_frame_entry &&
- !Runtime::Current()->IsAotCompiler()) {
- // Note the slow path doesn't save SIMD registers, so if we were to
- // call it on loop back edge, we would need to fix this.
+ if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
@@ -3691,8 +3687,7 @@ void CodeGeneratorARMVIXL::MaybeGenerateInlineCacheCheck(HInstruction* instructi
DCHECK_EQ(r0.GetCode(), klass.GetCode());
if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
- InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(
- info, GetCompilerOptions(), instruction->AsInvoke());
+ InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
if (cache != nullptr) {
uint32_t address = reinterpret_cast32<uint32_t>(cache);
vixl32::Label done;
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 6b1b9e30eb..0c0b8a9f14 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -5714,11 +5714,7 @@ void CodeGeneratorRISCV64::MaybeIncrementHotness(bool is_frame_entry) {
__ Bind(&done);
}
- if (GetGraph()->IsCompilingBaseline() &&
- is_frame_entry &&
- !Runtime::Current()->IsAotCompiler()) {
- // Note the slow path doesn't save SIMD registers, so if we were to
- // call it on loop back edge, we would need to fix this.
+ if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
@@ -6738,8 +6734,7 @@ void CodeGeneratorRISCV64::MaybeGenerateInlineCacheCheck(HInstruction* instructi
if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
- InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(
- info, GetCompilerOptions(), instruction->AsInvoke());
+ InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
if (cache != nullptr) {
uint64_t address = reinterpret_cast64<uint64_t>(cache);
Riscv64Label done;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 649b422f6d..71db5c99af 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1348,11 +1348,7 @@ void CodeGeneratorX86::MaybeIncrementHotness(bool is_frame_entry) {
}
}
- if (GetGraph()->IsCompilingBaseline() &&
- is_frame_entry &&
- !Runtime::Current()->IsAotCompiler()) {
- // Note the slow path doesn't save SIMD registers, so if we were to
- // call it on loop back edge, we would need to fix this.
+ if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
uint32_t address = reinterpret_cast32<uint32_t>(info) +
@@ -2852,8 +2848,7 @@ void CodeGeneratorX86::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
- InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(
- info, GetCompilerOptions(), instruction->AsInvoke());
+ InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
if (cache != nullptr) {
uint32_t address = reinterpret_cast32<uint32_t>(cache);
if (kIsDebugBuild) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b29ba67cb1..9d010190f7 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1780,11 +1780,7 @@ void CodeGeneratorX86_64::MaybeIncrementHotness(bool is_frame_entry) {
__ Bind(&overflow);
}
- if (GetGraph()->IsCompilingBaseline() &&
- is_frame_entry &&
- !Runtime::Current()->IsAotCompiler()) {
- // Note the slow path doesn't save SIMD registers, so if we were to
- // call it on loop back edge, we would need to fix this.
+ if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
CHECK(!HasEmptyFrame());
@@ -3145,8 +3141,7 @@ void CodeGeneratorX86_64::MaybeGenerateInlineCacheCheck(HInstruction* instructio
if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
- InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(
- info, GetCompilerOptions(), instruction->AsInvoke());
+ InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
if (cache != nullptr) {
uint64_t address = reinterpret_cast64<uint64_t>(cache);
NearLabel done;
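
All five code generators receive the same pair of edits: the baseline hotness block drops its is_frame_entry guard together with the stale SIMD-registers note, and the inline-cache lookup loses its CompilerOptions argument. A minimal C++ sketch of the shared shape that results, using only names visible in the hunks above (a pseudostructure, not any single backend's code):

    // Hotness counters are now maintained on loop back edges as well as on
    // frame entry for baseline JIT code.
    if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
      ProfilingInfo* info = GetGraph()->GetProfilingInfo();
      DCHECK(info != nullptr);
      // ... backend-specific counter increment and slow-path call ...
    }

    // Inline-cache check: the cache is keyed by the invoke's own dex pc, so
    // CompilerOptions (inline_max_code_units) is no longer needed to find it.
    if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) {
      ProfilingInfo* info = GetGraph()->GetProfilingInfo();
      DCHECK(info != nullptr);
      InlineCache* cache =
          ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
      if (cache != nullptr) {
        // ... backend-specific receiver-class compare and update-stub call ...
      }
    }
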
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index afbf941355..b7f7a0f550 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -867,8 +867,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
std::ostringstream oss;
oss << pass_name_;
if (!IsDebugDump()) {
- oss << " (" << (GetGraph()->IsCompilingBaseline() ? "baseline " : "")
- << (is_after_pass_ ? "after" : "before")
+ oss << " (" << (is_after_pass_ ? "after" : "before")
<< (graph_in_bad_state_ ? ", bad_state" : "") << ")";
}
PrintProperty("name", oss.str().c_str());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index fcd899ed1c..e0bf028138 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -37,7 +37,6 @@
#include "mirror/object_array-alloc-inl.h"
#include "mirror/object_array-inl.h"
#include "nodes.h"
-#include "profiling_info_builder.h"
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
#include "scoped_thread_state_change-inl.h"
@@ -533,15 +532,6 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
return result;
}
- if (graph_->IsCompilingBaseline()) {
- LOG_FAIL_NO_STAT() << "Call to " << invoke_instruction->GetMethodReference().PrettyMethod()
- << " not inlined because we are compiling baseline and we could not"
- << " statically resolve the target";
- // For baseline compilation, we will collect inline caches, so we should not
- // try to inline using them.
- return false;
- }
-
DCHECK(!invoke_instruction->IsInvokeStaticOrDirect());
// No try catch inlining allowed here, or recursively. For try catch inlining we are banking on
@@ -692,36 +682,17 @@ HInliner::InlineCacheType HInliner::GetInlineCacheJIT(
ArtMethod* caller = graph_->GetArtMethod();
// Under JIT, we should always know the caller.
DCHECK(caller != nullptr);
-
- InlineCache* cache = nullptr;
- // Start with the outer graph profiling info.
- ProfilingInfo* profiling_info = outermost_graph_->GetProfilingInfo();
- if (profiling_info != nullptr) {
- if (depth_ == 0) {
- cache = profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
- } else {
- uint32_t dex_pc = ProfilingInfoBuilder::EncodeInlinedDexPc(
- this, codegen_->GetCompilerOptions(), invoke_instruction);
- if (dex_pc != kNoDexPc) {
- cache = profiling_info->GetInlineCache(dex_pc);
- }
- }
- }
-
- if (cache == nullptr) {
- // Check the current graph profiling info.
- profiling_info = graph_->GetProfilingInfo();
- if (profiling_info == nullptr) {
- return kInlineCacheNoData;
- }
-
- cache = profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
+ ProfilingInfo* profiling_info = graph_->GetProfilingInfo();
+ if (profiling_info == nullptr) {
+ return kInlineCacheNoData;
}
+ InlineCache* cache = profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
if (cache == nullptr) {
- // Either we never hit this invoke and we never compiled the callee,
- // or the method wasn't resolved when we performed baseline compilation.
- // Bail for now.
+ // This shouldn't happen, but we don't guarantee that method resolution
+ // between baseline compilation and optimizing compilation is identical. Be robust,
+ // warn about it, and return that we don't have any inline cache data.
+ LOG(WARNING) << "No inline cache found for " << caller->PrettyMethod();
return kInlineCacheNoData;
}
Runtime::Current()->GetJit()->GetCodeCache()->CopyInlineCacheInto(*cache, classes);
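
The interleaved -/+ lines above read more easily linearized; the lookup that remains is single level, consulting only the current graph's ProfilingInfo (a reconstruction of the surviving lines, declarations elided):

    ProfilingInfo* profiling_info = graph_->GetProfilingInfo();
    if (profiling_info == nullptr) {
      return kInlineCacheNoData;
    }
    InlineCache* cache = profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
    if (cache == nullptr) {
      // Resolution may differ between baseline and optimizing compilation.
      LOG(WARNING) << "No inline cache found for " << caller->PrettyMethod();
      return kInlineCacheNoData;
    }
    Runtime::Current()->GetJit()->GetCodeCache()->CopyInlineCacheInto(*cache, classes);
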
@@ -747,12 +718,6 @@ HInliner::InlineCacheType HInliner::GetInlineCacheAOT(
const ProfileCompilationInfo::InlineCacheMap* inline_caches = hotness.GetInlineCacheMap();
DCHECK(inline_caches != nullptr);
-
- // Inlined inline caches are not supported in AOT, so we use the dex pc directly, and don't
- // call `InlineCache::EncodeDexPc`.
- // To support it, we would need to ensure `inline_max_code_units` remain the
- // same between dex2oat and runtime, for example by adding it to the boot
- // image oat header.
const auto it = inline_caches->find(invoke_instruction->GetDexPc());
if (it == inline_caches->end()) {
return kInlineCacheUninitialized;
@@ -2112,20 +2077,6 @@ bool HInliner::CanInlineBody(const HGraph* callee_graph,
<< " could not be inlined because it needs a BSS check";
return false;
}
-
- if (outermost_graph_->IsCompilingBaseline() &&
- (current->IsInvokeVirtual() || current->IsInvokeInterface()) &&
- ProfilingInfoBuilder::IsInlineCacheUseful(current->AsInvoke(), codegen_)) {
- uint32_t maximum_inlining_depth_for_baseline =
- InlineCache::MaxDexPcEncodingDepth(
- outermost_graph_->GetArtMethod(),
- codegen_->GetCompilerOptions().GetInlineMaxCodeUnits());
- if (depth_ + 1 > maximum_inlining_depth_for_baseline) {
- LOG_FAIL_NO_STAT() << "Reached maximum depth for inlining in baseline compilation: "
- << depth_ << " for " << callee_graph->GetArtMethod()->PrettyMethod();
- return false;
- }
- }
}
}
@@ -2237,7 +2188,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
// The current invoke is not a try block.
!invoke_instruction->GetBlock()->IsTryBlock();
RunOptimizations(callee_graph,
- invoke_instruction->GetEnvironment(),
code_item,
dex_compilation_unit,
try_catch_inlining_allowed_for_recursive_inline);
@@ -2277,7 +2227,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
}
void HInliner::RunOptimizations(HGraph* callee_graph,
- HEnvironment* caller_environment,
const dex::CodeItem* code_item,
const DexCompilationUnit& dex_compilation_unit,
bool try_catch_inlining_allowed_for_recursive_inline) {
@@ -2326,7 +2275,6 @@ void HInliner::RunOptimizations(HGraph* callee_graph,
total_number_of_dex_registers_ + accessor.RegistersSize(),
total_number_of_instructions_ + number_of_instructions,
this,
- caller_environment,
depth_ + 1,
try_catch_inlining_allowed_for_recursive_inline);
inliner.Run();
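
The recursive inliner construction correspondingly loses its environment argument; as it reads after this hunk (parameters not shown in the diff elided):

    HInliner inliner(/* ... unchanged leading arguments ... */,
                     total_number_of_dex_registers_ + accessor.RegistersSize(),
                     total_number_of_instructions_ + number_of_instructions,
                     this,        // parent inliner
                     depth_ + 1,  // caller_environment no longer threaded through
                     try_catch_inlining_allowed_for_recursive_inline);
    inliner.Run();
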
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 48600543c6..5e68dd866e 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -43,7 +43,6 @@ class HInliner : public HOptimization {
size_t total_number_of_dex_registers,
size_t total_number_of_instructions,
HInliner* parent,
- HEnvironment* caller_environment,
size_t depth,
bool try_catch_inlining_allowed,
const char* name = kInlinerPassName)
@@ -55,7 +54,6 @@ class HInliner : public HOptimization {
total_number_of_dex_registers_(total_number_of_dex_registers),
total_number_of_instructions_(total_number_of_instructions),
parent_(parent),
- caller_environment_(caller_environment),
depth_(depth),
inlining_budget_(0),
try_catch_inlining_allowed_(try_catch_inlining_allowed),
@@ -66,12 +64,6 @@ class HInliner : public HOptimization {
static constexpr const char* kInlinerPassName = "inliner";
- const HInliner* GetParent() const { return parent_; }
- const HEnvironment* GetCallerEnvironment() const { return caller_environment_; }
-
- const HGraph* GetOutermostGraph() const { return outermost_graph_; }
- const HGraph* GetGraph() const { return graph_; }
-
private:
enum InlineCacheType {
kInlineCacheNoData = 0,
@@ -117,7 +109,6 @@ class HInliner : public HOptimization {
// Run simple optimizations on `callee_graph`.
void RunOptimizations(HGraph* callee_graph,
- HEnvironment* caller_environment,
const dex::CodeItem* code_item,
const DexCompilationUnit& dex_compilation_unit,
bool try_catch_inlining_allowed_for_recursive_inline)
@@ -330,10 +321,9 @@ class HInliner : public HOptimization {
const size_t total_number_of_dex_registers_;
size_t total_number_of_instructions_;
- // The 'parent' inliner, that means the inlinigng optimization that requested
+ // The 'parent' inliner, that means the inlining optimization that requested
// `graph_` to be inlined.
const HInliner* const parent_;
- const HEnvironment* const caller_environment_;
const size_t depth_;
// The budget left for inlining, in number of instructions.
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index a4df48c0ee..16045d447c 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -257,7 +257,6 @@ ArenaVector<HOptimization*> ConstructOptimizations(
accessor.RegistersSize(),
/* total_number_of_instructions= */ 0,
/* parent= */ nullptr,
- /* caller_environment= */ nullptr,
/* depth= */ 0,
/* try_catch_inlining_allowed= */ true,
pass_name);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 5868f22ab8..d458462226 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -368,10 +368,10 @@ class OptimizingCompiler final : public Compiler {
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer) const;
- bool RunRequiredPasses(HGraph* graph,
- CodeGenerator* codegen,
- const DexCompilationUnit& dex_compilation_unit,
- PassObserver* pass_observer) const;
+ bool RunBaselineOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer) const;
std::vector<uint8_t> GenerateJitDebugInfo(const debug::MethodDebugInfo& method_debug_info);
@@ -444,10 +444,10 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
instruction_set == InstructionSet::kX86_64;
}
-bool OptimizingCompiler::RunRequiredPasses(HGraph* graph,
- CodeGenerator* codegen,
- const DexCompilationUnit& dex_compilation_unit,
- PassObserver* pass_observer) const {
+bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer) const {
switch (codegen->GetCompilerOptions().GetInstructionSet()) {
#if defined(ART_ENABLE_CODEGEN_arm)
case InstructionSet::kThumb2:
@@ -904,15 +904,21 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
}
}
- if (compilation_kind == CompilationKind::kBaseline && compiler_options.ProfileBranches()) {
- // Branch profiling currently doesn't support running optimizations.
- RunRequiredPasses(graph, codegen.get(), dex_compilation_unit, &pass_observer);
+ if (compilation_kind == CompilationKind::kBaseline) {
+ RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
} else {
RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
PassScope scope(WriteBarrierElimination::kWBEPassName, &pass_observer);
WriteBarrierElimination(graph, compilation_stats_.get()).Run();
}
+ RegisterAllocator::Strategy regalloc_strategy =
+ compiler_options.GetRegisterAllocationStrategy();
+ AllocateRegisters(graph,
+ codegen.get(),
+ &pass_observer,
+ regalloc_strategy,
+ compilation_stats_.get());
// If we are compiling baseline and we haven't created a profiling info for
// this method already, do it now.
if (jit != nullptr &&
@@ -929,14 +935,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
}
}
- RegisterAllocator::Strategy regalloc_strategy =
- compiler_options.GetRegisterAllocationStrategy();
- AllocateRegisters(graph,
- codegen.get(),
- &pass_observer,
- regalloc_strategy,
- compilation_stats_.get());
-
codegen->Compile();
pass_observer.DumpDisassembly();
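
Net effect on TryCompile: baseline compilation now always takes the RunBaselineOptimizations path (previously only under ProfileBranches()), and register allocation runs before the profiling-info block rather than after it. Condensed, the resulting order is (names from the hunks above, surrounding code elided):

    if (compilation_kind == CompilationKind::kBaseline) {
      RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
    } else {
      RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
      // ... WriteBarrierElimination pass ...
    }
    AllocateRegisters(graph, codegen.get(), &pass_observer,
                      compiler_options.GetRegisterAllocationStrategy(),
                      compilation_stats_.get());
    // ... create the ProfilingInfo for baseline JIT compilation if missing ...
    codegen->Compile();
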
diff --git a/compiler/optimizing/profiling_info_builder.cc b/compiler/optimizing/profiling_info_builder.cc
index 19795f5466..7888753830 100644
--- a/compiler/optimizing/profiling_info_builder.cc
+++ b/compiler/optimizing/profiling_info_builder.cc
@@ -20,7 +20,6 @@
#include "code_generator.h"
#include "driver/compiler_options.h"
#include "dex/code_item_accessors-inl.h"
-#include "inliner.h"
#include "jit/profiling_info.h"
#include "optimizing_compiler_stats.h"
#include "scoped_thread_state_change-inl.h"
@@ -43,53 +42,10 @@ void ProfilingInfoBuilder::Run() {
ProfilingInfo::Create(soa.Self(), GetGraph()->GetArtMethod(), inline_caches_));
}
-
-uint32_t ProfilingInfoBuilder::EncodeInlinedDexPc(const HInliner* inliner,
- const CompilerOptions& compiler_options,
- HInvoke* invoke) {
- DCHECK(inliner->GetCallerEnvironment() != nullptr);
- DCHECK(inliner->GetParent() != nullptr);
- std::vector<uint32_t> temp_vector;
- temp_vector.push_back(invoke->GetDexPc());
- while (inliner->GetCallerEnvironment() != nullptr) {
- temp_vector.push_back(inliner->GetCallerEnvironment()->GetDexPc());
- inliner = inliner->GetParent();
- }
-
- DCHECK_EQ(inliner->GetOutermostGraph(), inliner->GetGraph());
- return InlineCache::EncodeDexPc(
- inliner->GetOutermostGraph()->GetArtMethod(),
- temp_vector,
- compiler_options.GetInlineMaxCodeUnits());
-}
-
-static uint32_t EncodeDexPc(HInvoke* invoke, const CompilerOptions& compiler_options) {
- std::vector<uint32_t> dex_pcs;
- ArtMethod* outer_method = nullptr;
- for (HEnvironment* environment = invoke->GetEnvironment();
- environment != nullptr;
- environment = environment->GetParent()) {
- outer_method = environment->GetMethod();
- dex_pcs.push_back(environment->GetDexPc());
- }
-
- ScopedObjectAccess soa(Thread::Current());
- return InlineCache::EncodeDexPc(
- outer_method,
- dex_pcs,
- compiler_options.GetInlineMaxCodeUnits());
-}
-
void ProfilingInfoBuilder::HandleInvoke(HInvoke* invoke) {
+ DCHECK(!invoke->GetEnvironment()->IsFromInlinedInvoke());
if (IsInlineCacheUseful(invoke, codegen_)) {
- uint32_t dex_pc = EncodeDexPc(invoke, compiler_options_);
- if (dex_pc != kNoDexPc) {
- inline_caches_.push_back(dex_pc);
- } else {
- ScopedObjectAccess soa(Thread::Current());
- LOG(WARNING) << "Could not encode dex pc for "
- << invoke->GetResolvedMethod()->PrettyMethod();
- }
+ inline_caches_.push_back(invoke->GetDexPc());
}
}
@@ -125,15 +81,10 @@ bool ProfilingInfoBuilder::IsInlineCacheUseful(HInvoke* invoke, CodeGenerator* c
return true;
}
-InlineCache* ProfilingInfoBuilder::GetInlineCache(ProfilingInfo* info,
- const CompilerOptions& compiler_options,
- HInvoke* instruction) {
+InlineCache* ProfilingInfoBuilder::GetInlineCache(ProfilingInfo* info, HInvoke* instruction) {
+ DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
ScopedObjectAccess soa(Thread::Current());
- uint32_t dex_pc = EncodeDexPc(instruction, compiler_options);
- if (dex_pc == kNoDexPc) {
- return nullptr;
- }
- return info->GetInlineCache(dex_pc);
+ return info->GetInlineCache(instruction->GetDexPc());
}
} // namespace art
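
With EncodeInlinedDexPc and the file-local EncodeDexPc helper deleted, the builder stores plain dex pcs and the lookup mirrors it. The two surviving functions, reconstructed from the + lines above:

    void ProfilingInfoBuilder::HandleInvoke(HInvoke* invoke) {
      // The DCHECK guarantees the invoke sits in the outermost frame, so its
      // dex pc needs no inlined-chain encoding.
      DCHECK(!invoke->GetEnvironment()->IsFromInlinedInvoke());
      if (IsInlineCacheUseful(invoke, codegen_)) {
        inline_caches_.push_back(invoke->GetDexPc());
      }
    }

    InlineCache* ProfilingInfoBuilder::GetInlineCache(ProfilingInfo* info, HInvoke* instruction) {
      DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
      ScopedObjectAccess soa(Thread::Current());
      return info->GetInlineCache(instruction->GetDexPc());
    }
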
diff --git a/compiler/optimizing/profiling_info_builder.h b/compiler/optimizing/profiling_info_builder.h
index c8dc59a03c..2185b0eed3 100644
--- a/compiler/optimizing/profiling_info_builder.h
+++ b/compiler/optimizing/profiling_info_builder.h
@@ -24,7 +24,6 @@ namespace art HIDDEN {
class CodeGenerator;
class CompilerOptions;
-class HInliner;
class InlineCache;
class ProfilingInfo;
@@ -43,13 +42,8 @@ class ProfilingInfoBuilder : public HGraphDelegateVisitor {
static constexpr const char* kProfilingInfoBuilderPassName =
"profiling_info_builder";
- static InlineCache* GetInlineCache(ProfilingInfo* info,
- const CompilerOptions& compiler_options,
- HInvoke* invoke);
+ static InlineCache* GetInlineCache(ProfilingInfo* info, HInvoke* invoke);
static bool IsInlineCacheUseful(HInvoke* invoke, CodeGenerator* codegen);
- static uint32_t EncodeInlinedDexPc(
- const HInliner* inliner, const CompilerOptions& compiler_options, HInvoke* invoke)
- REQUIRES_SHARED(Locks::mutator_lock_);
private:
void VisitInvokeVirtual(HInvokeVirtual* invoke) override;
@@ -58,7 +52,7 @@ class ProfilingInfoBuilder : public HGraphDelegateVisitor {
void HandleInvoke(HInvoke* invoke);
CodeGenerator* codegen_;
- const CompilerOptions& compiler_options_;
+ [[maybe_unused]] const CompilerOptions& compiler_options_;
std::vector<uint32_t> inline_caches_;
DISALLOW_COPY_AND_ASSIGN(ProfilingInfoBuilder);