author Eric Holk <eholk@google.com> 2020-02-12 09:10:21 -0800
committer Eric Holk <eholk@google.com> 2020-02-21 18:51:23 +0000
commit 1868de9c954e057c30ff9a086a213c86a75d7fb6 (patch)
tree cea533316ad0d1605a62620571426c28fbae147b
parent 5b768893456ca3a998b7a2a93490229febbec1cf (diff)
Refactor inliner
This change rearranges some of the inliner code. The main goal is to make some of the larger functions more readable and to make it clearer how to modify the inliner in the future. The specific changes include:

* Code to find the actual call target has been factored into a separate method.
* The call to TryInlineFromInlineCache has been made into an early exit rather than a fall-through case. This lowers the indentation level for the main inline case.
* The initial checks for whether inlining is possible have been split into IsInliningAllowed, IsInliningSupported, and IsInliningBudgetAvailable, to make it clearer why these restrictions are in place. Note that some of these checks now run in a different order.
* The checks that come after the inlined body has been optimized have been factored into a separate method. These have not been broken down further yet, but doing so would be worthwhile future work.
* CanAllocateRegistersFor has been removed. We should be able to allocate registers for any compiler we support; if not, this should be caught when we actually try to allocate registers.

Bug: 149392334
Test: ./test/testrunner/testrunner.py --host --optimizing --no-jvmti --ndebug \
      --64
Change-Id: Ic1b919e306b7b93944ee5686e2a487b2190c087c
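To make the restructured control flow easier to see at a glance, here is a small, self-contained C++ sketch of the shape TryInline takes after this change. The Method and Invoke types below are stand-ins invented for illustration, not ART's ArtMethod and HInvoke, and the bodies are reduced to the bare branching structure; the committed code is in the inliner.cc hunks below.

#include <cstdio>

// Stand-in types for illustration only; the real code uses ART's
// ArtMethod and HInvoke, declared in the hunks further down.
struct Method { const char* pretty_name; };
struct Invoke {
  Method* resolved_method;
  bool is_static_or_direct;
  Method* cha_single_target;  // Non-null if CHA devirtualization finds one target.
};

// Change 1: target resolution lives in its own helper, mirroring the new
// HInliner::FindActualCallTarget.
Method* FindActualCallTarget(Invoke* invoke, bool* cha_devirtualized) {
  *cha_devirtualized = false;
  if (invoke->is_static_or_direct) {
    return invoke->resolved_method;  // Target is known statically.
  }
  if (invoke->cha_single_target != nullptr) {
    *cha_devirtualized = true;  // Caller must record the CHA dependency.
    return invoke->cha_single_target;
  }
  return nullptr;
}

// Change 2: the inline-cache fallback is an early exit, so the single-target
// path no longer nests inside an `if (actual_method != nullptr)` block.
bool TryInline(Invoke* invoke) {
  bool cha_devirtualized = false;
  Method* target = FindActualCallTarget(invoke, &cha_devirtualized);
  if (target == nullptr) {
    std::puts("no single target: falling back to inline caches");
    return false;  // The real code calls TryInlineFromInlineCache here.
  }
  std::printf("inlining single target %s (cha_devirtualized=%d)\n",
              target->pretty_name, cha_devirtualized);
  return true;
}

int main() {
  Method callee{"void Foo.bar()"};
  Invoke direct{&callee, true, nullptr};   // resolved statically
  Invoke virt{&callee, false, nullptr};    // needs inline caches
  Invoke devirt{&callee, false, &callee};  // devirtualized via CHA
  TryInline(&direct);
  TryInline(&virt);
  TryInline(&devirt);
}

With the fallback hoisted to an early return, the main inline case reads straight down at one indentation level, which is the readability gain the message describes.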
-rw-r--r-- compiler/optimizing/inliner.cc 455
-rw-r--r-- compiler/optimizing/inliner.h 52
-rw-r--r-- compiler/optimizing/nodes.h 2
-rw-r--r-- compiler/optimizing/register_allocator.cc 9
-rw-r--r-- compiler/optimizing/register_allocator.h 3
5 files changed, 312 insertions, 209 deletions
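The split of the up-front checks can be sketched the same way. The MethodInfo struct and the kInlineMaxCodeUnits value here are made up for illustration; the real predicates take an ArtMethod and a CodeItemDataAccessor and log a MethodCompilationStat on each failure, as the diff below shows.

#include <cstddef>
#include <cstdio>

// Illustrative stand-in for what CodeItemDataAccessor exposes in the real code.
struct MethodInfo {
  bool has_code_item;   // False for native methods.
  bool compilable;      // False on unverified methods or unhandled soft failures.
  size_t tries_size;    // Number of try blocks.
  size_t code_units;    // Body size in dex code units.
};

constexpr size_t kInlineMaxCodeUnits = 32;  // Illustrative value only.

// Semantics: may ART inline this at all? (native, unverified, uncompilable)
bool IsInliningAllowed(const MethodInfo& m) {
  return m.has_code_item && m.compilable;
}

// Compiler support: does the inliner implement this shape of method?
bool IsInliningSupported(const MethodInfo& m) {
  return m.tries_size == 0;  // Try blocks are not supported yet.
}

// Resource limits: is there budget left for a body of this size?
bool IsInliningBudgetAvailable(const MethodInfo& m) {
  return m.code_units <= kInlineMaxCodeUnits;
}

bool TryBuildAndInline(const MethodInfo& m) {
  // Each group of checks now runs together rather than interleaved, which is
  // why the commit message notes that some checks run in a different order.
  if (!IsInliningAllowed(m)) return false;
  if (!IsInliningSupported(m)) return false;
  if (!IsInliningBudgetAvailable(m)) return false;
  return true;  // The real code then builds and optimizes the callee graph.
}

int main() {
  MethodInfo tiny{true, true, 0, 8};
  MethodInfo huge{true, true, 0, 4096};
  std::printf("tiny inlined: %d, huge inlined: %d\n",
              TryBuildAndInline(tiny), TryBuildAndInline(huge));
}

Grouping the checks into named predicates gives each restriction a home: semantics, compiler support, and resource budget each get one function, instead of all three concerns interleaving in TryBuildAndInline.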
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index ecaedc7cd8..24d6e656c2 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -465,6 +465,30 @@ static bool AlwaysThrows(const CompilerOptions& compiler_options, ArtMethod* met
return throw_seen;
}
+ArtMethod* HInliner::FindActualCallTarget(HInvoke* invoke_instruction, bool* cha_devirtualize) {
+ ArtMethod* resolved_method = invoke_instruction->GetResolvedMethod();
+ DCHECK(resolved_method != nullptr);
+
+ ArtMethod* actual_method = nullptr;
+ if (invoke_instruction->IsInvokeStaticOrDirect()) {
+ actual_method = resolved_method;
+ } else {
+ // Check if we can statically find the method.
+ actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method);
+ }
+
+ if (actual_method == nullptr) {
+ ArtMethod* method = TryCHADevirtualization(resolved_method);
+ if (method != nullptr) {
+ *cha_devirtualize = true;
+ actual_method = method;
+ LOG_NOTE() << "Try CHA-based inlining of " << actual_method->PrettyMethod();
+ }
+ }
+
+ return actual_method;
+}
+
bool HInliner::TryInline(HInvoke* invoke_instruction) {
if (invoke_instruction->IsInvokeUnresolved() ||
invoke_instruction->IsInvokePolymorphic() ||
@@ -485,56 +509,42 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
LOG_FAIL_NO_STAT() << "Not inlining a String.<init> method";
return false;
}
- ArtMethod* actual_method = nullptr;
-
- if (invoke_instruction->IsInvokeStaticOrDirect()) {
- actual_method = resolved_method;
- } else {
- // Check if we can statically find the method.
- actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method);
- }
bool cha_devirtualize = false;
- if (actual_method == nullptr) {
- ArtMethod* method = TryCHADevirtualization(resolved_method);
- if (method != nullptr) {
- cha_devirtualize = true;
- actual_method = method;
- LOG_NOTE() << "Try CHA-based inlining of " << actual_method->PrettyMethod();
- }
- }
+ ArtMethod* actual_method = FindActualCallTarget(invoke_instruction, &cha_devirtualize);
- if (actual_method != nullptr) {
- // Single target.
- bool result = TryInlineAndReplace(invoke_instruction,
- actual_method,
- ReferenceTypeInfo::CreateInvalid(),
- /* do_rtp= */ true,
- cha_devirtualize);
- if (result) {
- // Successfully inlined.
- if (!invoke_instruction->IsInvokeStaticOrDirect()) {
- if (cha_devirtualize) {
- // Add dependency due to devirtualization. We've assumed resolved_method
- // has single implementation.
- outermost_graph_->AddCHASingleImplementationDependency(resolved_method);
- MaybeRecordStat(stats_, MethodCompilationStat::kCHAInline);
- } else {
- MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
- }
+ // If we didn't find a method, see if we can inline from the inline caches.
+ if (actual_method == nullptr) {
+ DCHECK(!invoke_instruction->IsInvokeStaticOrDirect());
+
+ return TryInlineFromInlineCache(caller_dex_file, invoke_instruction, resolved_method);
+ }
+
+ // Single target.
+ bool result = TryInlineAndReplace(invoke_instruction,
+ actual_method,
+ ReferenceTypeInfo::CreateInvalid(),
+ /* do_rtp= */ true,
+ cha_devirtualize);
+ if (result) {
+ // Successfully inlined.
+ if (!invoke_instruction->IsInvokeStaticOrDirect()) {
+ if (cha_devirtualize) {
+ // Add dependency due to devirtualization. We've assumed resolved_method
+ // has single implementation.
+ outermost_graph_->AddCHASingleImplementationDependency(resolved_method);
+ MaybeRecordStat(stats_, MethodCompilationStat::kCHAInline);
+ } else {
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
}
- } else if (!cha_devirtualize && AlwaysThrows(codegen_->GetCompilerOptions(), actual_method)) {
- // Set always throws property for non-inlined method call with single target
- // (unless it was obtained through CHA, because that would imply we have
- // to add the CHA dependency, which seems not worth it).
- invoke_instruction->SetAlwaysThrows(true);
}
- return result;
+ } else if (!cha_devirtualize && AlwaysThrows(codegen_->GetCompilerOptions(), actual_method)) {
+ // Set always throws property for non-inlined method call with single target
+ // (unless it was obtained through CHA, because that would imply we have
+ // to add the CHA dependency, which seems not worth it).
+ invoke_instruction->SetAlwaysThrows(true);
}
- DCHECK(!invoke_instruction->IsInvokeStaticOrDirect());
-
- // Try using inline caches.
- return TryInlineFromInlineCache(caller_dex_file, invoke_instruction, resolved_method);
+ return result;
}
static Handle<mirror::ObjectArray<mirror::Class>> AllocateInlineCacheHolder(
@@ -1432,10 +1442,39 @@ static inline bool MayInline(const CompilerOptions& compiler_options,
return true;
}
-bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
- ArtMethod* method,
- ReferenceTypeInfo receiver_type,
- HInstruction** return_replacement) {
+// Returns whether inlining is allowed based on ART semantics.
+bool HInliner::IsInliningAllowed(ArtMethod* method, const CodeItemDataAccessor& accessor) const {
+ if (!accessor.HasCodeItem()) {
+ LOG_FAIL_NO_STAT()
+ << "Method " << method->PrettyMethod() << " is not inlined because it is native";
+ return false;
+ }
+
+ if (!method->IsCompilable()) {
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
+ << "Method " << method->PrettyMethod()
+ << " has soft failures un-handled by the compiler, so it cannot be inlined";
+ return false;
+ }
+
+ if (IsMethodUnverified(codegen_->GetCompilerOptions(), method)) {
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
+ << "Method " << method->PrettyMethod()
+ << " couldn't be verified, so it cannot be inlined";
+ return false;
+ }
+
+ return true;
+}
+
+// Returns whether ART supports inlining this method.
+//
+// Some methods are not supported because they have features for which inlining
+// is not implemented. For example, we do not currently support inlining throw
+// instructions into a try block.
+bool HInliner::IsInliningSupported(const HInvoke* invoke_instruction,
+ ArtMethod* method,
+ const CodeItemDataAccessor& accessor) const {
if (method->IsProxyMethod()) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedProxy)
<< "Method " << method->PrettyMethod()
@@ -1443,6 +1482,29 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
return false;
}
+ if (accessor.TriesSize() != 0) {
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedTryCatch)
+ << "Method " << method->PrettyMethod() << " is not inlined because of try block";
+ return false;
+ }
+
+ if (invoke_instruction->IsInvokeStaticOrDirect() &&
+ invoke_instruction->AsInvokeStaticOrDirect()->IsStaticWithImplicitClinitCheck()) {
+ // Case of a static method that cannot be inlined because it implicitly
+ // requires an initialization check of its declaring class.
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedDexCache)
+ << "Method " << method->PrettyMethod()
+ << " is not inlined because it is static and requires a clinit"
+ << " check that cannot be emitted due to Dex cache limitations";
+ return false;
+ }
+
+ return true;
+}
+
+// Returns whether our resource limits allow inlining this method.
+bool HInliner::IsInliningBudgetAvailable(ArtMethod* method,
+ const CodeItemDataAccessor& accessor) const {
if (CountRecursiveCallsOf(method) > kMaximumNumberOfRecursiveCalls) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedRecursiveBudget)
<< "Method "
@@ -1451,8 +1513,26 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
return false;
}
+ size_t inline_max_code_units = codegen_->GetCompilerOptions().GetInlineMaxCodeUnits();
+ if (accessor.InsnsSizeInCodeUnits() > inline_max_code_units) {
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCodeItem)
+ << "Method " << method->PrettyMethod()
+ << " is not inlined because its code item is too big: "
+ << accessor.InsnsSizeInCodeUnits()
+ << " > "
+ << inline_max_code_units;
+ return false;
+ }
+
+ return true;
+}
+
+bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
+ ArtMethod* method,
+ ReferenceTypeInfo receiver_type,
+ HInstruction** return_replacement) {
// Check whether we're allowed to inline. The outermost compilation unit is the relevant
- // dex file here (though the transitivity of an inline chain would allow checking the calller).
+ // dex file here (though the transitivity of an inline chain would allow checking the caller).
if (!MayInline(codegen_->GetCompilerOptions(),
*method->GetDexFile(),
*outer_compilation_unit_.GetDexFile())) {
@@ -1470,60 +1550,22 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
return false;
}
- bool same_dex_file = IsSameDexFile(*outer_compilation_unit_.GetDexFile(), *method->GetDexFile());
-
CodeItemDataAccessor accessor(method->DexInstructionData());
- if (!accessor.HasCodeItem()) {
- LOG_FAIL_NO_STAT()
- << "Method " << method->PrettyMethod() << " is not inlined because it is native";
- return false;
- }
-
- size_t inline_max_code_units = codegen_->GetCompilerOptions().GetInlineMaxCodeUnits();
- if (accessor.InsnsSizeInCodeUnits() > inline_max_code_units) {
- LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCodeItem)
- << "Method " << method->PrettyMethod()
- << " is not inlined because its code item is too big: "
- << accessor.InsnsSizeInCodeUnits()
- << " > "
- << inline_max_code_units;
+ if (!IsInliningAllowed(method, accessor)) {
return false;
}
- if (accessor.TriesSize() != 0) {
- LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedTryCatch)
- << "Method " << method->PrettyMethod() << " is not inlined because of try block";
+ if (!IsInliningSupported(invoke_instruction, method, accessor)) {
return false;
}
- if (!method->IsCompilable()) {
- LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
- << "Method " << method->PrettyMethod()
- << " has soft failures un-handled by the compiler, so it cannot be inlined";
- return false;
- }
-
- if (IsMethodUnverified(codegen_->GetCompilerOptions(), method)) {
- LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
- << "Method " << method->PrettyMethod()
- << " couldn't be verified, so it cannot be inlined";
- return false;
- }
-
- if (invoke_instruction->IsInvokeStaticOrDirect() &&
- invoke_instruction->AsInvokeStaticOrDirect()->IsStaticWithImplicitClinitCheck()) {
- // Case of a static method that cannot be inlined because it implicitly
- // requires an initialization check of its declaring class.
- LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedDexCache)
- << "Method " << method->PrettyMethod()
- << " is not inlined because it is static and requires a clinit"
- << " check that cannot be emitted due to Dex cache limitations";
+ if (!IsInliningBudgetAvailable(method, accessor)) {
return false;
}
if (!TryBuildAndInlineHelper(
- invoke_instruction, method, receiver_type, same_dex_file, return_replacement)) {
+ invoke_instruction, method, receiver_type, return_replacement)) {
return false;
}
@@ -1753,107 +1795,12 @@ static bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, Art
return false;
}
-bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
- ArtMethod* resolved_method,
- ReferenceTypeInfo receiver_type,
- bool same_dex_file,
- HInstruction** return_replacement) {
- DCHECK(!(resolved_method->IsStatic() && receiver_type.IsValid()));
- ScopedObjectAccess soa(Thread::Current());
- const dex::CodeItem* code_item = resolved_method->GetCodeItem();
- const DexFile& callee_dex_file = *resolved_method->GetDexFile();
- uint32_t method_index = resolved_method->GetDexMethodIndex();
- CodeItemDebugInfoAccessor code_item_accessor(resolved_method->DexInstructionDebugInfo());
- ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
- Handle<mirror::DexCache> dex_cache = NewHandleIfDifferent(resolved_method->GetDexCache(),
- caller_compilation_unit_.GetDexCache(),
- handles_);
- Handle<mirror::ClassLoader> class_loader =
- NewHandleIfDifferent(resolved_method->GetDeclaringClass()->GetClassLoader(),
- caller_compilation_unit_.GetClassLoader(),
- handles_);
-
- Handle<mirror::Class> compiling_class = handles_->NewHandle(resolved_method->GetDeclaringClass());
- DexCompilationUnit dex_compilation_unit(
- class_loader,
- class_linker,
- callee_dex_file,
- code_item,
- resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
- method_index,
- resolved_method->GetAccessFlags(),
- /* verified_method= */ nullptr,
- dex_cache,
- compiling_class);
-
- InvokeType invoke_type = invoke_instruction->GetInvokeType();
- if (invoke_type == kInterface) {
- // We have statically resolved the dispatch. To please the class linker
- // at runtime, we change this call as if it was a virtual call.
- invoke_type = kVirtual;
- }
-
- bool caller_dead_reference_safe = graph_->IsDeadReferenceSafe();
- const dex::ClassDef& callee_class = resolved_method->GetClassDef();
- // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
- // is currently rarely true.
- bool callee_dead_reference_safe =
- annotations::HasDeadReferenceSafeAnnotation(callee_dex_file, callee_class)
- && !annotations::MethodContainsRSensitiveAccess(callee_dex_file, callee_class, method_index);
-
- const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId();
- HGraph* callee_graph = new (graph_->GetAllocator()) HGraph(
- graph_->GetAllocator(),
- graph_->GetArenaStack(),
- callee_dex_file,
- method_index,
- codegen_->GetCompilerOptions().GetInstructionSet(),
- invoke_type,
- callee_dead_reference_safe,
- graph_->IsDebuggable(),
- /* osr= */ false,
- /* is_shared_jit_code= */ graph_->IsCompilingForSharedJitCode(),
- /* baseline= */ graph_->IsCompilingBaseline(),
- /* start_instruction_id= */ caller_instruction_counter);
- callee_graph->SetArtMethod(resolved_method);
-
- // When they are needed, allocate `inline_stats_` on the Arena instead
- // of on the stack, as Clang might produce a stack frame too large
- // for this function, that would not fit the requirements of the
- // `-Wframe-larger-than` option.
- if (stats_ != nullptr) {
- // Reuse one object for all inline attempts from this caller to keep Arena memory usage low.
- if (inline_stats_ == nullptr) {
- void* storage = graph_->GetAllocator()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc);
- inline_stats_ = new (storage) OptimizingCompilerStats;
- } else {
- inline_stats_->Reset();
- }
- }
- HGraphBuilder builder(callee_graph,
- code_item_accessor,
- &dex_compilation_unit,
- &outer_compilation_unit_,
- codegen_,
- inline_stats_,
- resolved_method->GetQuickenedInfo(),
- handles_);
-
- if (builder.BuildGraph() != kAnalysisSuccess) {
- LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCannotBuild)
- << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be built, so cannot be inlined";
- return false;
- }
-
- if (!RegisterAllocator::CanAllocateRegistersFor(
- *callee_graph, codegen_->GetCompilerOptions().GetInstructionSet())) {
- LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedRegisterAllocator)
- << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " cannot be inlined because of the register allocator";
- return false;
- }
-
+ // Substitutes parameters in the callee graph with their values from the caller.
+void HInliner::SubstituteArguments(HGraph* callee_graph,
+ HInvoke* invoke_instruction,
+ ReferenceTypeInfo receiver_type,
+ const DexCompilationUnit& dex_compilation_unit) {
+ ArtMethod* const resolved_method = callee_graph->GetArtMethod();
size_t parameter_index = 0;
bool run_rtp = false;
for (HInstructionIterator instructions(callee_graph->GetEntryBlock()->GetInstructions());
@@ -1896,8 +1843,23 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
handles_,
/* is_first_run= */ false).Run();
}
+}
- RunOptimizations(callee_graph, code_item, dex_compilation_unit);
+// Returns whether we can inline the callee_graph into the target_block.
+//
+// This performs a combination of semantics checks, compiler support checks, and
+// resource limit checks.
+//
+// If this function returns true, it will also set out_number_of_instructions to
+// the number of instructions in the inlined body.
+bool HInliner::CanInlineBody(const HGraph* callee_graph,
+ const HBasicBlock* target_block,
+ size_t* out_number_of_instructions) const {
+ const DexFile& callee_dex_file = callee_graph->GetDexFile();
+ ArtMethod* const resolved_method = callee_graph->GetArtMethod();
+ const uint32_t method_index = resolved_method->GetMethodIndex();
+ const bool same_dex_file =
+ IsSameDexFile(*outer_compilation_unit_.GetDexFile(), *resolved_method->GetDexFile());
HBasicBlock* exit_block = callee_graph->GetExitBlock();
if (exit_block == nullptr) {
@@ -1910,7 +1872,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
bool has_one_return = false;
for (HBasicBlock* predecessor : exit_block->GetPredecessors()) {
if (predecessor->GetLastInstruction()->IsThrow()) {
- if (invoke_instruction->GetBlock()->IsTryBlock()) {
+ if (target_block->IsTryBlock()) {
// TODO(ngeoffray): Support adding HTryBoundary in Hgraph::InlineInto.
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedTryCatch)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
@@ -2019,6 +1981,111 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
}
}
}
+
+ *out_number_of_instructions = number_of_instructions;
+ return true;
+}
+
+bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
+ ArtMethod* resolved_method,
+ ReferenceTypeInfo receiver_type,
+ HInstruction** return_replacement) {
+ DCHECK(!(resolved_method->IsStatic() && receiver_type.IsValid()));
+ const dex::CodeItem* code_item = resolved_method->GetCodeItem();
+ const DexFile& callee_dex_file = *resolved_method->GetDexFile();
+ uint32_t method_index = resolved_method->GetDexMethodIndex();
+ CodeItemDebugInfoAccessor code_item_accessor(resolved_method->DexInstructionDebugInfo());
+ ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
+ Handle<mirror::DexCache> dex_cache = NewHandleIfDifferent(resolved_method->GetDexCache(),
+ caller_compilation_unit_.GetDexCache(),
+ handles_);
+ Handle<mirror::ClassLoader> class_loader =
+ NewHandleIfDifferent(resolved_method->GetDeclaringClass()->GetClassLoader(),
+ caller_compilation_unit_.GetClassLoader(),
+ handles_);
+
+ Handle<mirror::Class> compiling_class = handles_->NewHandle(resolved_method->GetDeclaringClass());
+ DexCompilationUnit dex_compilation_unit(
+ class_loader,
+ class_linker,
+ callee_dex_file,
+ code_item,
+ resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
+ method_index,
+ resolved_method->GetAccessFlags(),
+ /* verified_method= */ nullptr,
+ dex_cache,
+ compiling_class);
+
+ InvokeType invoke_type = invoke_instruction->GetInvokeType();
+ if (invoke_type == kInterface) {
+ // We have statically resolved the dispatch. To please the class linker
+ // at runtime, we change this call as if it was a virtual call.
+ invoke_type = kVirtual;
+ }
+
+ bool caller_dead_reference_safe = graph_->IsDeadReferenceSafe();
+ const dex::ClassDef& callee_class = resolved_method->GetClassDef();
+ // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
+ // is currently rarely true.
+ bool callee_dead_reference_safe =
+ annotations::HasDeadReferenceSafeAnnotation(callee_dex_file, callee_class)
+ && !annotations::MethodContainsRSensitiveAccess(callee_dex_file, callee_class, method_index);
+
+ const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId();
+ HGraph* callee_graph = new (graph_->GetAllocator()) HGraph(
+ graph_->GetAllocator(),
+ graph_->GetArenaStack(),
+ callee_dex_file,
+ method_index,
+ codegen_->GetCompilerOptions().GetInstructionSet(),
+ invoke_type,
+ callee_dead_reference_safe,
+ graph_->IsDebuggable(),
+ /* osr= */ false,
+ /* is_shared_jit_code= */ graph_->IsCompilingForSharedJitCode(),
+ /* baseline= */ graph_->IsCompilingBaseline(),
+ /* start_instruction_id= */ caller_instruction_counter);
+ callee_graph->SetArtMethod(resolved_method);
+
+ // When they are needed, allocate `inline_stats_` on the Arena instead
+ // of on the stack, as Clang might produce a stack frame too large
+ // for this function, that would not fit the requirements of the
+ // `-Wframe-larger-than` option.
+ if (stats_ != nullptr) {
+ // Reuse one object for all inline attempts from this caller to keep Arena memory usage low.
+ if (inline_stats_ == nullptr) {
+ void* storage = graph_->GetAllocator()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc);
+ inline_stats_ = new (storage) OptimizingCompilerStats;
+ } else {
+ inline_stats_->Reset();
+ }
+ }
+ HGraphBuilder builder(callee_graph,
+ code_item_accessor,
+ &dex_compilation_unit,
+ &outer_compilation_unit_,
+ codegen_,
+ inline_stats_,
+ resolved_method->GetQuickenedInfo(),
+ handles_);
+
+ if (builder.BuildGraph() != kAnalysisSuccess) {
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCannotBuild)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be built, so cannot be inlined";
+ return false;
+ }
+
+ SubstituteArguments(callee_graph, invoke_instruction, receiver_type, dex_compilation_unit);
+
+ RunOptimizations(callee_graph, code_item, dex_compilation_unit);
+
+ size_t number_of_instructions = 0;
+ if (!CanInlineBody(callee_graph, invoke_instruction->GetBlock(), &number_of_instructions)) {
+ return false;
+ }
+
DCHECK_EQ(caller_instruction_counter, graph_->GetCurrentInstructionId())
<< "No instructions can be added to the outer graph while inner graph is being built";
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 15d7349694..882ba4e58b 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -73,6 +73,15 @@ class HInliner : public HOptimization {
bool TryInline(HInvoke* invoke_instruction);
+ // Attempt to resolve the target of the invoke instruction to an actual call
+ // target.
+ //
+ // Returns the target directly in the case of static or direct invokes.
+ // Otherwise, uses CHA devirtualization or other methods to try to find the
+ // call target.
+ ArtMethod* FindActualCallTarget(HInvoke* invoke_instruction, bool* cha_devirtualize)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Try to inline `resolved_method` in place of `invoke_instruction`. `do_rtp` is whether
// reference type propagation can run after the inlining. If the inlining is successful, this
// method will replace and remove the `invoke_instruction`. If `cha_devirtualize` is true,
@@ -93,8 +102,15 @@ class HInliner : public HOptimization {
bool TryBuildAndInlineHelper(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
ReferenceTypeInfo receiver_type,
- bool same_dex_file,
- HInstruction** return_replacement);
+ HInstruction** return_replacement)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Substitutes parameters in the callee graph with their values from the caller.
+ void SubstituteArguments(HGraph* callee_graph,
+ HInvoke* invoke_instruction,
+ ReferenceTypeInfo receiver_type,
+ const DexCompilationUnit& dex_compilation_unit)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Run simple optimizations on `callee_graph`.
void RunOptimizations(HGraph* callee_graph,
@@ -108,6 +124,38 @@ class HInliner : public HOptimization {
HInstruction** return_replacement)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns whether inlining is allowed based on ART semantics.
+ bool IsInliningAllowed(art::ArtMethod* method, const CodeItemDataAccessor& accessor) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Returns whether ART supports inlining this method.
+ //
+ // Some methods are not supported because they have features for which inlining
+ // is not implemented. For example, we do not currently support inlining throw
+ // instructions into a try block.
+ bool IsInliningSupported(const HInvoke* invoke_instruction,
+ art::ArtMethod* method,
+ const CodeItemDataAccessor& accessor) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Returns whether the inlining budget allows inlining this method.
+ //
+ // For example, this checks whether the function has grown too large and
+ // inlining should be prevented.
+ bool IsInliningBudgetAvailable(art::ArtMethod* method, const CodeItemDataAccessor& accessor) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Inspects the body of a method (callee_graph) and returns whether it can be
+ // inlined.
+ //
+ // This checks for instructions and constructs that we do not support
+ // inlining, such as inlining a throw instruction into a try block.
+ bool CanInlineBody(const HGraph* callee_graph,
+ const HBasicBlock* target_block,
+ size_t* out_number_of_instructions) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Create a new HInstanceFieldGet.
HInstanceFieldGet* CreateInstanceFieldGet(uint32_t field_index,
ArtMethod* referrer,
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index c18399c771..eece2e4b08 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -509,7 +509,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
return reverse_post_order_;
}
- ArrayRef<HBasicBlock* const> GetReversePostOrderSkipEntryBlock() {
+ ArrayRef<HBasicBlock* const> GetReversePostOrderSkipEntryBlock() const {
DCHECK(GetReversePostOrder()[0] == entry_block_);
return ArrayRef<HBasicBlock* const>(GetReversePostOrder()).SubArray(1);
}
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 793df98210..a9c217fc4f 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -68,15 +68,6 @@ RegisterAllocator::~RegisterAllocator() {
}
}
-bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
- InstructionSet instruction_set) {
- return instruction_set == InstructionSet::kArm
- || instruction_set == InstructionSet::kArm64
- || instruction_set == InstructionSet::kThumb2
- || instruction_set == InstructionSet::kX86
- || instruction_set == InstructionSet::kX86_64;
-}
-
class AllRangesIterator : public ValueObject {
public:
explicit AllRangesIterator(LiveInterval* interval)
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 18ef69fcab..4d226875bf 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -60,9 +60,6 @@ class RegisterAllocator : public DeletableArenaObject<kArenaAllocRegisterAllocat
// intervals that intersect each other. Returns false if it failed.
virtual bool Validate(bool log_fatal_on_failure) = 0;
- static bool CanAllocateRegistersFor(const HGraph& graph,
- InstructionSet instruction_set);
-
// Verifies that live intervals do not conflict. Used by unit testing.
static bool ValidateIntervals(ArrayRef<LiveInterval* const> intervals,
size_t number_of_spill_slots,