Diffstat (limited to 'compiler/optimizing/inliner.cc')
-rw-r--r--  compiler/optimizing/inliner.cc  |  50
1 file changed, 39 insertions(+), 11 deletions(-)
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 1551c1531a..01065959d8 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -22,8 +22,10 @@
#include "constant_folding.h"
#include "dead_code_elimination.h"
#include "driver/compiler_driver-inl.h"
+#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "instruction_simplifier.h"
+#include "intrinsics.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "nodes.h"
@@ -38,9 +40,6 @@
 namespace art {

-static constexpr int kMaxInlineCodeUnits = 18;
-static constexpr int kDepthLimit = 3;
-
 void HInliner::Run() {
   if (graph_->IsDebuggable()) {
     // For simplicity, we currently never inline when the graph is debuggable. This avoids
@@ -86,7 +85,7 @@ void HInliner::Run() {
 }

 static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   return method->IsFinal() || method->GetDeclaringClass()->IsFinal();
 }
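
This rename from SHARED_LOCKS_REQUIRED to SHARED_REQUIRES repeats mechanically in the hunks below; both spellings wrap Clang's thread-safety analysis and assert that callers already hold the mutator lock in shared (read) mode. A minimal sketch of how such a macro can be built on Clang's attributes; the actual ART definition lives in its own headers and may differ:

// Sketch only, assuming Clang: shared_locks_required is a real Clang
// thread-safety attribute. With it, the analysis warns at any call site
// that does not provably hold the named lock in shared mode.
#if defined(__clang__)
#define SHARED_REQUIRES(...) __attribute__((shared_locks_required(__VA_ARGS__)))
#else
#define SHARED_REQUIRES(...)
#endif
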
@@ -96,7 +95,7 @@ static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
  * Return nullptr if the runtime target cannot be proven.
  */
 static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resolved_method)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (IsMethodOrDeclaringClassFinal(resolved_method)) {
     // No need to lookup further, the resolved method will be the target.
     return resolved_method;
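
The early return above is the whole devirtualization argument: a final method, or any method of a final class, cannot be overridden, so the resolved method is provably the unique runtime target. A standalone C++ illustration of the same reasoning, not taken from the patch:

// Illustration: finality pins down the runtime target of a virtual call.
class Base {
 public:
  virtual int Size() { return 0; }
  virtual ~Base() = default;
};

class Leaf final : public Base {  // `final`: no subclass can ever exist
 public:
  int Size() override { return 1; }
};

int CallSize(Leaf* leaf) {
  return leaf->Size();  // the only possible target is Leaf::Size -> inlinable
}
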
@@ -162,7 +161,7 @@ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resol
 static uint32_t FindMethodIndexIn(ArtMethod* method,
                                   const DexFile& dex_file,
                                   uint32_t referrer_index)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SHARED_REQUIRES(Locks::mutator_lock_) {
   if (method->GetDexFile()->GetLocation().compare(dex_file.GetLocation()) == 0) {
     return method->GetDexMethodIndex();
   } else {
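
FindMethodIndexIn needs the location comparison because a dex method index is only meaningful inside its own dex file: when caller and callee live in different files, the method must be looked up again to obtain an index valid in the caller's file. A toy model of that constraint, using hypothetical types rather than ART's API:

#include <cstdint>
#include <string>
#include <unordered_map>

// Each "dex file" numbers its methods independently, so the same method can
// have unrelated indices in two different files.
struct ToyDexFile {
  std::string location;
  std::unordered_map<std::string, uint32_t> method_ids;  // signature -> index
};

uint32_t FindIndexIn(const std::string& signature, const ToyDexFile& dex) {
  auto it = dex.method_ids.find(signature);
  return it != dex.method_ids.end() ? it->second : UINT32_MAX;  // not found
}
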
@@ -219,7 +218,8 @@ bool HInliner::TryInline(HInvoke* invoke_instruction, uint32_t method_index) con
     return false;
   }

-  if (code_item->insns_size_in_code_units_ > kMaxInlineCodeUnits) {
+  size_t inline_max_code_units = compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits();
+  if (code_item->insns_size_in_code_units_ > inline_max_code_units) {
     VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
                    << " is too big to inline";
     return false;
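
With the hard-coded kMaxInlineCodeUnits gone, the size threshold now comes from CompilerOptions, so it can be tuned per compilation instead of being baked into the inliner. A hedged sketch of the accessor side of this change; the real CompilerOptions class is much larger, and the member names and defaults below are guesses that simply mirror the deleted constants:

#include <cstddef>

// Assumed shape of the two getters this patch starts calling.
class CompilerOptions {
 public:
  size_t GetInlineMaxCodeUnits() const { return inline_max_code_units_; }
  size_t GetInlineDepthLimit() const { return inline_depth_limit_; }

 private:
  size_t inline_max_code_units_ = 18;  // mirrors the deleted kMaxInlineCodeUnits
  size_t inline_depth_limit_ = 3;      // mirrors the deleted kDepthLimit
};
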
@@ -271,11 +271,11 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
   const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
   const DexFile& callee_dex_file = *resolved_method->GetDexFile();
   uint32_t method_index = resolved_method->GetDexMethodIndex();
-
+  ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
   DexCompilationUnit dex_compilation_unit(
       nullptr,
       caller_compilation_unit_.GetClassLoader(),
-      caller_compilation_unit_.GetClassLinker(),
+      class_linker,
       *resolved_method->GetDexFile(),
       code_item,
       resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
@@ -356,8 +356,10 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
   HConstantFolding fold(callee_graph);
   ReferenceTypePropagation type_propagation(callee_graph, handles_);
   InstructionSimplifier simplify(callee_graph, stats_);
+  IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_);

   HOptimization* optimizations[] = {
+    &intrinsics,
     &dce,
     &fold,
     &type_propagation,
@@ -369,7 +371,7 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
     optimization->Run();
   }

-  if (depth_ + 1 < kDepthLimit) {
+  if (depth_ + 1 < compiler_driver_->GetCompilerOptions().GetInlineDepthLimit()) {
     HInliner inliner(callee_graph,
                      outer_compilation_unit_,
                      dex_compilation_unit,
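
Each nested HInliner is constructed for the callee graph with depth_ + 1, so the guard above bounds recursive inlining at GetInlineDepthLimit() levels. A minimal model of the guard, as a hypothetical helper rather than ART code:

#include <cstddef>

// With a limit of 3 (the deleted kDepthLimit), nesting is allowed at depths
// 0 and 1 and refused at depth 2: the depth-2 callee is still inlined, but
// its own calls are left alone.
bool ShouldBuildNestedInliner(size_t depth, size_t limit) {
  return depth + 1 < limit;
}
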
@@ -448,7 +450,33 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
     }
   }

-  callee_graph->InlineInto(graph_, invoke_instruction);
+  HInstruction* return_replacement = callee_graph->InlineInto(graph_, invoke_instruction);
+
+  // When merging the graph we might create a new NullConstant in the caller graph which does
+  // not have the chance to be typed. We assign the correct type here so that we can keep the
+  // assertion that every reference has a valid type. This also simplifies checks along the way.
+  HNullConstant* null_constant = graph_->GetNullConstant();
+  if (!null_constant->GetReferenceTypeInfo().IsValid()) {
+    ReferenceTypeInfo::TypeHandle obj_handle =
+        handles_->NewHandle(class_linker->GetClassRoot(ClassLinker::kJavaLangObject));
+    null_constant->SetReferenceTypeInfo(
+        ReferenceTypeInfo::Create(obj_handle, false /* is_exact */));
+  }
+
+  if ((return_replacement != nullptr)
+      && (return_replacement->GetType() == Primitive::kPrimNot)) {
+    if (!return_replacement->GetReferenceTypeInfo().IsValid()) {
+      // Make sure that we have a valid type for the return. We may get an invalid one when
+      // we inline invokes with multiple branches and create a Phi for the result.
+      // TODO: we could be more precise by merging the phi inputs but that requires
+      // some functionality from the reference type propagation.
+      DCHECK(return_replacement->IsPhi());
+      ReferenceTypeInfo::TypeHandle return_handle =
+          handles_->NewHandle(resolved_method->GetReturnType());
+      return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
+          return_handle, return_handle->IsFinal() /* is_exact */));
+    }
+  }

   return true;
 }
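
The final hunk changes InlineInto to hand back the instruction that replaced the invoke in the caller. Two typing gaps are then patched up: a NullConstant freshly created during the merge gets java.lang.Object as a conservative type, and a reference-typed result Phi (created when the callee had several return sites) gets the callee's declared return type, marked exact only when that class is final. A self-contained sketch of the exactness rule, with hypothetical names rather than ART's API:

// A final class has no subclasses, so the declared return type is the exact
// runtime type; otherwise any subclass could flow out of the callee.
struct ToyTypeInfo {
  const char* klass;
  bool is_exact;
};

ToyTypeInfo TypeForInlinedReturn(const char* declared_klass, bool is_final) {
  return ToyTypeInfo{declared_klass, /*is_exact=*/is_final};
}
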