Revert "Revert "Run type propagation after inliner only when needed.""

This reverts commit 271743601650308c7ac5c7a3ec35025d8130a298.

Instead of unconditionally scheduling a ReferenceTypePropagation pass
after the inliner, the inliner now runs a type propagation fixup
itself, and only when the inlined return value refines the declared
type of the invoke: a strictly narrower class, a newly exact type, or
a newly known non-null value.

Change-Id: I173e27a0a4d7d54f90ca459eb48d280d1d40ab70
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 505603a..2b77901 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -24,6 +24,7 @@
 #include "code_generator.h"
 #include "dead_code_elimination.h"
 #include "disassembler.h"
+#include "inliner.h"
 #include "licm.h"
 #include "nodes.h"
 #include "optimization.h"
@@ -422,11 +423,6 @@
     return strcmp(pass_name_, name) == 0;
   }
 
-  bool IsReferenceTypePropagationPass() {
-    return strstr(pass_name_, ReferenceTypePropagation::kReferenceTypePropagationPassName)
-        != nullptr;
-  }
-
   void PrintInstruction(HInstruction* instruction) {
     output_ << instruction->DebugName();
     if (instruction->InputCount() > 0) {
@@ -490,7 +486,8 @@
       } else {
         StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId();
       }
-    } else if (IsReferenceTypePropagationPass()
+    } else if ((IsPass(ReferenceTypePropagation::kReferenceTypePropagationPassName)
+        || IsPass(HInliner::kInlinerPassName))
         && (instruction->GetType() == Primitive::kPrimNot)) {
       ReferenceTypeInfo info = instruction->IsLoadClass()
         ? instruction->AsLoadClass()->GetLoadedClassRTI()
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index c9319f5..df2013b 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -534,6 +534,7 @@
             ReferenceTypeInfo::Create(obj_handle, false /* is_exact */));
   }
 
+  // Check the integrity of reference types and run another type propagation if needed.
   if ((return_replacement != nullptr)
       && (return_replacement->GetType() == Primitive::kPrimNot)) {
     if (!return_replacement->GetReferenceTypeInfo().IsValid()) {
@@ -544,10 +545,20 @@
       DCHECK(return_replacement->IsPhi());
       size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
       ReferenceTypeInfo::TypeHandle return_handle =
-        handles_->NewHandle(resolved_method->GetReturnType(true /* resolve */, pointer_size));
+          handles_->NewHandle(resolved_method->GetReturnType(true /* resolve */, pointer_size));
       return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
          return_handle, return_handle->CannotBeAssignedFromOtherTypes() /* is_exact */));
     }
+
+    // If the return type is a refinement of the declared type, run the type propagation again.
+    ReferenceTypeInfo return_rti = return_replacement->GetReferenceTypeInfo();
+    ReferenceTypeInfo invoke_rti = invoke_instruction->GetReferenceTypeInfo();
+    if (invoke_rti.IsStrictSupertypeOf(return_rti)
+        || (return_rti.IsExact() && !invoke_rti.IsExact())
+        || !return_replacement->CanBeNull()) {
+      ReferenceTypePropagation rtp_fixup(graph_, handles_);
+      rtp_fixup.Run();
+    }
   }
 
   return true;
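
Reading aid, not part of the patch: the trigger added above, restated as a
standalone predicate. Rti and NeedsTypePropagationFixup are hypothetical
stand-ins, not ART classes; a runnable model of IsStrictSupertypeOf follows
the nodes.h hunk below.

// Sketch only: Rti stands in for ReferenceTypeInfo.
struct Rti {
  bool is_exact;
  bool IsStrictSupertypeOf(const Rti& other) const;  // modeled after the nodes.h hunk below
};

// Rerun ReferenceTypePropagation after inlining only when the inlined return value
// carries strictly better type information than the HInvoke it replaces.
bool NeedsTypePropagationFixup(const Rti& invoke_rti,
                               const Rti& return_rti,
                               bool return_can_be_null) {
  return invoke_rti.IsStrictSupertypeOf(return_rti)     // strictly narrower class
      || (return_rti.is_exact && !invoke_rti.is_exact)  // newly exact type
      || !return_can_be_null;                           // newly known non-null
}
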
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index a7bd31e..79b79d7 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1731,6 +1731,13 @@
     return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
   }
 
+  bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+    DCHECK(IsValid());
+    DCHECK(rti.IsValid());
+    return GetTypeHandle().Get() != rti.GetTypeHandle().Get() &&
+        GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
+  }
+
   // Returns true if the type information provides the same amount of detail.
   // Note that it does not mean that the instructions have the same actual type
   // (because the type can be the result of a merge).
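
A self-contained sketch (illustration only, with made-up Klass/Rti types standing
in for the real type handles) of what "strict supertype" means here:
assignable-from plus a different underlying class, so the relation is irreflexive,
unlike the plain supertype check whose body appears in the context just above.

#include <cassert>

// Toy single-inheritance hierarchy standing in for the real class handles.
struct Klass { const Klass* super; };

static bool IsAssignableFrom(const Klass* super, const Klass* sub) {
  for (const Klass* k = sub; k != nullptr; k = k->super) {
    if (k == super) return true;
  }
  return false;
}

struct Rti {
  const Klass* klass;
  bool IsStrictSupertypeOf(const Rti& other) const {
    // Same shape as the new nodes.h predicate: different class, but assignable from it.
    return klass != other.klass && IsAssignableFrom(klass, other.klass);
  }
};

int main() {
  Klass object_klass{nullptr};
  Klass string_klass{&object_klass};                     // String derives from Object
  Rti object_rti{&object_klass};
  Rti string_rti{&string_klass};
  assert(object_rti.IsStrictSupertypeOf(string_rti));    // Object > String
  assert(!string_rti.IsStrictSupertypeOf(string_rti));   // never a strict supertype of itself
  assert(!string_rti.IsStrictSupertypeOf(object_rti));   // String is not a supertype of Object
  return 0;
}
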
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 9c19b98..368a30e 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -405,20 +405,9 @@
   if (!should_inline) {
     return;
   }
-
-  ArenaAllocator* arena = graph->GetArena();
-  HInliner* inliner = new (arena) HInliner(
+  HInliner* inliner = new (graph->GetArena()) HInliner(
     graph, codegen, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
-  ReferenceTypePropagation* type_propagation =
-    new (arena) ReferenceTypePropagation(graph, handles,
-        "reference_type_propagation_after_inlining");
-
-  HOptimization* optimizations[] = {
-    inliner,
-    // Run another type propagation phase: inlining will open up more opportunities
-    // to remove checkcast/instanceof and null checks.
-    type_propagation,
-  };
+  HOptimization* optimizations[] = { inliner };
 
   RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
 }
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 659da06..ecc085b 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -99,17 +99,9 @@
   }
 }
 
-void ReferenceTypePropagation::Run() {
-  // To properly propagate type info we need to visit in the dominator-based order.
-  // Reverse post order guarantees a node's dominators are visited first.
-  // We take advantage of this order in `VisitBasicBlock`.
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    VisitBasicBlock(it.Current());
-  }
-  ProcessWorklist();
-
+void ReferenceTypePropagation::ValidateTypes() {
+  // TODO: move this to the graph checker.
   if (kIsDebugBuild) {
-    // TODO: move this to the graph checker.
     ScopedObjectAccess soa(Thread::Current());
     for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
       HBasicBlock* block = it.Current();
@@ -135,6 +127,18 @@
   }
 }
 
+void ReferenceTypePropagation::Run() {
+  // To properly propagate type info we need to visit in the dominator-based order.
+  // Reverse post order guarantees a node's dominators are visited first.
+  // We take advantage of this order in `VisitBasicBlock`.
+  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+    VisitBasicBlock(it.Current());
+  }
+
+  ProcessWorklist();
+  ValidateTypes();
+}
+
 void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
   RTPVisitor visitor(graph_,
                      handles_,
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 5493601..5c05592 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -56,6 +56,8 @@
   ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  void ValidateTypes();
+
   StackHandleScopeCollection* handles_;
 
   ArenaVector<HInstruction*> worklist_;