Revert "Run optimizations with baseline compilation."

This reverts commit 41c5dde40d1c75d36a7f984c8d72ec65fbff3111.

Reason for revert: breaks test.java.util.Arrays.Sorting

Change-Id: I03385c9f1efff4b8e8bd315827dde6ed774bbb52
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index c14d5d3..523a666 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -226,9 +226,5 @@
   return compiler_options_->IsBaseline();
 }
 
-uint32_t JitCompiler::GetInlineMaxCodeUnits() const {
-  return compiler_options_->GetInlineMaxCodeUnits();
-}
-
 }  // namespace jit
 }  // namespace art
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 66aa545..5a919fb 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -64,8 +64,6 @@
                                          bool compress,
                                          /*out*/ size_t* num_symbols) override;
 
-  uint32_t GetInlineMaxCodeUnits() const override;
-
  private:
   std::unique_ptr<CompilerOptions> compiler_options_;
   std::unique_ptr<Compiler> compiler_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 907fdc0..9027976 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4597,8 +4597,7 @@
   if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) {
     ProfilingInfo* info = GetGraph()->GetProfilingInfo();
     DCHECK(info != nullptr);
-    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(
-        info, GetCompilerOptions(), instruction->AsInvoke());
+    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
     if (cache != nullptr) {
       uint64_t address = reinterpret_cast64<uint64_t>(cache);
       vixl::aarch64::Label done;
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 144be63..00c14b0 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3687,8 +3687,7 @@
   DCHECK_EQ(r0.GetCode(), klass.GetCode());
   if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) {
     ProfilingInfo* info = GetGraph()->GetProfilingInfo();
-    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(
-        info, GetCompilerOptions(), instruction->AsInvoke());
+    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
     if (cache != nullptr) {
       uint32_t address = reinterpret_cast32<uint32_t>(cache);
       vixl32::Label done;
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 9627fd2..182c1d4 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -6724,8 +6724,7 @@
   if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) {
     ProfilingInfo* info = GetGraph()->GetProfilingInfo();
     DCHECK(info != nullptr);
-    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(
-        info, GetCompilerOptions(), instruction->AsInvoke());
+    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
     if (cache != nullptr) {
       uint64_t address = reinterpret_cast64<uint64_t>(cache);
       Riscv64Label done;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 95f4c73..71db5c9 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2848,8 +2848,7 @@
   if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) {
     ProfilingInfo* info = GetGraph()->GetProfilingInfo();
     DCHECK(info != nullptr);
-    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(
-        info, GetCompilerOptions(), instruction->AsInvoke());
+    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
     if (cache != nullptr) {
       uint32_t address = reinterpret_cast32<uint32_t>(cache);
       if (kIsDebugBuild) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 69fde66..9d01019 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -3141,8 +3141,7 @@
   if (ProfilingInfoBuilder::IsInlineCacheUseful(instruction->AsInvoke(), this)) {
     ProfilingInfo* info = GetGraph()->GetProfilingInfo();
     DCHECK(info != nullptr);
-    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(
-        info, GetCompilerOptions(), instruction->AsInvoke());
+    InlineCache* cache = ProfilingInfoBuilder::GetInlineCache(info, instruction->AsInvoke());
     if (cache != nullptr) {
       uint64_t address = reinterpret_cast64<uint64_t>(cache);
       NearLabel done;
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index afbf941..b7f7a0f 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -867,8 +867,7 @@
     std::ostringstream oss;
     oss << pass_name_;
     if (!IsDebugDump()) {
-      oss << " (" << (GetGraph()->IsCompilingBaseline() ? "baseline " : "")
-          << (is_after_pass_ ? "after" : "before")
+      oss << " (" << (is_after_pass_ ? "after" : "before")
           << (graph_in_bad_state_ ? ", bad_state" : "") << ")";
     }
     PrintProperty("name", oss.str().c_str());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d86e3b5..37fa318 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -37,7 +37,6 @@
 #include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
 #include "nodes.h"
-#include "profiling_info_builder.h"
 #include "reference_type_propagation.h"
 #include "register_allocator_linear_scan.h"
 #include "scoped_thread_state_change-inl.h"
@@ -520,15 +519,6 @@
     return result;
   }
 
-  if (graph_->IsCompilingBaseline()) {
-    LOG_FAIL_NO_STAT() << "Call to " << invoke_instruction->GetMethodReference().PrettyMethod()
-                       << " not inlined because we are compiling baseline and we could not"
-                       << " statically resolve the target";
-    // For baseline compilation, we will collect inline caches, so we should not
-    // try to inline using them.
-    return false;
-  }
-
   DCHECK(!invoke_instruction->IsInvokeStaticOrDirect());
 
   // No try catch inlining allowed here, or recursively. For try catch inlining we are banking on
@@ -679,36 +669,17 @@
   ArtMethod* caller = graph_->GetArtMethod();
   // Under JIT, we should always know the caller.
   DCHECK(caller != nullptr);
-
-  InlineCache* cache = nullptr;
-  // Start with the outer graph profiling info.
-  ProfilingInfo* profiling_info = outermost_graph_->GetProfilingInfo();
-  if (profiling_info != nullptr) {
-    if (depth_ == 0) {
-      cache = profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
-    } else {
-      uint32_t dex_pc = ProfilingInfoBuilder::EncodeInlinedDexPc(
-          this, codegen_->GetCompilerOptions(), invoke_instruction);
-      if (dex_pc != kNoDexPc) {
-        cache = profiling_info->GetInlineCache(dex_pc);
-      }
-    }
+  ProfilingInfo* profiling_info = graph_->GetProfilingInfo();
+  if (profiling_info == nullptr) {
+    return kInlineCacheNoData;
   }
 
+  InlineCache* cache = profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
   if (cache == nullptr) {
-    // Check the current graph profiling info.
-    profiling_info = graph_->GetProfilingInfo();
-    if (profiling_info == nullptr) {
-      return kInlineCacheNoData;
-    }
-
-    cache = profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
-  }
-
-  if (cache == nullptr) {
-    // Either we never hit this invoke and we never compiled the callee,
-    // or the method wasn't resolved when we performed baseline compilation.
-    // Bail for now.
+    // This shouldn't happen, but we don't guarantee that method resolution
+    // between baseline compilation and optimizing compilation is identical. Be robust,
+    // warn about it, and return that we don't have any inline cache data.
+    LOG(WARNING) << "No inline cache found for " << caller->PrettyMethod();
     return kInlineCacheNoData;
   }
   Runtime::Current()->GetJit()->GetCodeCache()->CopyInlineCacheInto(*cache, classes);
@@ -734,12 +705,6 @@
 
   const ProfileCompilationInfo::InlineCacheMap* inline_caches = hotness.GetInlineCacheMap();
   DCHECK(inline_caches != nullptr);
-
-  // Inlined inline caches are not supported in AOT, so we use the dex pc directly, and don't
-  // call `InlineCache::EncodeDexPc`.
-  // To support it, we would need to ensure `inline_max_code_units` remain the
-  // same between dex2oat and runtime, for example by adding it to the boot
-  // image oat header.
   const auto it = inline_caches->find(invoke_instruction->GetDexPc());
   if (it == inline_caches->end()) {
     return kInlineCacheUninitialized;
@@ -2109,20 +2074,6 @@
             << " could not be inlined because it needs a BSS check";
         return false;
       }
-
-      if (outermost_graph_->IsCompilingBaseline() &&
-          (current->IsInvokeVirtual() || current->IsInvokeInterface()) &&
-          ProfilingInfoBuilder::IsInlineCacheUseful(current->AsInvoke(), codegen_)) {
-        uint32_t maximum_inlining_depth_for_baseline =
-            InlineCache::MaxDexPcEncodingDepth(
-                outermost_graph_->GetArtMethod(),
-                codegen_->GetCompilerOptions().GetInlineMaxCodeUnits());
-        if (depth_ + 1 > maximum_inlining_depth_for_baseline) {
-          LOG_FAIL_NO_STAT() << "Reached maximum depth for inlining in baseline compilation: "
-                             << depth_ << " for " << callee_graph->GetArtMethod()->PrettyMethod();
-          return false;
-        }
-      }
     }
   }
 
@@ -2234,7 +2185,6 @@
       // The current invoke is not a try block.
       !invoke_instruction->GetBlock()->IsTryBlock();
   RunOptimizations(callee_graph,
-                   invoke_instruction->GetEnvironment(),
                    code_item,
                    dex_compilation_unit,
                    try_catch_inlining_allowed_for_recursive_inline);
@@ -2274,7 +2224,6 @@
 }
 
 void HInliner::RunOptimizations(HGraph* callee_graph,
-                                HEnvironment* caller_environment,
                                 const dex::CodeItem* code_item,
                                 const DexCompilationUnit& dex_compilation_unit,
                                 bool try_catch_inlining_allowed_for_recursive_inline) {
@@ -2323,7 +2272,6 @@
                    total_number_of_dex_registers_ + accessor.RegistersSize(),
                    total_number_of_instructions_ + number_of_instructions,
                    this,
-                   caller_environment,
                    depth_ + 1,
                    try_catch_inlining_allowed_for_recursive_inline);
   inliner.Run();
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index a001404..af067da 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -43,7 +43,6 @@
            size_t total_number_of_dex_registers,
            size_t total_number_of_instructions,
            HInliner* parent,
-           HEnvironment* caller_environment,
            size_t depth,
            bool try_catch_inlining_allowed,
            const char* name = kInlinerPassName)
@@ -55,7 +54,6 @@
         total_number_of_dex_registers_(total_number_of_dex_registers),
         total_number_of_instructions_(total_number_of_instructions),
         parent_(parent),
-        caller_environment_(caller_environment),
         depth_(depth),
         inlining_budget_(0),
         try_catch_inlining_allowed_(try_catch_inlining_allowed),
@@ -65,12 +63,6 @@
 
   static constexpr const char* kInlinerPassName = "inliner";
 
-  const HInliner* GetParent() const { return parent_; }
-  const HEnvironment* GetCallerEnvironment() const { return caller_environment_; }
-
-  const HGraph* GetOutermostGraph() const { return outermost_graph_; }
-  const HGraph* GetGraph() const { return graph_; }
-
  private:
   enum InlineCacheType {
     kInlineCacheNoData = 0,
@@ -116,7 +108,6 @@
 
   // Run simple optimizations on `callee_graph`.
   void RunOptimizations(HGraph* callee_graph,
-                        HEnvironment* caller_environment,
                         const dex::CodeItem* code_item,
                         const DexCompilationUnit& dex_compilation_unit,
                         bool try_catch_inlining_allowed_for_recursive_inline)
@@ -329,10 +320,9 @@
   const size_t total_number_of_dex_registers_;
   size_t total_number_of_instructions_;
 
-  // The 'parent' inliner, that means the inlining optimization that requested
+  // The 'parent' inliner, that means the inlining optimization that requested
   // `graph_` to be inlined.
   const HInliner* const parent_;
-  const HEnvironment* const caller_environment_;
   const size_t depth_;
 
   // The budget left for inlining, in number of instructions.
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index a4df48c..16045d4 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -257,7 +257,6 @@
                                        accessor.RegistersSize(),
                                        /* total_number_of_instructions= */ 0,
                                        /* parent= */ nullptr,
-                                       /* caller_environment= */ nullptr,
                                        /* depth= */ 0,
                                        /* try_catch_inlining_allowed= */ true,
                                        pass_name);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 5868f22..d458462 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -368,10 +368,10 @@
                             const DexCompilationUnit& dex_compilation_unit,
                             PassObserver* pass_observer) const;
 
-  bool RunRequiredPasses(HGraph* graph,
-                         CodeGenerator* codegen,
-                         const DexCompilationUnit& dex_compilation_unit,
-                         PassObserver* pass_observer) const;
+  bool RunBaselineOptimizations(HGraph* graph,
+                                CodeGenerator* codegen,
+                                const DexCompilationUnit& dex_compilation_unit,
+                                PassObserver* pass_observer) const;
 
   std::vector<uint8_t> GenerateJitDebugInfo(const debug::MethodDebugInfo& method_debug_info);
 
@@ -444,10 +444,10 @@
          instruction_set == InstructionSet::kX86_64;
 }
 
-bool OptimizingCompiler::RunRequiredPasses(HGraph* graph,
-                                           CodeGenerator* codegen,
-                                           const DexCompilationUnit& dex_compilation_unit,
-                                           PassObserver* pass_observer) const {
+bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
+                                                  CodeGenerator* codegen,
+                                                  const DexCompilationUnit& dex_compilation_unit,
+                                                  PassObserver* pass_observer) const {
   switch (codegen->GetCompilerOptions().GetInstructionSet()) {
 #if defined(ART_ENABLE_CODEGEN_arm)
     case InstructionSet::kThumb2:
@@ -904,15 +904,21 @@
     }
   }
 
-  if (compilation_kind == CompilationKind::kBaseline && compiler_options.ProfileBranches()) {
-    // Branch profiling currently doesn't support running optimizations.
-    RunRequiredPasses(graph, codegen.get(), dex_compilation_unit, &pass_observer);
+  if (compilation_kind == CompilationKind::kBaseline) {
+    RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
   } else {
     RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer);
     PassScope scope(WriteBarrierElimination::kWBEPassName, &pass_observer);
     WriteBarrierElimination(graph, compilation_stats_.get()).Run();
   }
 
+  RegisterAllocator::Strategy regalloc_strategy =
+    compiler_options.GetRegisterAllocationStrategy();
+  AllocateRegisters(graph,
+                    codegen.get(),
+                    &pass_observer,
+                    regalloc_strategy,
+                    compilation_stats_.get());
   // If we are compiling baseline and we haven't created a profiling info for
   // this method already, do it now.
   if (jit != nullptr &&
@@ -929,14 +935,6 @@
     }
   }
 
-  RegisterAllocator::Strategy regalloc_strategy =
-    compiler_options.GetRegisterAllocationStrategy();
-  AllocateRegisters(graph,
-                    codegen.get(),
-                    &pass_observer,
-                    regalloc_strategy,
-                    compilation_stats_.get());
-
   codegen->Compile();
   pass_observer.DumpDisassembly();
 
diff --git a/compiler/optimizing/profiling_info_builder.cc b/compiler/optimizing/profiling_info_builder.cc
index 19795f5..7888753 100644
--- a/compiler/optimizing/profiling_info_builder.cc
+++ b/compiler/optimizing/profiling_info_builder.cc
@@ -20,7 +20,6 @@
 #include "code_generator.h"
 #include "driver/compiler_options.h"
 #include "dex/code_item_accessors-inl.h"
-#include "inliner.h"
 #include "jit/profiling_info.h"
 #include "optimizing_compiler_stats.h"
 #include "scoped_thread_state_change-inl.h"
@@ -43,53 +42,10 @@
       ProfilingInfo::Create(soa.Self(), GetGraph()->GetArtMethod(), inline_caches_));
 }
 
-
-uint32_t ProfilingInfoBuilder::EncodeInlinedDexPc(const HInliner* inliner,
-                                                  const CompilerOptions& compiler_options,
-                                                  HInvoke* invoke) {
-  DCHECK(inliner->GetCallerEnvironment() != nullptr);
-  DCHECK(inliner->GetParent() != nullptr);
-  std::vector<uint32_t> temp_vector;
-  temp_vector.push_back(invoke->GetDexPc());
-  while (inliner->GetCallerEnvironment() != nullptr) {
-    temp_vector.push_back(inliner->GetCallerEnvironment()->GetDexPc());
-    inliner = inliner->GetParent();
-  }
-
-  DCHECK_EQ(inliner->GetOutermostGraph(), inliner->GetGraph());
-  return InlineCache::EncodeDexPc(
-      inliner->GetOutermostGraph()->GetArtMethod(),
-      temp_vector,
-      compiler_options.GetInlineMaxCodeUnits());
-}
-
-static uint32_t EncodeDexPc(HInvoke* invoke, const CompilerOptions& compiler_options) {
-  std::vector<uint32_t> dex_pcs;
-  ArtMethod* outer_method = nullptr;
-  for (HEnvironment* environment = invoke->GetEnvironment();
-       environment != nullptr;
-       environment = environment->GetParent()) {
-    outer_method = environment->GetMethod();
-    dex_pcs.push_back(environment->GetDexPc());
-  }
-
-  ScopedObjectAccess soa(Thread::Current());
-  return InlineCache::EncodeDexPc(
-      outer_method,
-      dex_pcs,
-      compiler_options.GetInlineMaxCodeUnits());
-}
-
 void ProfilingInfoBuilder::HandleInvoke(HInvoke* invoke) {
+  DCHECK(!invoke->GetEnvironment()->IsFromInlinedInvoke());
   if (IsInlineCacheUseful(invoke, codegen_)) {
-    uint32_t dex_pc = EncodeDexPc(invoke, compiler_options_);
-    if (dex_pc != kNoDexPc) {
-      inline_caches_.push_back(dex_pc);
-    } else {
-      ScopedObjectAccess soa(Thread::Current());
-      LOG(WARNING) << "Could not encode dex pc for "
-                   << invoke->GetResolvedMethod()->PrettyMethod();
-    }
+    inline_caches_.push_back(invoke->GetDexPc());
   }
 }
 
@@ -125,15 +81,10 @@
   return true;
 }
 
-InlineCache* ProfilingInfoBuilder::GetInlineCache(ProfilingInfo* info,
-                                                  const CompilerOptions& compiler_options,
-                                                  HInvoke* instruction) {
+InlineCache* ProfilingInfoBuilder::GetInlineCache(ProfilingInfo* info, HInvoke* instruction) {
+  DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
   ScopedObjectAccess soa(Thread::Current());
-  uint32_t dex_pc = EncodeDexPc(instruction, compiler_options);
-  if (dex_pc == kNoDexPc) {
-    return nullptr;
-  }
-  return info->GetInlineCache(dex_pc);
+  return info->GetInlineCache(instruction->GetDexPc());
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/profiling_info_builder.h b/compiler/optimizing/profiling_info_builder.h
index c8dc59a..2185b0e 100644
--- a/compiler/optimizing/profiling_info_builder.h
+++ b/compiler/optimizing/profiling_info_builder.h
@@ -24,7 +24,6 @@
 
 class CodeGenerator;
 class CompilerOptions;
-class HInliner;
 class InlineCache;
 class ProfilingInfo;
 
@@ -43,13 +42,8 @@
   static constexpr const char* kProfilingInfoBuilderPassName =
       "profiling_info_builder";
 
-  static InlineCache* GetInlineCache(ProfilingInfo* info,
-                                     const CompilerOptions& compiler_options,
-                                     HInvoke* invoke);
+  static InlineCache* GetInlineCache(ProfilingInfo* info, HInvoke* invoke);
   static bool IsInlineCacheUseful(HInvoke* invoke, CodeGenerator* codegen);
-  static uint32_t EncodeInlinedDexPc(
-      const HInliner* inliner, const CompilerOptions& compiler_options, HInvoke* invoke)
-      REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   void VisitInvokeVirtual(HInvokeVirtual* invoke) override;
@@ -58,7 +52,7 @@
   void HandleInvoke(HInvoke* invoke);
 
   CodeGenerator* codegen_;
-  const CompilerOptions& compiler_options_;
+  [[maybe_unused]] const CompilerOptions& compiler_options_;
   std::vector<uint32_t> inline_caches_;
 
   DISALLOW_COPY_AND_ASSIGN(ProfilingInfoBuilder);
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 2acbbf7..d5fcd35 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -197,7 +197,6 @@
   virtual void ParseCompilerOptions() = 0;
   virtual bool IsBaselineCompiler() const = 0;
   virtual void SetDebuggableCompilerOption(bool value) = 0;
-  virtual uint32_t GetInlineMaxCodeUnits() const = 0;
 
   virtual std::vector<uint8_t> PackElfFileForJIT(ArrayRef<const JITCodeEntry*> elf_files,
                                                  ArrayRef<const void*> removed_symbols,
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 1aaf53e..2e94a41 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -114,9 +114,6 @@
 
 void ProfilingInfo::AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls) {
   InlineCache* cache = GetInlineCache(dex_pc);
-  if (cache == nullptr) {
-    return;
-  }
   for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
     mirror::Class* existing = cache->classes_[i].Read<kWithoutReadBarrier>();
     mirror::Class* marked = ReadBarrier::IsMarked(existing);
@@ -162,39 +159,4 @@
   }
 }
 
-uint32_t InlineCache::EncodeDexPc(ArtMethod* method,
-                                  const std::vector<uint32_t>& dex_pcs,
-                                  uint32_t inline_max_code_units) {
-  if (kIsDebugBuild) {
-    // Make sure `inline_max_code_units` is always the same.
-    static uint32_t global_max_code_units = inline_max_code_units;
-    CHECK_EQ(global_max_code_units, inline_max_code_units);
-  }
-  if (dex_pcs.size() - 1 > MaxDexPcEncodingDepth(method, inline_max_code_units)) {
-    return -1;
-  }
-  uint32_t size = dex_pcs.size();
-  uint32_t insns_size = method->DexInstructions().InsnsSizeInCodeUnits();
-
-  uint32_t dex_pc = dex_pcs[size - 1];
-  uint32_t shift = MinimumBitsToStore(insns_size - 1);
-  for (uint32_t i = size - 1; i > 0; --i) {
-    DCHECK_LT(shift, BitSizeOf<uint32_t>());
-    dex_pc += ((dex_pcs[i - 1] + 1) << shift);
-    shift += MinimumBitsToStore(inline_max_code_units);
-  }
-  return dex_pc;
-}
-
-uint32_t InlineCache::MaxDexPcEncodingDepth(ArtMethod* method, uint32_t inline_max_code_units) {
-  uint32_t insns_size = method->DexInstructions().InsnsSizeInCodeUnits();
-  uint32_t num_bits = MinimumBitsToStore(insns_size - 1);
-  uint32_t depth = 0;
-  do {
-    depth++;
-    num_bits += MinimumBitsToStore(inline_max_code_units);
-  } while (num_bits <= BitSizeOf<uint32_t>());
-  return depth - 1;
-}
-
 }  // namespace art
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 4ce20ff..62b431d 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -28,7 +28,6 @@
 namespace art {
 
 class ArtMethod;
-class CompilerOptions;
 class ProfilingInfo;
 
 namespace jit {
@@ -51,18 +50,6 @@
     return MemberOffset(OFFSETOF_MEMBER(InlineCache, classes_));
   }
 
-  // Encode the list of `dex_pcs` to fit into an uint32_t.
-  static uint32_t EncodeDexPc(ArtMethod* method,
-                              const std::vector<uint32_t>& dex_pcs,
-                              uint32_t inline_max_code_units)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Return the maximum inlining depth that we support to encode a list of dex
-  // pcs.
-  static uint32_t MaxDexPcEncodingDepth(ArtMethod* method,
-                                        uint32_t inline_max_code_units)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
  private:
   uint32_t dex_pc_;
   GcRoot<mirror::Class> classes_[kIndividualCacheSize];
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 86a2dda..590a596 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -407,10 +407,6 @@
     return bottom_shadow_frame_;
   }
 
-  const std::vector<uint32_t>& GetDexPcs() const {
-    return dex_pcs_;
-  }
-
   void FinishStackWalk() REQUIRES_SHARED(Locks::mutator_lock_) {
     // This is the upcall, or the next full frame in single-frame deopt, or the
     // code isn't deoptimizeable. We remember the frame and last pc so that we
@@ -519,14 +515,11 @@
       }
       prev_shadow_frame_ = new_frame;
 
-      if (single_frame_deopt_) {
-        dex_pcs_.push_back(GetDexPc());
-        if (!IsInInlinedFrame()) {
-          // Single-frame deopt ends at the first non-inlined frame and needs to store that method.
-          single_frame_done_ = true;
-          single_frame_deopt_method_ = method;
-          single_frame_deopt_quick_method_header_ = GetCurrentOatQuickMethodHeader();
-        }
+      if (single_frame_deopt_ && !IsInInlinedFrame()) {
+        // Single-frame deopt ends at the first non-inlined frame and needs to store that method.
+        single_frame_done_ = true;
+        single_frame_deopt_method_ = method;
+        single_frame_deopt_quick_method_header_ = GetCurrentOatQuickMethodHeader();
       }
       callee_method_ = method;
       return true;
@@ -666,7 +659,6 @@
   // a deopt after running method exit callbacks if the callback throws or requests events that
   // need a deopt.
   bool skip_method_exit_callbacks_;
-  std::vector<uint32_t> dex_pcs_;
 
   DISALLOW_COPY_AND_ASSIGN(DeoptimizeStackVisitor);
 };
@@ -747,26 +739,11 @@
       case Instruction::INVOKE_VIRTUAL:
       case Instruction::INVOKE_INTERFACE_RANGE:
       case Instruction::INVOKE_VIRTUAL_RANGE: {
-        uint32_t encoded_dex_pc = InlineCache::EncodeDexPc(
-            visitor.GetSingleFrameDeoptMethod(),
-            visitor.GetDexPcs(),
-            runtime->GetJit()->GetJitCompiler()->GetInlineMaxCodeUnits());
-        if (encoded_dex_pc != static_cast<uint32_t>(-1)) {
-          // The inline cache comes from the top-level method.
-          runtime->GetJit()->GetCodeCache()->MaybeUpdateInlineCache(
-              visitor.GetSingleFrameDeoptMethod(),
-              encoded_dex_pc,
-              shadow_frame->GetVRegReference(inst->VRegC())->GetClass(),
-              self_);
-        } else {
-          // If the top-level inline cache did not exist, update the one for the
-          // bottom method, we know it's the one that was used for compilation.
-          runtime->GetJit()->GetCodeCache()->MaybeUpdateInlineCache(
-              shadow_frame->GetMethod(),
-              dex_pc,
-              shadow_frame->GetVRegReference(inst->VRegC())->GetClass(),
-              self_);
-        }
+        runtime->GetJit()->GetCodeCache()->MaybeUpdateInlineCache(
+            shadow_frame->GetMethod(),
+            dex_pc,
+            shadow_frame->GetVRegReference(inst->VRegC())->GetClass(),
+            self_);
         break;
       }
       default: {