Cache method lowering info in mir graph.

This should enable easy inlining checks. It should also
improve the compilation time of methods that call the same
methods over and over, since it is exactly such methods
that tend to exceed our 100ms time limit.
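
In rough outline (using only names introduced by this change), the
invoke lowering code now reads pre-resolved info off the MIR instead
of recomputing it per INVOKE via ComputeInvokeInfo():

    const MirMethodLoweringInfo& method_info =
        mir_graph_->GetMethodLoweringInfo(info->mir);
    info->type = method_info.GetSharpType();
    bool fast_path = method_info.FastPath();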

Change-Id: If01cd18e039071a74a1444570283c153429c9cd4
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index bd7c40b..1a90ca8 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -37,6 +37,20 @@
 };
 
 /**
+ * @class CacheMethodLoweringInfo
+ * @brief Cache the lowering info for methods called by INVOKEs.
+ */
+class CacheMethodLoweringInfo : public Pass {
+ public:
+  CacheMethodLoweringInfo() : Pass("CacheMethodLoweringInfo", kNoNodes) {
+  }
+
+  void Start(CompilationUnit* cUnit) const {
+    cUnit->mir_graph->DoCacheMethodLoweringInfo();
+  }
+};
+
+/**
  * @class CodeLayout
  * @brief Perform the code layout pass.
  */
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 5314bb7..b96c40d 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -19,6 +19,7 @@
 #include "dataflow_iterator-inl.h"
 #include "dex_instruction.h"
 #include "dex_instruction-inl.h"
+#include "dex/verified_method.h"
 #include "dex/quick/dex_file_method_inliner.h"
 #include "dex/quick/dex_file_to_method_inliner_map.h"
 #include "driver/compiler_options.h"
@@ -1168,6 +1169,121 @@
   }
 }
 
+void MIRGraph::DoCacheMethodLoweringInfo() {
+  static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };
+
+  // Embed the map value in the entry to avoid extra padding in 64-bit builds.
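+  // (On 64-bit, this struct is exactly 8 + 2 + 2 + 4 = 16 bytes. A std::pair
+  // of a separate key struct and a uint32_t value would be padded to 24 bytes
+  // by the pointer alignment.)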
+  struct MapEntry {
+    // Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
+    const MethodReference* devirt_target;
+    uint16_t target_method_idx;
+    uint16_t invoke_type;
+    // Map value.
+    uint32_t lowering_info_index;
+  };
+
+  // Sort INVOKEs by method index, then by opcode, then by devirtualization target.
+  struct MapEntryComparator {
+    bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
+      if (lhs.target_method_idx != rhs.target_method_idx) {
+        return lhs.target_method_idx < rhs.target_method_idx;
+      }
+      if (lhs.invoke_type != rhs.invoke_type) {
+        return lhs.invoke_type < rhs.invoke_type;
+      }
+      if (lhs.devirt_target != rhs.devirt_target) {
+        if (lhs.devirt_target == nullptr) {
+          return true;
+        }
+        if (rhs.devirt_target == nullptr) {
+          return false;
+        }
+        return devirt_cmp(*lhs.devirt_target, *rhs.devirt_target);
+      }
+      return false;
+    }
+    MethodReferenceComparator devirt_cmp;
+  };
+
+  // Map invoke key (see MapEntry) to lowering info index.
+  typedef std::set<MapEntry, MapEntryComparator, ScopedArenaAllocatorAdapter<MapEntry> > InvokeMap;
+
+  ScopedArenaAllocator allocator(&cu_->arena_stack);
+
+  // All INVOKE instructions take 3 code units and there must also be a RETURN.
+  uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 3u;
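+  // E.g. a code item with 10 code units can hold at most (10 - 1) / 3 == 3 INVOKEs.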
+
+  // The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
+  // multi_index_container with one ordered index and one sequential index.
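+  // The set de-duplicates entries while the plain array keeps them in insertion
+  // order, so lowering_info_index values remain stable.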
+  InvokeMap invoke_map(MapEntryComparator(), allocator.Adapter());
+  const MapEntry** sequential_entries = reinterpret_cast<const MapEntry**>(
+      allocator.Alloc(max_refs * sizeof(sequential_entries[0]), kArenaAllocMisc));
+
+  // Find INVOKE insns and their devirtualization targets. The verified method
+  // is the same for the whole compilation unit, so look it up once here.
+  const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
+  AllNodesIterator iter(this);
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+    if (bb->block_type != kDalvikByteCode) {
+      continue;
+    }
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+      if (mir->dalvikInsn.opcode >= Instruction::INVOKE_VIRTUAL &&
+          mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
+          mir->dalvikInsn.opcode != Instruction::RETURN_VOID_BARRIER) {
+        // Decode target method index and invoke type.
+        const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
+        uint16_t target_method_idx;
+        uint16_t invoke_type_idx;
+        if (mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE) {
+          target_method_idx = insn->VRegB_35c();
+          invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL;
+        } else {
+          target_method_idx = insn->VRegB_3rc();
+          invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL_RANGE;
+        }
+
+        // Find devirtualization target.
+        // TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
+        // ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
+        // and increment it as needed instead of making O(log n) lookups.
+        const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);
+
+        // Try to insert a new entry. If the insertion fails, we will have found an old one.
+        MapEntry entry = {
+            devirt_target,
+            target_method_idx,
+            invoke_types[invoke_type_idx],
+            static_cast<uint32_t>(invoke_map.size())
+        };
+        auto it = invoke_map.insert(entry).first;  // Iterator to either the old or the new entry.
+        mir->meta.method_lowering_info = it->lowering_info_index;
+        // If we didn't actually insert, this overwrites the existing slot with the same value.
+        sequential_entries[it->lowering_info_index] = &*it;
+      }
+    }
+  }
+
+  if (invoke_map.empty()) {
+    return;
+  }
+
+  // Prepare unique method infos, set method info indexes for their MIRs.
+  DCHECK_EQ(method_lowering_infos_.Size(), 0u);
+  const size_t count = invoke_map.size();
+  method_lowering_infos_.Resize(count);
+  for (size_t pos = 0u; pos != count; ++pos) {
+    const MapEntry* entry = sequential_entries[pos];
+    MirMethodLoweringInfo method_info(entry->target_method_idx,
+                                      static_cast<InvokeType>(entry->invoke_type));
+    if (entry->devirt_target != nullptr) {
+      method_info.SetDevirtualizationTarget(*entry->devirt_target);
+    }
+    method_lowering_infos_.Insert(method_info);
+  }
+  MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
+                                 method_lowering_infos_.GetRawStorage(), count);
+}
+
 bool MIRGraph::SkipCompilation(const std::string& methodname) {
   return cu_->compiler_driver->SkipCompilation(methodname);
 }
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 868730f..0b50e2f 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -89,7 +89,8 @@
       max_available_non_special_compiler_temps_(0),
       punt_to_interpreter_(false),
       ifield_lowering_infos_(arena, 0u),
-      sfield_lowering_infos_(arena, 0u) {
+      sfield_lowering_infos_(arena, 0u),
+      method_lowering_infos_(arena, 0u) {
   try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
   max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
       - std::abs(static_cast<int>(kVRegTempBaseReg));
@@ -1176,6 +1177,7 @@
   info->is_range = is_range;
   info->index = mir->dalvikInsn.vB;
   info->offset = mir->offset;
+  info->mir = mir;
   return info;
 }
 
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 94b3816..8a33414 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -20,8 +20,9 @@
 #include "dex_file.h"
 #include "dex_instruction.h"
 #include "compiler_ir.h"
-#include "mir_field_info.h"
 #include "invoke_type.h"
+#include "mir_field_info.h"
+#include "mir_method_info.h"
 #include "utils/arena_bit_vector.h"
 #include "utils/growable_array.h"
 #include "reg_storage.h"
@@ -267,6 +268,8 @@
     // SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
     // the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
     uint32_t sfield_lowering_info;
+    // INVOKE data index, points to MIRGraph::method_lowering_infos_.
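+    // Due to the limit on the number of code points (64K) and size of INVOKE insn (3),
+    // this will never exceed 22K.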
+    uint32_t method_lowering_info;
   } meta;
 };
 
@@ -365,6 +368,7 @@
   bool skip_this;
   bool is_range;
   DexOffset offset;      // Offset in code units.
+  MIR* mir;              // The MIR generating this call info; used for lowering info lookup.
 };
 
 
@@ -491,6 +495,13 @@
     return sfield_lowering_infos_.GetRawStorage()[mir->meta.sfield_lowering_info];
   }
 
+  void DoCacheMethodLoweringInfo();
+
+  const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) {
+    DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.Size());
+    return method_lowering_infos_.GetRawStorage()[mir->meta.method_lowering_info];
+  }
+
   void InitRegLocations();
 
   void RemapRegLocations();
@@ -950,6 +961,7 @@
   bool punt_to_interpreter_;                    // Difficult or not worthwhile - just interpret.
   GrowableArray<MirIFieldLoweringInfo> ifield_lowering_infos_;
   GrowableArray<MirSFieldLoweringInfo> sfield_lowering_infos_;
+  GrowableArray<MirMethodLoweringInfo> method_lowering_infos_;
 
   friend class LocalValueNumberingTest;
 };
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
new file mode 100644
index 0000000..4580e76
--- /dev/null
+++ b/compiler/dex/mir_method_info.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mir_method_info.h"
+
+#include "driver/compiler_driver.h"
+#include "driver/dex_compilation_unit.h"
+#include "driver/compiler_driver-inl.h"
+#include "mirror/class_loader.h"  // Only to allow casts in SirtRef<ClassLoader>.
+#include "mirror/dex_cache.h"     // Only to allow casts in SirtRef<DexCache>.
+#include "scoped_thread_state_change.h"
+#include "sirt_ref.h"
+
+namespace art {
+
+void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver,
+                                    const DexCompilationUnit* mUnit,
+                                    MirMethodLoweringInfo* method_infos, size_t count) {
+  if (kIsDebugBuild) {
+    DCHECK(method_infos != nullptr);
+    DCHECK_NE(count, 0u);
+    for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
+      MirMethodLoweringInfo unresolved(it->MethodIndex(), it->GetInvokeType());
+      if (it->target_dex_file_ != nullptr) {
+        unresolved.target_dex_file_ = it->target_dex_file_;
+        unresolved.target_method_idx_ = it->target_method_idx_;
+      }
+      DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+    }
+  }
+
+  // We're going to resolve methods and check access in a tight loop. It's better to acquire
+  // the lock and the needed references once rather than re-acquiring them again and again.
+  ScopedObjectAccess soa(Thread::Current());
+  SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
+  SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+      compiler_driver->GetClassLoader(soa, mUnit));
+  SirtRef<mirror::Class> referrer_class(soa.Self(),
+      compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+  // Even if the referrer class is unresolved (i.e. we're compiling a method without class
+  // definition) we still want to resolve methods and record all available info.
+
+  for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
+    // Remember devirtualized invoke target and set the called method to the default.
+    MethodReference devirt_ref(it->target_dex_file_, it->target_method_idx_);
+    MethodReference* devirt_target = (it->target_dex_file_ != nullptr) ? &devirt_ref : nullptr;
+    it->target_dex_file_ = mUnit->GetDexFile();
+    it->target_method_idx_ = it->MethodIndex();
+
+    InvokeType invoke_type = it->GetInvokeType();
+    mirror::ArtMethod* resolved_method =
+        compiler_driver->ResolveMethod(soa, dex_cache, class_loader, mUnit, it->MethodIndex(),
+                                       invoke_type);
+    if (UNLIKELY(resolved_method == nullptr)) {
+      continue;
+    }
+    compiler_driver->GetResolvedMethodDexFileLocation(resolved_method,
+        &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_method_idx_);
+    it->vtable_idx_ = compiler_driver->GetResolvedMethodVTableIndex(resolved_method, invoke_type);
+
+    MethodReference target_method(mUnit->GetDexFile(), it->MethodIndex());
+    int fast_path_flags = compiler_driver->IsFastInvoke(
+        soa, dex_cache, class_loader, mUnit, referrer_class.get(), resolved_method, &invoke_type,
+        &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
+    uint16_t other_flags = it->flags_ & ~kFlagFastPath & ~(kInvokeTypeMask << kBitSharpTypeBegin);
+    it->flags_ = other_flags |
+        (fast_path_flags != 0 ? kFlagFastPath : 0u) |
+        (static_cast<uint16_t>(invoke_type) << kBitSharpTypeBegin);
+    it->target_dex_file_ = target_method.dex_file;
+    it->target_method_idx_ = target_method.dex_method_index;
+    it->stats_flags_ = fast_path_flags;
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
new file mode 100644
index 0000000..a43238c
--- /dev/null
+++ b/compiler/dex/mir_method_info.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_MIR_METHOD_INFO_H_
+#define ART_COMPILER_DEX_MIR_METHOD_INFO_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "invoke_type.h"
+#include "method_reference.h"
+
+namespace art {
+
+class CompilerDriver;
+class DexCompilationUnit;
+class DexFile;
+
+class MirMethodInfo {
+ public:
+  uint16_t MethodIndex() const {
+    return method_idx_;
+  }
+
+  bool IsStatic() const {
+    return (flags_ & kFlagIsStatic) != 0u;
+  }
+
+  bool IsResolved() const {
+    return declaring_dex_file_ != nullptr;
+  }
+
+  const DexFile* DeclaringDexFile() const {
+    return declaring_dex_file_;
+  }
+
+  uint16_t DeclaringClassIndex() const {
+    return declaring_class_idx_;
+  }
+
+  uint16_t DeclaringMethodIndex() const {
+    return declaring_method_idx_;
+  }
+
+ protected:
+  enum {
+    kBitIsStatic = 0,
+    kMethodInfoBitEnd
+  };
+  COMPILE_ASSERT(kMethodInfoBitEnd <= 16, too_many_flags);
+  static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
+
+  MirMethodInfo(uint16_t method_idx, uint16_t flags)
+      : method_idx_(method_idx),
+        flags_(flags),
+        declaring_method_idx_(0u),
+        declaring_class_idx_(0u),
+        declaring_dex_file_(nullptr) {
+  }
+
+  // Make copy-ctor/assign/dtor protected to avoid slicing.
+  MirMethodInfo(const MirMethodInfo& other) = default;
+  MirMethodInfo& operator=(const MirMethodInfo& other) = default;
+  ~MirMethodInfo() = default;
+
+  // The method index in the compiling method's dex file.
+  uint16_t method_idx_;
+  // Flags, for IsStatic() and derived class data.
+  uint16_t flags_;
+  // The method index in the dex file that defines the method, 0 if unresolved.
+  uint16_t declaring_method_idx_;
+  // The type index of the class declaring the method, 0 if unresolved.
+  uint16_t declaring_class_idx_;
+  // The dex file that defines the class containing the method and the method,
+  // nullptr if unresolved.
+  const DexFile* declaring_dex_file_;
+};
+
+class MirMethodLoweringInfo : public MirMethodInfo {
+ public:
+  // For each requested method, retrieve the method's declaring location (dex file, class
+  // index and method index) and compute whether we can fast-path the method call. For
+  // fast-path methods, retrieve the method's vtable index and direct code and method when
+  // applicable.
+  static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
+                      MirMethodLoweringInfo* method_infos, size_t count)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
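+  // Typical call site (see MIRGraph::DoCacheMethodLoweringInfo()):
+  //   MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
+  //                                  method_lowering_infos_.GetRawStorage(), count);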
+
+  MirMethodLoweringInfo(uint16_t method_idx, InvokeType type)
+      : MirMethodInfo(method_idx,
+                      ((type == kStatic) ? kFlagIsStatic : 0u) |
+                      (static_cast<uint16_t>(type) << kBitInvokeTypeBegin) |
+                      (static_cast<uint16_t>(type) << kBitSharpTypeBegin)),
+        direct_code_(0u),
+        direct_method_(0u),
+        target_dex_file_(nullptr),
+        target_method_idx_(0u),
+        vtable_idx_(0u),
+        stats_flags_(0) {
+  }
+
+  void SetDevirtualizationTarget(const MethodReference& ref) {
+    DCHECK(target_dex_file_ == nullptr);
+    DCHECK_EQ(target_method_idx_, 0u);
+    DCHECK_LE(ref.dex_method_index, 0xffffu);
+    target_dex_file_ = ref.dex_file;
+    target_method_idx_ = ref.dex_method_index;
+  }
+
+  bool FastPath() const {
+    return (flags_ & kFlagFastPath) != 0u;
+  }
+
+  InvokeType GetInvokeType() const {
+    return static_cast<InvokeType>((flags_ >> kBitInvokeTypeBegin) & kInvokeTypeMask);
+  }
+
+  InvokeType GetSharpType() const {
+    return static_cast<InvokeType>((flags_ >> kBitSharpTypeBegin) & kInvokeTypeMask);
+  }
+
+  MethodReference GetTargetMethod() const {
+    return MethodReference(target_dex_file_, target_method_idx_);
+  }
+
+  uint16_t VTableIndex() const {
+    return vtable_idx_;
+  }
+
+  uintptr_t DirectCode() const {
+    return direct_code_;
+  }
+
+  uintptr_t DirectMethod() const {
+    return direct_method_;
+  }
+
+  int StatsFlags() const {
+    return stats_flags_;
+  }
+
+ private:
+  enum {
+    kBitFastPath = kMethodInfoBitEnd,
+    kBitInvokeTypeBegin,
+    kBitInvokeTypeEnd = kBitInvokeTypeBegin + 3,  // 3 bits for invoke type.
+    kBitSharpTypeBegin,
+    kBitSharpTypeEnd = kBitSharpTypeBegin + 3,  // 3 bits for sharp type.
+    kMethodLoweringInfoEnd = kBitSharpTypeEnd
+  };
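+  // Resulting layout: bit 0 is-static, bit 1 fast-path, bits 2-4 invoke type,
+  // bits 6-8 sharp type. (Bit 5 is unused; kBitSharpTypeBegin auto-increments
+  // from kBitInvokeTypeEnd.)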
+  COMPILE_ASSERT(kMethodLoweringInfoEnd <= 16, too_many_flags);
+  static constexpr uint16_t kFlagFastPath = 1u << kBitFastPath;
+  static constexpr uint16_t kInvokeTypeMask = 7u;
+  COMPILE_ASSERT((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
+                 assert_invoke_type_bits_ok);
+  COMPILE_ASSERT((1u << (kBitSharpTypeEnd - kBitSharpTypeBegin)) - 1u == kInvokeTypeMask,
+                 assert_sharp_type_bits_ok);
+
+  uintptr_t direct_code_;
+  uintptr_t direct_method_;
+  // Before Resolve(), target_dex_file_ and target_method_idx_ hold the verification-based
+  // devirtualized invoke target if available, nullptr and 0u otherwise.
+  // After Resolve() they hold the actual target method that will be called; it will be either
+  // a devirtualized target method or the compilation unit's dex file and MethodIndex().
+  const DexFile* target_dex_file_;
+  uint16_t target_method_idx_;
+  uint16_t vtable_idx_;
+  int stats_flags_;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_MIR_METHOD_INFO_H_
diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc
index 256bcb1..291012f 100644
--- a/compiler/dex/pass_driver.cc
+++ b/compiler/dex/pass_driver.cc
@@ -92,6 +92,7 @@
    */
   static const Pass* const passes[] = {
       GetPassInstance<CacheFieldLoweringInfo>(),
+      GetPassInstance<CacheMethodLoweringInfo>(),
       GetPassInstance<CodeLayout>(),
       GetPassInstance<SSATransformation>(),
       GetPassInstance<ConstantPropagation>(),
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 2f017c8..424cdd6 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -573,33 +573,32 @@
 static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
                                 int state,
                                 const MethodReference& target_method,
-                                uint32_t method_idx,
-                                uintptr_t unused, uintptr_t unused2,
-                                InvokeType unused3) {
+                                uint32_t unused, uintptr_t unused2,
+                                uintptr_t unused3, InvokeType unused4) {
   ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
 static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
-                                uint32_t method_idx, uintptr_t unused,
-                                uintptr_t unused2, InvokeType unused3) {
+                                uint32_t unused, uintptr_t unused2,
+                                uintptr_t unused3, InvokeType unused4) {
   ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
 static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                const MethodReference& target_method,
-                               uint32_t method_idx, uintptr_t unused,
-                               uintptr_t unused2, InvokeType unused3) {
+                               uint32_t unused, uintptr_t unused2,
+                               uintptr_t unused3, InvokeType unused4) {
   ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
 static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                            const MethodReference& target_method,
-                           uint32_t method_idx, uintptr_t unused,
-                           uintptr_t unused2, InvokeType unused3) {
+                           uint32_t unused, uintptr_t unused2,
+                           uintptr_t unused3, InvokeType unused4) {
   ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
@@ -607,9 +606,8 @@
 static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
                                                 CallInfo* info, int state,
                                                 const MethodReference& target_method,
-                                                uint32_t unused,
-                                                uintptr_t unused2, uintptr_t unused3,
-                                                InvokeType unused4) {
+                                                uint32_t unused, uintptr_t unused2,
+                                                uintptr_t unused3, InvokeType unused4) {
   ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
@@ -1400,7 +1398,6 @@
       return;
     }
   }
-  InvokeType original_type = info->type;  // avoiding mutation by ComputeInvokeInfo
   int call_state = 0;
   LIR* null_ck;
   LIR** p_null_ck = NULL;
@@ -1409,19 +1406,12 @@
   // Explicit register usage
   LockCallTemps();
 
-  DexCompilationUnit* cUnit = mir_graph_->GetCurrentDexCompilationUnit();
-  MethodReference target_method(cUnit->GetDexFile(), info->index);
-  int vtable_idx;
-  uintptr_t direct_code;
-  uintptr_t direct_method;
+  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
+  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
+  InvokeType original_type = method_info.GetInvokeType();
+  info->type = method_info.GetSharpType();
+  bool fast_path = method_info.FastPath();
   bool skip_this;
-  bool fast_path =
-      cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(),
-                                              current_dalvik_offset_,
-                                              true, true,
-                                              &info->type, &target_method,
-                                              &vtable_idx,
-                                              &direct_code, &direct_method) && !SLOW_INVOKE_PATH;
   if (info->type == kInterface) {
     next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
     skip_this = fast_path;
@@ -1443,29 +1433,29 @@
     next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
     skip_this = fast_path;
   }
+  MethodReference target_method = method_info.GetTargetMethod();
   if (!info->is_range) {
     call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
-                                      next_call_insn, target_method,
-                                      vtable_idx, direct_code, direct_method,
+                                      next_call_insn, target_method, method_info.VTableIndex(),
+                                      method_info.DirectCode(), method_info.DirectMethod(),
                                       original_type, skip_this);
   } else {
     call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
-                                    next_call_insn, target_method, vtable_idx,
-                                    direct_code, direct_method, original_type,
-                                    skip_this);
+                                    next_call_insn, target_method, method_info.VTableIndex(),
+                                    method_info.DirectCode(), method_info.DirectMethod(),
+                                    original_type, skip_this);
   }
   // Finish up any of the call sequence not interleaved in arg loading
   while (call_state >= 0) {
-    call_state = next_call_insn(cu_, info, call_state, target_method,
-                                vtable_idx, direct_code, direct_method,
-                                original_type);
+    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
+                                method_info.DirectCode(), method_info.DirectMethod(), original_type);
   }
   LIR* call_inst;
   if (cu_->instruction_set != kX86) {
     call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
   } else {
     if (fast_path) {
-      if (direct_code == static_cast<unsigned int>(-1)) {
+      if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
         // We can have the linker fixup a call relative.
         call_inst =
           reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(