Merge "Improve Checker error messages"
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index e30b968..c187536 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -361,6 +361,7 @@
                    const char* method_filter,
                    bool list_classes,
                    bool list_methods,
+                   bool dump_header_only,
                    const char* export_dex_location,
                    uint32_t addr2instr)
     : dump_raw_mapping_table_(dump_raw_mapping_table),
@@ -373,6 +374,7 @@
       method_filter_(method_filter),
       list_classes_(list_classes),
       list_methods_(list_methods),
+      dump_header_only_(dump_header_only),
       export_dex_location_(export_dex_location),
       addr2instr_(addr2instr),
       class_loader_(nullptr) {}
@@ -387,6 +389,7 @@
   const char* const method_filter_;
   const bool list_classes_;
   const bool list_methods_;
+  const bool dump_header_only_;
   const char* const export_dex_location_;
   uint32_t addr2instr_;
   Handle<mirror::ClassLoader>* class_loader_;
@@ -514,21 +517,24 @@
       os << StringPrintf("0x%08x\n\n", resolved_addr2instr_);
     }
 
-    for (size_t i = 0; i < oat_dex_files_.size(); i++) {
-      const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
-      CHECK(oat_dex_file != nullptr);
+    if (!options_.dump_header_only_) {
+      for (size_t i = 0; i < oat_dex_files_.size(); i++) {
+        const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
+        CHECK(oat_dex_file != nullptr);
 
-      // If file export selected skip file analysis
-      if (options_.export_dex_location_) {
-        if (!ExportDexFile(os, *oat_dex_file)) {
-          success = false;
-        }
-      } else {
-        if (!DumpOatDexFile(os, *oat_dex_file)) {
-          success = false;
+        // If file export selected, skip file analysis
+        if (options_.export_dex_location_) {
+          if (!ExportDexFile(os, *oat_dex_file)) {
+            success = false;
+          }
+        } else {
+          if (!DumpOatDexFile(os, *oat_dex_file)) {
+            success = false;
+          }
         }
       }
     }
+
     os << std::flush;
     return success;
   }
@@ -2572,6 +2578,8 @@
       dump_code_info_stack_maps_ = true;
     } else if (option == "--no-disassemble") {
       disassemble_code_ = false;
+    } else if (option == "--header-only") {
+      dump_header_only_ = true;
     } else if (option.starts_with("--symbolize=")) {
       oat_filename_ = option.substr(strlen("--symbolize=")).data();
       symbolize_ = true;
@@ -2655,6 +2663,9 @@
         "  --no-disassemble may be used to disable disassembly.\n"
         "      Example: --no-disassemble\n"
         "\n"
+        "  --header-only may be used to print only the oat header.\n"
+        "      Example: --header-only\n"
+        "\n"
         "  --list-classes may be used to list target file classes (can be used with filters).\n"
         "      Example: --list-classes\n"
         "      Example: --list-classes --class-filter=com.example.foo\n"
@@ -2697,6 +2708,7 @@
   bool symbolize_ = false;
   bool list_classes_ = false;
   bool list_methods_ = false;
+  bool dump_header_only_ = false;
   uint32_t addr2instr_ = 0;
   const char* export_dex_location_ = nullptr;
 };
@@ -2719,6 +2731,7 @@
         args_->method_filter_,
         args_->list_classes_,
         args_->list_methods_,
+        args_->dump_header_only_,
         args_->export_dex_location_,
         args_->addr2instr_));
 
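In effect, `--header-only` gates everything after the header dump behind a single flag threaded from the arg parser into the dumper options. A stand-alone C++ sketch of that control flow (Options and the dump loop here are illustrative stand-ins, not oatdump's real types):

    #include <cstring>
    #include <iostream>
    #include <string>
    #include <vector>

    // Illustrative stand-ins for oatdump's option struct and dump loop.
    struct Options {
      bool dump_header_only = false;
      const char* export_dex_location = nullptr;
    };

    bool DumpOatFile(const Options& options,
                     const std::vector<std::string>& dex_files) {
      bool success = true;
      std::cout << "oat header: ...\n";   // the header is always printed
      if (!options.dump_header_only) {    // per-dex analysis is now gated
        for (const std::string& dex : dex_files) {
          if (options.export_dex_location != nullptr) {
            std::cout << "exporting " << dex << "\n";
          } else {
            std::cout << "dumping " << dex << "\n";
          }
        }
      }
      return success;
    }

    int main(int argc, char** argv) {
      Options options;
      for (int i = 1; i < argc; ++i) {
        if (std::strcmp(argv[i], "--header-only") == 0) {
          options.dump_header_only = true;
        }
      }
      return DumpOatFile(options, {"classes.dex", "classes2.dex"}) ? 0 : 1;
    }
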
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index bfb1f9d..b7e5b30 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -295,9 +295,7 @@
     }
 
     jit::Jit* jit = Runtime::Current()->GetJit();
-    if (UNLIKELY(jit != nullptr &&
-                 jit->JitAtFirstUse() &&
-                 jit->GetCodeCache()->ContainsMethod(method))) {
+    if (jit != nullptr && jit->CanInvokeCompiledCode(method)) {
       JValue result;
 
       // Pop the shadow frame before calling into compiled code.
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index e1bde1b..ca727f4 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -150,7 +150,8 @@
   bool unhandled_instrumentation;
   // TODO: enable for other targets after more extensive testing.
   if ((kRuntimeISA == kArm64) || (kRuntimeISA == kArm) ||
-      (kRuntimeISA == kX86) || (kRuntimeISA == kMips)) {
+      (kRuntimeISA == kX86_64) || (kRuntimeISA == kX86) ||
+      (kRuntimeISA == kMips)) {
     unhandled_instrumentation = instrumentation->NonJitProfilingActive();
   } else {
     unhandled_instrumentation = instrumentation->IsActive();
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index 53fa50f..a1360e0 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -169,13 +169,23 @@
 #define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
 #define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
 
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
 /*
- *
- * The reference interpreter performs explicit suspect checks, which is somewhat wasteful.
- * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
- * mterp should do so as well.
+ * Profile branch. rINST should contain the offset. %eax is scratch.
  */
-#define MTERP_SUSPEND 0
+.macro MTERP_PROFILE_BRANCH
+#ifdef MTERP_PROFILE_BRANCHES
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    rINST, OUT_32_ARG2
+    call    SYMBOL(MterpProfileBranch)
+    testb   %al, %al
+    jnz     MterpOnStackReplacement
+#endif
+.endm
 
 /*
  * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
@@ -992,17 +1002,12 @@
  * double to get a byte offset.
  */
     /* goto +AA */
-    movsbq  rINSTbl, %rax                   # rax <- ssssssAA
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    movsbq  rINSTbl, rINSTq                 # rINSTq <- ssssssAA
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      1f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 /* ------------------------------ */
@@ -1016,17 +1021,12 @@
  * double to get a byte offset.
  */
     /* goto/16 +AAAA */
-    movswq  2(rPC), %rax                    # rax <- ssssAAAA
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    movswq  2(rPC), rINSTq                  # rINSTq <- ssssAAAA
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      1f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 /* ------------------------------ */
@@ -1043,17 +1043,12 @@
  * to convert from Dalvik offset to byte offset.
  */
     /* goto/32 +AAAAAAAA */
-    movslq  2(rPC), %rax                    # rax <- AAAAAAAA
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    movslq  2(rPC), rINSTq                  # rINSTq <- AAAAAAAA
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      1f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 /* ------------------------------ */
@@ -1074,17 +1069,12 @@
     leaq    (rPC,OUT_ARG0,2), OUT_ARG0      # rcx <- PC + BBBBbbbb*2
     GET_VREG OUT_32_ARG1, rINSTq            # eax <- vAA
     call    SYMBOL(MterpDoPackedSwitch)
-    addl    %eax, %eax
-    movslq  %eax, %rax
-    leaq    (rPC, %rax), rPC
+    movslq  %eax, rINSTq
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      1f
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue
     GOTO_NEXT
 
 /* ------------------------------ */
@@ -1106,17 +1096,12 @@
     leaq    (rPC,OUT_ARG0,2), OUT_ARG0      # rcx <- PC + BBBBbbbb*2
     GET_VREG OUT_32_ARG1, rINSTq            # eax <- vAA
     call    SYMBOL(MterpDoSparseSwitch)
-    addl    %eax, %eax
-    movslq  %eax, %rax
-    leaq    (rPC, %rax), rPC
+    movslq  %eax, rINSTq
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      1f
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue
     GOTO_NEXT
 
 
@@ -1324,20 +1309,15 @@
     andb    $0xf, %cl                      # rcx <- A
     GET_VREG %eax, %rcx                     # eax <- vA
     cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    movl    $2, %eax                       # assume not taken
+    movl    $2, rINST                      # assume not taken
     jne   1f
-    movswq  2(rPC),%rax                     # Get signed branch offset
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
 1:
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1359,20 +1339,15 @@
     andb    $0xf, %cl                      # rcx <- A
     GET_VREG %eax, %rcx                     # eax <- vA
     cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    movl    $2, %eax                       # assume not taken
+    movl    $2, rINST                      # assume not taken
     je   1f
-    movswq  2(rPC),%rax                     # Get signed branch offset
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
 1:
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1394,20 +1369,15 @@
     andb    $0xf, %cl                      # rcx <- A
     GET_VREG %eax, %rcx                     # eax <- vA
     cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    movl    $2, %eax                       # assume not taken
+    movl    $2, rINST                      # assume not taken
     jge   1f
-    movswq  2(rPC),%rax                     # Get signed branch offset
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
 1:
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1429,20 +1399,15 @@
     andb    $0xf, %cl                      # rcx <- A
     GET_VREG %eax, %rcx                     # eax <- vA
     cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    movl    $2, %eax                       # assume not taken
+    movl    $2, rINST                      # assume not taken
     jl   1f
-    movswq  2(rPC),%rax                     # Get signed branch offset
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
 1:
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1464,20 +1429,15 @@
     andb    $0xf, %cl                      # rcx <- A
     GET_VREG %eax, %rcx                     # eax <- vA
     cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    movl    $2, %eax                       # assume not taken
+    movl    $2, rINST                      # assume not taken
     jle   1f
-    movswq  2(rPC),%rax                     # Get signed branch offset
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
 1:
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1499,20 +1459,15 @@
     andb    $0xf, %cl                      # rcx <- A
     GET_VREG %eax, %rcx                     # eax <- vA
     cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    movl    $2, %eax                       # assume not taken
+    movl    $2, rINST                      # assume not taken
     jg   1f
-    movswq  2(rPC),%rax                     # Get signed branch offset
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
 1:
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1530,20 +1485,15 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    movl    $2, %eax                       # assume branch not taken
+    movl    $2, rINST                      # assume branch not taken
     jne   1f
-    movswq  2(rPC),%rax                     # fetch signed displacement
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
 1:
-    addq    %rax, %rax                      # eax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1561,20 +1511,15 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    movl    $2, %eax                       # assume branch not taken
+    movl    $2, rINST                      # assume branch not taken
     je   1f
-    movswq  2(rPC),%rax                     # fetch signed displacement
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
 1:
-    addq    %rax, %rax                      # eax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1592,20 +1537,15 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    movl    $2, %eax                       # assume branch not taken
+    movl    $2, rINST                      # assume branch not taken
     jge   1f
-    movswq  2(rPC),%rax                     # fetch signed displacement
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
 1:
-    addq    %rax, %rax                      # eax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1623,20 +1563,15 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    movl    $2, %eax                       # assume branch not taken
+    movl    $2, rINST                      # assume branch not taken
     jl   1f
-    movswq  2(rPC),%rax                     # fetch signed displacement
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
 1:
-    addq    %rax, %rax                      # eax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1654,20 +1589,15 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    movl    $2, %eax                       # assume branch not taken
+    movl    $2, rINST                      # assume branch not taken
     jle   1f
-    movswq  2(rPC),%rax                     # fetch signed displacement
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
 1:
-    addq    %rax, %rax                      # eax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -1685,20 +1615,15 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    movl    $2, %eax                       # assume branch not taken
+    movl    $2, rINST                      # assume branch not taken
     jg   1f
-    movswq  2(rPC),%rax                     # fetch signed displacement
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
 1:
-    addq    %rax, %rax                      # eax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
 
 
@@ -2931,7 +2856,12 @@
     call    SYMBOL(MterpInvokeVirtual)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
 
 /*
  * Handle a virtual method call.
@@ -2961,7 +2891,12 @@
     call    SYMBOL(MterpInvokeSuper)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
 
 /*
  * Handle a "super" method call.
@@ -2991,7 +2926,12 @@
     call    SYMBOL(MterpInvokeDirect)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3014,7 +2954,12 @@
     call    SYMBOL(MterpInvokeStatic)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
 
 
 
@@ -3038,7 +2983,12 @@
     call    SYMBOL(MterpInvokeInterface)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
 
 /*
  * Handle an interface method call.
@@ -3080,7 +3030,12 @@
     call    SYMBOL(MterpInvokeVirtualRange)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3103,7 +3058,12 @@
     call    SYMBOL(MterpInvokeSuperRange)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3126,7 +3086,12 @@
     call    SYMBOL(MterpInvokeDirectRange)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3149,7 +3114,12 @@
     call    SYMBOL(MterpInvokeStaticRange)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3172,7 +3142,12 @@
     call    SYMBOL(MterpInvokeInterfaceRange)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -5811,7 +5786,12 @@
     call    SYMBOL(MterpInvokeVirtualQuick)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -5834,7 +5814,12 @@
     call    SYMBOL(MterpInvokeVirtualQuickRange)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -11805,7 +11790,6 @@
  * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
  * TUNING: for consistency, we may want to just go ahead and handle these here.
  */
-#define MTERP_LOGGING 0
 common_errDivideByZero:
     EXPORT_PC
 #if MTERP_LOGGING
@@ -11891,13 +11875,17 @@
     call    SYMBOL(MterpHandleException)
     testb   %al, %al
     jz      MterpExceptionReturn
-    REFRESH_IBASE
     movq    OFF_FP_CODE_ITEM(rFP), %rax
     mov     OFF_FP_DEX_PC(rFP), %ecx
     leaq    CODEITEM_INSNS_OFFSET(%rax), rPC
     leaq    (rPC, %rcx, 2), rPC
     movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
+    /* Do we need to switch interpreters? */
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     /* resume execution at catch block */
+    REFRESH_IBASE
     FETCH_INST
     GOTO_NEXT
     /* NOTE: no fallthrough */
@@ -11917,6 +11905,19 @@
     GOTO_NEXT
 
 /*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    rINST, OUT_32_ARG2
+    call    SYMBOL(MterpLogOSR)
+#endif
+    movl    $1, %eax
+    jmp     MterpDone
+
+/*
  * Bail out to reference interpreter.
  */
 MterpFallback:
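
Taken together, the MTERP_PROFILE_BRANCH macro and the new MterpOnStackReplacement stub give every branch the same shape: stash the offset in rINST, let the runtime profile the branch, divert to OSR if the runtime asks for it, and otherwise fall into the usual backward-branch suspend check. A C++ rendering of that flow (the hotness threshold and all the state below are invented for illustration; the real helpers are MterpProfileBranch and MterpCheckSuspendAndContinue):

    #include <cstdint>
    #include <iostream>

    // Illustrative interpreter state; the real mterp keeps this in registers
    // (rPC, rINST) and calls runtime helpers instead of the stand-ins below.
    struct InterpState {
      const uint16_t* pc = nullptr;  // rPC: pointer to the current dex code unit
      int32_t inst = 0;              // rINST: here, the signed branch offset
      int backedge_count = 0;        // invented hotness bookkeeping
    };

    // Stand-in for MterpProfileBranch: record the branch and return true when
    // an on-stack-replacement (OSR) entry should take over. The threshold is
    // made up; the real decision lives in the runtime.
    bool ProfileBranch(InterpState& s, int32_t offset) {
      return offset <= 0 && ++s.backedge_count >= 1000;  // hot backward branch
    }

    enum class Next { kContinue, kSuspendCheck, kOnStackReplacement };

    // One branch instruction, mirroring MTERP_PROFILE_BRANCH + the jle above.
    Next TakeBranch(InterpState& s, int32_t offset) {
      s.inst = offset;                     // movsbq rINSTbl, rINSTq
      if (ProfileBranch(s, offset)) {      // call MterpProfileBranch; testb; jnz
        return Next::kOnStackReplacement;  // -> MterpOnStackReplacement
      }
      s.pc += offset;                      // addq/leaq: advance by offset code units
      if (offset <= 0) {                   // jle MterpCheckSuspendAndContinue
        return Next::kSuspendCheck;        // backward branch => suspend check
      }
      return Next::kContinue;              // GOTO_NEXT
    }

    int main() {
      InterpState s;
      uint16_t code[16] = {};
      s.pc = code + 8;
      std::cout << static_cast<int>(TakeBranch(s, -2)) << "\n";  // 1: suspend check
    }
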
diff --git a/runtime/interpreter/mterp/x86_64/bincmp.S b/runtime/interpreter/mterp/x86_64/bincmp.S
index 5e4225f..a16050b 100644
--- a/runtime/interpreter/mterp/x86_64/bincmp.S
+++ b/runtime/interpreter/mterp/x86_64/bincmp.S
@@ -11,18 +11,13 @@
     andb    $$0xf, %cl                      # rcx <- A
     GET_VREG %eax, %rcx                     # eax <- vA
     cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
-    movl    $$2, %eax                       # assume not taken
+    movl    $$2, rINST                      # assume not taken
     j${revcmp}   1f
-    movswq  2(rPC),%rax                     # Get signed branch offset
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
 1:
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/footer.S b/runtime/interpreter/mterp/x86_64/footer.S
index cb60c01..573256b 100644
--- a/runtime/interpreter/mterp/x86_64/footer.S
+++ b/runtime/interpreter/mterp/x86_64/footer.S
@@ -12,7 +12,6 @@
  * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
  * TUNING: for consistency, we may want to just go ahead and handle these here.
  */
-#define MTERP_LOGGING 0
 common_errDivideByZero:
     EXPORT_PC
 #if MTERP_LOGGING
@@ -98,13 +97,17 @@
     call    SYMBOL(MterpHandleException)
     testb   %al, %al
     jz      MterpExceptionReturn
-    REFRESH_IBASE
     movq    OFF_FP_CODE_ITEM(rFP), %rax
     mov     OFF_FP_DEX_PC(rFP), %ecx
     leaq    CODEITEM_INSNS_OFFSET(%rax), rPC
     leaq    (rPC, %rcx, 2), rPC
     movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
+    /* Do we need to switch interpreters? */
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     /* resume execution at catch block */
+    REFRESH_IBASE
     FETCH_INST
     GOTO_NEXT
     /* NOTE: no fallthrough */
@@ -124,6 +127,19 @@
     GOTO_NEXT
 
 /*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    rINST, OUT_32_ARG2
+    call    SYMBOL(MterpLogOSR)
+#endif
+    movl    $$1, %eax
+    jmp     MterpDone
+
+/*
  * Bail out to reference interpreter.
  */
 MterpFallback:
diff --git a/runtime/interpreter/mterp/x86_64/header.S b/runtime/interpreter/mterp/x86_64/header.S
index dfc7b53..eb84ea1 100644
--- a/runtime/interpreter/mterp/x86_64/header.S
+++ b/runtime/interpreter/mterp/x86_64/header.S
@@ -162,13 +162,23 @@
 #define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
 #define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
 
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
 /*
- *
- * The reference interpreter performs explicit suspect checks, which is somewhat wasteful.
- * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
- * mterp should do so as well.
+ * Profile branch. rINST should contain the offset. %eax is scratch.
  */
-#define MTERP_SUSPEND 0
+.macro MTERP_PROFILE_BRANCH
+#ifdef MTERP_PROFILE_BRANCHES
+    EXPORT_PC
+    movq    rSELF, OUT_ARG0
+    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+    movl    rINST, OUT_32_ARG2
+    call    SYMBOL(MterpProfileBranch)
+    testb   %al, %al
+    jnz     MterpOnStackReplacement
+#endif
+.endm
 
 /*
  * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
diff --git a/runtime/interpreter/mterp/x86_64/invoke.S b/runtime/interpreter/mterp/x86_64/invoke.S
index 86eccdb..f7e6155 100644
--- a/runtime/interpreter/mterp/x86_64/invoke.S
+++ b/runtime/interpreter/mterp/x86_64/invoke.S
@@ -14,4 +14,9 @@
     call    SYMBOL($helper)
     testb   %al, %al
     jz      MterpException
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
+    FETCH_INST
+    GOTO_NEXT
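
The rewritten invoke epilogue replaces the single ADVANCE_PC_FETCH_AND_GOTO_NEXT with an explicit interpreter-switch check, since the callee may have changed the instrumentation state. A sketch of that decision (the boolean flag stands in for whatever MterpShouldSwitchInterpreters actually consults):

    #include <cstdint>
    #include <iostream>

    // Stand-in for MterpShouldSwitchInterpreters; faked with a flag so the
    // control flow can be exercised.
    bool should_switch = false;
    bool ShouldSwitchInterpreters() { return should_switch; }

    enum class Next { kFetchAndGo, kFallback };

    // After a successful invoke: advance past the 3-code-unit instruction,
    // then either keep running in mterp or bail to the reference interpreter.
    Next InvokeEpilogue(const uint16_t*& pc) {
      pc += 3;                           // ADVANCE_PC 3
      if (ShouldSwitchInterpreters()) {  // call + testb + jnz MterpFallback
        return Next::kFallback;
      }
      return Next::kFetchAndGo;          // FETCH_INST; GOTO_NEXT
    }

    int main() {
      uint16_t code[8] = {};
      const uint16_t* pc = code;
      should_switch = true;
      std::cout << static_cast<int>(InvokeEpilogue(pc)) << "\n";  // 1: fallback
    }
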
diff --git a/runtime/interpreter/mterp/x86_64/op_goto.S b/runtime/interpreter/mterp/x86_64/op_goto.S
index 05a2dda..c4fc976 100644
--- a/runtime/interpreter/mterp/x86_64/op_goto.S
+++ b/runtime/interpreter/mterp/x86_64/op_goto.S
@@ -5,15 +5,10 @@
  * double to get a byte offset.
  */
     /* goto +AA */
-    movsbq  rINSTbl, %rax                   # rax <- ssssssAA
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    movsbq  rINSTbl, rINSTq                 # rINSTq <- ssssssAA
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      1f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/op_goto_16.S b/runtime/interpreter/mterp/x86_64/op_goto_16.S
index 029749c..8cb9a5c 100644
--- a/runtime/interpreter/mterp/x86_64/op_goto_16.S
+++ b/runtime/interpreter/mterp/x86_64/op_goto_16.S
@@ -5,15 +5,10 @@
  * double to get a byte offset.
  */
     /* goto/16 +AAAA */
-    movswq  2(rPC), %rax                    # rax <- ssssAAAA
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    movswq  2(rPC), rINSTq                  # rINSTq <- ssssAAAA
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      1f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/op_goto_32.S b/runtime/interpreter/mterp/x86_64/op_goto_32.S
index 2823310..4ecdacd 100644
--- a/runtime/interpreter/mterp/x86_64/op_goto_32.S
+++ b/runtime/interpreter/mterp/x86_64/op_goto_32.S
@@ -8,15 +8,10 @@
  * to convert from Dalvik offset to byte offset.
  */
     /* goto/32 +AAAAAAAA */
-    movslq  2(rPC), %rax                    # rax <- AAAAAAAA
-    addq    %rax, %rax                      # rax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    movslq  2(rPC), rINSTq                  # rINSTq <- AAAAAAAA
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      1f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/op_packed_switch.S b/runtime/interpreter/mterp/x86_64/op_packed_switch.S
index 0400ca4..cb0acb7 100644
--- a/runtime/interpreter/mterp/x86_64/op_packed_switch.S
+++ b/runtime/interpreter/mterp/x86_64/op_packed_switch.S
@@ -13,15 +13,10 @@
     leaq    (rPC,OUT_ARG0,2), OUT_ARG0      # rcx <- PC + BBBBbbbb*2
     GET_VREG OUT_32_ARG1, rINSTq            # eax <- vAA
     call    SYMBOL($func)
-    addl    %eax, %eax
-    movslq  %eax, %rax
-    leaq    (rPC, %rax), rPC
+    movslq  %eax, rINSTq
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      1f
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-1:
+    jle     MterpCheckSuspendAndContinue
     GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/zcmp.S b/runtime/interpreter/mterp/x86_64/zcmp.S
index e503ec1..0051407 100644
--- a/runtime/interpreter/mterp/x86_64/zcmp.S
+++ b/runtime/interpreter/mterp/x86_64/zcmp.S
@@ -7,18 +7,13 @@
  */
     /* if-cmp vAA, +BBBB */
     cmpl    $$0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
-    movl    $$2, %eax                       # assume branch not taken
+    movl    $$2, rINST                      # assume branch not taken
     j${revcmp}   1f
-    movswq  2(rPC),%rax                     # fetch signed displacement
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
 1:
-    addq    %rax, %rax                      # eax <- AA * 2
-    leaq    (rPC, %rax), rPC
+    MTERP_PROFILE_BRANCH
+    addq    rINSTq, rINSTq                  # rINSTq <- AA * 2
+    leaq    (rPC, rINSTq), rPC
     FETCH_INST
-    jg      2f                              # AA * 2 > 0 => no suspend check
-#if MTERP_SUSPEND
-    REFRESH_IBASE
-#else
-    jmp     MterpCheckSuspendAndContinue
-#endif
-2:
+    jle     MterpCheckSuspendAndContinue    # AA * 2 <= 0 => suspend check
     GOTO_NEXT
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 3e66ce2..91b006a 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -213,6 +213,10 @@
   return false;
 }
 
+bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
+  return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
+}
+
 Jit::~Jit() {
   DCHECK(!save_profiling_info_ || !ProfileSaver::IsStarted());
   if (dump_info_on_shutdown_) {
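
CanInvokeCompiledCode reduces to an address-range membership test on the method's current entry point. A self-contained sketch with stand-in types (the bounds, Method, and JitCodeCache here are illustrative, not the real ART classes):

    #include <cstdint>
    #include <iostream>

    struct Method {
      const void* entry_point = nullptr;  // quick compiled-code entry point
    };

    struct JitCodeCache {
      uintptr_t begin = 0, end = 0;  // bounds of the executable mapping
      bool ContainsPc(const void* pc) const {
        uintptr_t p = reinterpret_cast<uintptr_t>(pc);
        return begin <= p && p < end;
      }
    };

    struct Jit {
      JitCodeCache* code_cache = nullptr;
      // Replaces JitAtFirstUse() && ContainsMethod(method): checking the
      // entry point directly means any method whose entry point was already
      // updated to cached code can be invoked as compiled code.
      bool CanInvokeCompiledCode(const Method* method) const {
        return code_cache->ContainsPc(method->entry_point);
      }
    };

    int main() {
      char region[64];
      JitCodeCache cache{reinterpret_cast<uintptr_t>(region),
                         reinterpret_cast<uintptr_t>(region) + sizeof(region)};
      Jit jit{&cache};
      Method m{region + 8};
      std::cout << std::boolalpha << jit.CanInvokeCompiledCode(&m) << "\n";  // true
    }
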
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 109ca3d..3f54192 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -84,8 +84,12 @@
   // into the specified class linker to the jit debug interface,
   void DumpTypeInfoForLoadedTypes(ClassLinker* linker);
 
+  // Return whether we should try to JIT compile code as soon as an ArtMethod is invoked.
   bool JitAtFirstUse();
 
+  // Return whether we can invoke JIT code for `method`.
+  bool CanInvokeCompiledCode(ArtMethod* method);
+
   // If an OSR compiled version is available for `method`,
   // and `dex_pc + dex_pc_offset` is an entry point of that compiled
   // version, this method will jump to the compiled code, let it run,
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 8858b48..e8a7189 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -123,7 +123,7 @@
       current_capacity_(initial_code_capacity + initial_data_capacity),
       code_end_(initial_code_capacity),
       data_end_(initial_data_capacity),
-      has_done_full_collection_(false),
+      last_collection_increased_code_cache_(false),
       last_update_time_ns_(0),
       garbage_collect_code_(garbage_collect_code),
       used_memory_for_data_(0),
@@ -546,34 +546,20 @@
   }
 }
 
-void JitCodeCache::RemoveUnusedCode(Thread* self) {
-  // Clear the osr map, chances are most of the code in it is now dead.
-  {
-    MutexLock mu(self, lock_);
-    osr_code_map_.clear();
-  }
-
-  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
-  MarkCompiledCodeOnThreadStacks(self);
-
-  // Iterate over all compiled code and remove entries that are not marked and not
-  // the entrypoint of their corresponding ArtMethod.
-  {
-    MutexLock mu(self, lock_);
-    ScopedCodeCacheWrite scc(code_map_.get());
-    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
-      const void* code_ptr = it->first;
-      ArtMethod* method = it->second;
-      uintptr_t allocation = FromCodeToAllocation(code_ptr);
-      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-      if ((method->GetEntryPointFromQuickCompiledCode() != method_header->GetEntryPoint()) &&
-          !GetLiveBitmap()->Test(allocation)) {
-        FreeCode(code_ptr, method);
-        it = method_code_map_.erase(it);
-      } else {
-        ++it;
-      }
-    }
+bool JitCodeCache::ShouldDoFullCollection() {
+  if (current_capacity_ == max_capacity_) {
+    // Always do a full collection when the code cache is full.
+    return true;
+  } else if (current_capacity_ < kReservedCapacity) {
+    // Always do partial collection when the code cache size is below the reserved
+    // capacity.
+    return false;
+  } else if (last_collection_increased_code_cache_) {
+    // This time do a full collection.
+    return true;
+  } else {
+    // This time do a partial collection.
+    return false;
   }
 }
 
@@ -599,21 +585,10 @@
     }
   }
 
-  // Check if we want to do a full collection.
-  bool do_full_collection = true;
+  bool do_full_collection = false;
   {
     MutexLock mu(self, lock_);
-    if (current_capacity_ == max_capacity_) {
-      // Always do a full collection when the code cache is full.
-      do_full_collection = true;
-    } else if (current_capacity_ < kReservedCapacity) {
-      // Do a partial collection until we hit the reserved capacity limit.
-      do_full_collection = false;
-    } else if (has_done_full_collection_) {
-      // Do a partial collection if we have done a full collection in the last
-      // collection round.
-      do_full_collection = false;
-    }
+    do_full_collection = ShouldDoFullCollection();
   }
 
   if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
@@ -624,45 +599,91 @@
               << ", data=" << PrettySize(DataCacheSize());
   }
 
-  if (do_full_collection) {
-    DoFullCollection(self);
-  } else {
-    RemoveUnusedCode(self);
-  }
-
-  {
-    MutexLock mu(self, lock_);
-    if (!do_full_collection) {
-      has_done_full_collection_ = false;
-      IncreaseCodeCacheCapacity();
-    } else {
-      has_done_full_collection_ = true;
-    }
-    live_bitmap_.reset(nullptr);
-    NotifyCollectionDone(self);
-  }
+  DoCollection(self, /* collect_profiling_info */ do_full_collection);
 
   if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
     LOG(INFO) << "After code cache collection, code="
               << PrettySize(CodeCacheSize())
               << ", data=" << PrettySize(DataCacheSize());
   }
-}
 
-void JitCodeCache::DoFullCollection(Thread* self) {
-  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   {
     MutexLock mu(self, lock_);
-    // Walk over all compiled methods and set the entry points of these
-    // methods to interpreter.
-    for (auto& it : method_code_map_) {
-      instrumentation->UpdateMethodsCode(it.second, GetQuickToInterpreterBridge());
+
+    // Increase the code cache only when we do partial collections.
+    // TODO: base this strategy on how full the code cache is?
+    if (do_full_collection) {
+      last_collection_increased_code_cache_ = false;
+    } else {
+      last_collection_increased_code_cache_ = true;
+      IncreaseCodeCacheCapacity();
     }
 
-    // Clear the profiling info of methods that are not being compiled.
-    for (ProfilingInfo* info : profiling_infos_) {
-      if (!info->IsMethodBeingCompiled()) {
-        info->GetMethod()->SetProfilingInfo(nullptr);
+    bool next_collection_will_be_full = ShouldDoFullCollection();
+
+    // Start polling the liveness of compiled code to prepare for the next full collection.
+    // We avoid doing this if exit stubs are installed to not mess with the instrumentation.
+    // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
+    if (!Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled() &&
+        next_collection_will_be_full) {
+      // Save the entry point of methods we have compiled, and update the entry
+      // point of those methods to the interpreter. If the method is invoked, the
+      // interpreter will update its entry point to the compiled code and call it.
+      for (ProfilingInfo* info : profiling_infos_) {
+        const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+        if (ContainsPc(entry_point)) {
+          info->SetSavedEntryPoint(entry_point);
+          info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+        }
+      }
+
+      DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
+    }
+    live_bitmap_.reset(nullptr);
+    NotifyCollectionDone(self);
+  }
+}
+
+void JitCodeCache::RemoveUnusedAndUnmarkedCode(Thread* self) {
+  MutexLock mu(self, lock_);
+  ScopedCodeCacheWrite scc(code_map_.get());
+  // Iterate over all compiled code and remove entries that are not marked and not
+  // the entrypoint of their corresponding ArtMethod.
+  for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+    const void* code_ptr = it->first;
+    ArtMethod* method = it->second;
+    uintptr_t allocation = FromCodeToAllocation(code_ptr);
+    const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+    const void* entrypoint = method->GetEntryPointFromQuickCompiledCode();
+    if ((entrypoint == method_header->GetEntryPoint()) || GetLiveBitmap()->Test(allocation)) {
+      ++it;
+    } else {
+      if (entrypoint == GetQuickToInterpreterBridge()) {
+        method->ClearCounter();
+      }
+      FreeCode(code_ptr, method);
+      it = method_code_map_.erase(it);
+    }
+  }
+}
+
+void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
+  {
+    MutexLock mu(self, lock_);
+    if (collect_profiling_info) {
+      // Clear the profiling info of methods that do not have compiled code as entrypoint.
+      // Also remove the saved entry point from the ProfilingInfo objects.
+      for (ProfilingInfo* info : profiling_infos_) {
+        const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+        if (!ContainsPc(ptr) && !info->IsMethodBeingCompiled()) {
+          info->GetMethod()->SetProfilingInfo(nullptr);
+        }
+        info->SetSavedEntryPoint(nullptr);
+      }
+    } else if (kIsDebugBuild) {
+      // Sanity check that the profiling infos do not have a dangling entry point.
+      for (ProfilingInfo* info : profiling_infos_) {
+        DCHECK(info->GetSavedEntryPoint() == nullptr);
       }
     }
 
@@ -674,41 +695,50 @@
   // Run a checkpoint on all threads to mark the JIT compiled code they are running.
   MarkCompiledCodeOnThreadStacks(self);
 
-  {
-    MutexLock mu(self, lock_);
-    // Free unused compiled code, and restore the entry point of used compiled code.
-    {
-      ScopedCodeCacheWrite scc(code_map_.get());
-      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
-        const void* code_ptr = it->first;
-        ArtMethod* method = it->second;
-        uintptr_t allocation = FromCodeToAllocation(code_ptr);
-        const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-        if (GetLiveBitmap()->Test(allocation)) {
-          instrumentation->UpdateMethodsCode(method, method_header->GetEntryPoint());
-          ++it;
-        } else {
-          method->ClearCounter();
-          DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
-          FreeCode(code_ptr, method);
-          it = method_code_map_.erase(it);
-        }
-      }
-    }
+  // Remove compiled code that is not the entrypoint of its method and not on the
+  // call stack.
+  RemoveUnusedAndUnmarkedCode(self);
 
-    // Free all profiling infos of methods that were not being compiled.
+  if (collect_profiling_info) {
+    MutexLock mu(self, lock_);
+    // Free all profiling infos of methods that are neither compiled nor being compiled.
     auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
       [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
-        if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
+        const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+        if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
+          // Make sure compiled methods have a ProfilingInfo object. It is needed for
+          // code cache collection.
+          info->GetMethod()->SetProfilingInfo(info);
+        } else if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) != info) {
+          // No need for this ProfilingInfo object anymore.
           FreeData(reinterpret_cast<uint8_t*>(info));
           return true;
         }
         return false;
       });
     profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());
+    DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
   }
 }
 
+bool JitCodeCache::CheckLiveCompiledCodeHasProfilingInfo() {
+  // Check that methods we have compiled do have a ProfilingInfo object. We would
+  // have memory leaks of compiled code otherwise.
+  for (const auto& it : method_code_map_) {
+    ArtMethod* method = it.second;
+    if (method->GetProfilingInfo(sizeof(void*)) == nullptr) {
+      const void* code_ptr = it.first;
+      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+      if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
+        // If the code is not dead, then we have a problem. Note that this can even
+        // happen just after a collection, as mutator threads are running in parallel
+        // and could deoptimize an existing compiled code.
+        return false;
+      }
+    }
+  }
+  return true;
+}
 
 OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
   static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
@@ -849,6 +879,13 @@
 
 void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
                                              const OatQuickMethodHeader* header) {
+  ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
+  if ((profiling_info != nullptr) &&
+      (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
+    // Prevent future uses of the compiled code.
+    profiling_info->SetSavedEntryPoint(nullptr);
+  }
+
   if (method->GetEntryPointFromQuickCompiledCode() == header->GetEntryPoint()) {
     // The entrypoint is the one to invalidate, so we just update
     // it to the interpreter entry point and clear the counter to get the method
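
The collection strategy now alternates partial and full rounds, and ahead of a full round it polls liveness by parking entry points. A sketch of the parking half of that protocol with stand-in types (the restore half appears after the jit_instrumentation.cc hunk below; kBridge stands in for GetQuickToInterpreterBridge()):

    #include <cstdint>
    #include <vector>

    // Before a full collection, each compiled method's entry point is parked
    // in its ProfilingInfo and the method is routed back through the
    // interpreter bridge. A later invocation restores the parked pointer,
    // proving the code is live; code whose entry point is never restored is
    // a collection candidate.
    struct ProfilingInfo;

    struct Method {
      const void* entry_point = nullptr;
      ProfilingInfo* profiling_info = nullptr;
    };

    struct ProfilingInfo {
      Method* method = nullptr;
      const void* saved_entry_point = nullptr;
    };

    static const char kBridge = 0;  // placeholder interpreter-bridge address

    struct CodeCache {
      uintptr_t begin = 0, end = 0;  // executable region bounds
      std::vector<ProfilingInfo*> profiling_infos;

      bool ContainsPc(const void* pc) const {
        uintptr_t p = reinterpret_cast<uintptr_t>(pc);
        return begin <= p && p < end;
      }

      // Runs when the *next* collection will be full: park every compiled
      // entry point and route the method through the interpreter bridge.
      void StartLivenessPolling() {
        for (ProfilingInfo* info : profiling_infos) {
          const void* ep = info->method->entry_point;
          if (ContainsPc(ep)) {
            info->saved_entry_point = ep;
            info->method->entry_point = &kBridge;
          }
        }
      }
    };
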
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 4574edf..7b33b92 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -124,6 +124,11 @@
     return live_bitmap_.get();
   }
 
+  // Return whether we should do a full collection given the current state of the cache.
+  bool ShouldDoFullCollection()
+      REQUIRES(lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   // Perform a collection on the code cache.
   void GarbageCollectCache(Thread* self)
       REQUIRES(!lock_)
@@ -235,11 +240,11 @@
   // Set the footprint limit of the code cache.
   void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);
 
-  void DoFullCollection(Thread* self)
+  void DoCollection(Thread* self, bool collect_profiling_info)
       REQUIRES(!lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void RemoveUnusedCode(Thread* self)
+  void RemoveUnusedAndUnmarkedCode(Thread* self)
       REQUIRES(!lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -247,6 +252,10 @@
       REQUIRES(!lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  bool CheckLiveCompiledCodeHasProfilingInfo()
+      REQUIRES(lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   // Lock for guarding allocations, collections, and the method_code_map_.
   Mutex lock_;
   // Condition to wait on during collection.
@@ -282,8 +291,8 @@
   // The current footprint in bytes of the data portion of the code cache.
   size_t data_end_ GUARDED_BY(lock_);
 
-  // Whether a full collection has already been done on the current capacity.
-  bool has_done_full_collection_ GUARDED_BY(lock_);
+  // Whether the last collection round increased the code cache.
+  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);
 
   // Last time the code_cache was updated.
   // It is atomic to avoid locking when reading it.
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 46c362a..d751e5a 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -187,7 +187,18 @@
     return;
   }
 
-  instrumentation_cache_->AddSamples(thread, method, 1);
+  ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
+  // Update the entrypoint if the ProfilingInfo has one. The interpreter will call it
+  // instead of interpreting the method.
+  // We avoid doing this if exit stubs are installed to not mess with the instrumentation.
+  // TODO(ngeoffray): Clean up instrumentation and code cache interactions.
+  if ((profiling_info != nullptr) &&
+      (profiling_info->GetSavedEntryPoint() != nullptr) &&
+      !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) {
+    method->SetEntryPointFromQuickCompiledCode(profiling_info->GetSavedEntryPoint());
+  } else {
+    instrumentation_cache_->AddSamples(thread, method, 1);
+  }
 }
 
 void JitInstrumentationListener::Branch(Thread* thread,
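
This is the restore half of the liveness-polling protocol: on method entry, a parked entry point is put back instead of bumping the hotness counter. A sketch with stand-in types (exit_stubs_installed stands in for the AreExitStubsInstalled() query, and hotness for the JIT sample counter):

    // Stand-in types; the real listener also runs under the runtime's locks.
    struct ProfilingInfo {
      const void* saved_entry_point = nullptr;
      const void* GetSavedEntryPoint() const { return saved_entry_point; }
    };

    struct Method {
      ProfilingInfo* profiling_info = nullptr;
      const void* entry_point = nullptr;
      int hotness = 0;  // stand-in for the JIT sample counter
    };

    bool exit_stubs_installed = false;  // stand-in for AreExitStubsInstalled()

    void MethodEntered(Method* method) {
      ProfilingInfo* info = method->profiling_info;
      if (info != nullptr &&
          info->GetSavedEntryPoint() != nullptr &&
          !exit_stubs_installed) {
        // The code cache parked this entry point while polling for liveness;
        // restoring it proves the code is live and skips re-warming.
        method->entry_point = info->GetSavedEntryPoint();
      } else {
        ++method->hotness;  // stand-in for AddSamples(thread, method, 1)
      }
    }
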
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index ab72373..d54f3df 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -126,11 +126,20 @@
     is_method_being_compiled_ = value;
   }
 
+  void SetSavedEntryPoint(const void* entry_point) {
+    saved_entry_point_ = entry_point;
+  }
+
+  const void* GetSavedEntryPoint() const {
+    return saved_entry_point_;
+  }
+
  private:
   ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
       : number_of_inline_caches_(entries.size()),
         method_(method),
-        is_method_being_compiled_(false) {
+        is_method_being_compiled_(false),
+        saved_entry_point_(nullptr) {
     memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
     for (size_t i = 0; i < number_of_inline_caches_; ++i) {
       cache_[i].dex_pc_ = entries[i];
@@ -148,6 +157,10 @@
   // TODO: Make the JIT code cache lock global.
   bool is_method_being_compiled_;
 
+  // Entry point of the corresponding ArtMethod, while the JIT code cache
+  // is polling for the liveness of compiled code.
+  const void* saved_entry_point_;
+
   // Dynamically allocated array of size `number_of_inline_caches_`.
   InlineCache cache_[0];