Don't request a deopt if the caller isn't async deoptimizable

When determining if a deopt is required for instrumentation reasons on a
method exit, we failed to check whether the caller is async deoptimizable.
We don't handle the case where nothing actually gets deoptimized. For
example, when a native method is called from a method that isn't async
deoptimizable and we request a deopt, the request is treated as a partial
frame deopt (since there is a non-deoptimizable method in the fragment)
and we start execution in the QuickToInterpreterBridge with a native
method, which isn't correct.

Test: art/test.py
Bug: 206029744
Change-Id: Ibd70e6fbbedbb7b721451e07ef7727a0ddf3cf4c
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index f404928..8735dcf 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1653,6 +1653,22 @@
   uintptr_t caller_pc_addr = reinterpret_cast<uintptr_t>(sp) + frame_info.GetReturnPcOffset();
   uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(caller_pc_addr);
 
+  return ShouldDeoptimizeCaller(self, caller, caller_pc, caller_sp);
+}
+
+bool Instrumentation::ShouldDeoptimizeCaller(Thread* self, const NthCallerVisitor& visitor) {
+  uintptr_t caller_sp = reinterpret_cast<uintptr_t>(visitor.GetCurrentQuickFrame());
+  // When the caller isn't executing quick code there is no need to deoptimize.
+  if (visitor.GetCurrentOatQuickMethodHeader() == nullptr) {
+    return false;
+  }
+  return ShouldDeoptimizeCaller(self, visitor.GetOuterMethod(), visitor.caller_pc, caller_sp);
+}
+
+bool Instrumentation::ShouldDeoptimizeCaller(Thread* self,
+                                             ArtMethod* caller,
+                                             uintptr_t caller_pc,
+                                             uintptr_t caller_sp) {
   if (caller == nullptr ||
       caller->IsNative() ||
       caller_pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
@@ -1667,7 +1683,24 @@
     return false;
   }
 
-  if (NeedsSlowInterpreterForMethod(self, caller)) {
+  bool needs_deopt = NeedsSlowInterpreterForMethod(self, caller);
+
+  // Non java debuggable apps don't support redefinition and hence it isn't required to check if
+  // frame needs to be deoptimized. We also want to avoid getting method header when we need a
+  // deopt anyway.
+  if (Runtime::Current()->IsJavaDebuggable() && !needs_deopt) {
+    const OatQuickMethodHeader* header = caller->GetOatQuickMethodHeader(caller_pc);
+    if (header != nullptr && header->HasShouldDeoptimizeFlag()) {
+      DCHECK(header->IsOptimized());
+      uint8_t* should_deopt_flag_addr =
+          reinterpret_cast<uint8_t*>(caller_sp) + header->GetShouldDeoptimizeFlagOffset();
+      if ((*should_deopt_flag_addr & static_cast<uint8_t>(DeoptimizeFlagValue::kDebug)) != 0) {
+        needs_deopt = true;
+      }
+    }
+  }
+
+  if (needs_deopt) {
     if (!Runtime::Current()->IsAsyncDeoptimizeable(caller, caller_pc)) {
       LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
                    << caller->PrettyMethod();
@@ -1676,51 +1709,7 @@
     return true;
   }
 
-  // Non java debuggable apps don't support redefinition and hence it isn't required to check if
-  // frame needs to be deoptimized.
-  if (!Runtime::Current()->IsJavaDebuggable()) {
-    return false;
-  }
-
-  bool should_deoptimize_frame = false;
-  const OatQuickMethodHeader* header = caller->GetOatQuickMethodHeader(caller_pc);
-  if (header != nullptr && header->HasShouldDeoptimizeFlag()) {
-    DCHECK(header->IsOptimized());
-    uint8_t* should_deopt_flag_addr =
-        reinterpret_cast<uint8_t*>(caller_sp) + header->GetShouldDeoptimizeFlagOffset();
-    if ((*should_deopt_flag_addr & static_cast<uint8_t>(DeoptimizeFlagValue::kDebug)) != 0) {
-      should_deoptimize_frame = true;
-    }
-  }
-
-  if (should_deoptimize_frame && !Runtime::Current()->IsAsyncDeoptimizeable(caller, caller_pc)) {
-    LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
-                 << caller->PrettyMethod();
-    return false;
-  }
-  return should_deoptimize_frame;
-}
-
-bool Instrumentation::ShouldDeoptimizeCaller(Thread* self, const NthCallerVisitor& visitor) {
-  bool should_deoptimize_frame = false;
-  if (visitor.caller != nullptr && visitor.caller->IsNative()) {
-    // Native methods don't need deoptimization. We don't set breakpoints / redefine native methods.
-    return false;
-  }
-
-  if (visitor.caller != nullptr) {
-    const OatQuickMethodHeader* header = visitor.GetCurrentOatQuickMethodHeader();
-    if (header != nullptr && header->HasShouldDeoptimizeFlag()) {
-      uint8_t should_deopt_flag = visitor.GetShouldDeoptimizeFlag();
-      // DeoptimizeFlag could be set for debugging or for CHA invalidations.
-      // Deoptimize here only if it was requested for debugging. CHA
-      // invalidations are handled in the JITed code.
-      if ((should_deopt_flag & static_cast<uint8_t>(DeoptimizeFlagValue::kDebug)) != 0) {
-        should_deoptimize_frame = true;
-      }
-    }
-  }
-  return NeedsSlowInterpreterForMethod(self, visitor.caller) || should_deoptimize_frame;
+  return false;
 }
 
 TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index ab8bee7..23c433e 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -504,6 +504,12 @@
   // method requires a deopt or if this particular frame needs a deopt because of a class
   // redefinition.
   bool ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_);
+  // This is a helper function used by the two variants of ShouldDeoptimizeCaller.
+  // Remove this once ShouldDeoptimizeCaller is updated not to use NthCallerVisitor.
+  bool ShouldDeoptimizeCaller(Thread* self,
+                              ArtMethod* caller,
+                              uintptr_t caller_pc,
+                              uintptr_t caller_sp) REQUIRES_SHARED(Locks::mutator_lock_);
   // This returns if the specified method requires a deoptimization. This doesn't account if a stack
   // frame involving this method requires a deoptimization.
   bool NeedsSlowInterpreterForMethod(Thread* self, ArtMethod* method)