Revert "Cleanups around the creation of ProfilingInfo."

This reverts commit 0fa304ee0fa63149222bfc6756f56cd285c56bd1.

Reason for revert: the 685-deoptimizeable test is failing on host
debuggable targets (and failed once on cdex-redefine-stress-jit).

Bug: 112676029
Test: ./test.py --host --debuggable
Change-Id: I88cf51ec48d704f966066ea9f2dbb17d32648f5a
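---

This revert restores the two-mode allocation contract for profiling data:
ProfilingInfo::Create regains its 'retry_allocation' flag (and a bool return),
and JitCodeCache::NotifyCompilationOf regains its JitMemoryRegion* parameter.
A minimal sketch of the two restored call styles, as they appear in the diff
below:

    // Mutator/interpreter path: best effort, do not contend with the JIT.
    ProfilingInfo::Create(self, method, /* retry_allocation= */ false);

    // JIT thread: may garbage collect the code cache and retry.
    ProfilingInfo::Create(self, method, /* retry_allocation= */ true);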
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index cdd69e0..4921a99 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -331,7 +331,8 @@
   // If we get a request to compile a proxy method, we pass the actual Java method
   // of that proxy method, as the compiler does not expect a proxy method.
   ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, compilation_kind, prejit)) {
+  if (!code_cache_->NotifyCompilationOf(
+          method_to_compile, self, compilation_kind, prejit, region)) {
     return false;
   }
 
@@ -757,6 +758,7 @@
 class JitCompileTask final : public Task {
  public:
   enum class TaskKind {
+    kAllocateProfile,
     kCompile,
     kPreCompile,
   };
@@ -795,6 +797,12 @@
               /* prejit= */ (kind_ == TaskKind::kPreCompile));
           break;
         }
+        case TaskKind::kAllocateProfile: {
+          if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
+            VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
+          }
+          break;
+        }
       }
     }
     ProfileSaver::NotifyJitActivity();
@@ -1587,6 +1595,10 @@
   if (UNLIKELY(runtime->UseJitCompilation() && JitAtFirstUse())) {
     ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
     if (np_method->IsCompilable()) {
+      if (!np_method->IsNative() && GetCodeCache()->CanAllocateProfilingInfo()) {
+        // The compiler requires a ProfilingInfo object for non-native methods.
+        ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
+      }
       // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
       // conflicts with jitzygote optimizations.
       JitCompileTask compile_task(
@@ -1801,6 +1813,7 @@
     return;
   }
   if (GetCodeCache()->CanAllocateProfilingInfo()) {
+    ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
     thread_pool_->AddTask(
         self,
         new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kBaseline));
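The jit.cc hunks above also restore the kAllocateProfile task kind, whose
handler calls ProfilingInfo::Create with retry_allocation=true on a JIT
worker thread, where blocking on jit_lock_ is acceptable. A hypothetical
enqueue site (not part of this diff; the constructor shape matches the
kCompile call above, and compilation_kind_ is unused by this task kind):

    // Hypothetical: defer the allocation to the JIT thread pool, where
    // blocking and retrying is acceptable.
    thread_pool_->AddTask(
        self,
        new JitCompileTask(
            method, JitCompileTask::TaskKind::kAllocateProfile, CompilationKind::kBaseline));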
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index cf28fe1..4b9538d 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -286,6 +286,19 @@
   return PrivateRegionContainsPc(ptr) || shared_region_.IsInExecSpace(ptr);
 }
 
+bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
+  ScopedObjectAccess soa(art::Thread::Current());
+  ScopedAssertNoThreadSuspension sants(__FUNCTION__);
+  if (ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+    return true;
+  } else if (method->GetEntryPointFromQuickCompiledCode() == GetQuickInstrumentationEntryPoint()) {
+    // The entrypoint is the instrumentation stub; check whether JIT code is
+    // available behind it.
+    return FindCompiledCodeForInstrumentation(method) != nullptr;
+  }
+  return false;
+}
+
 bool JitCodeCache::ContainsMethod(ArtMethod* method) {
   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   if (UNLIKELY(method->IsNative())) {
@@ -1460,18 +1469,30 @@
 
 ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                               ArtMethod* method,
-                                              const std::vector<uint32_t>& entries) {
+                                              const std::vector<uint32_t>& entries,
+                                              bool retry_allocation)
+    // No thread safety analysis as we are using TryLock/Unlock explicitly.
+    NO_THREAD_SAFETY_ANALYSIS {
   DCHECK(CanAllocateProfilingInfo());
   ProfilingInfo* info = nullptr;
-  {
-    MutexLock mu(self, *Locks::jit_lock_);
-    info = AddProfilingInfoInternal(self, method, entries);
-  }
+  if (!retry_allocation) {
+    // If we are allocating for the interpreter, just try to lock, to avoid
+    // lock contention with the JIT.
+    if (Locks::jit_lock_->ExclusiveTryLock(self)) {
+      info = AddProfilingInfoInternal(self, method, entries);
+      Locks::jit_lock_->ExclusiveUnlock(self);
+    }
+  } else {
+    {
+      MutexLock mu(self, *Locks::jit_lock_);
+      info = AddProfilingInfoInternal(self, method, entries);
+    }
 
-  if (info == nullptr) {
-    GarbageCollectCache(self);
-    MutexLock mu(self, *Locks::jit_lock_);
-    info = AddProfilingInfoInternal(self, method, entries);
+    if (info == nullptr) {
+      GarbageCollectCache(self);
+      MutexLock mu(self, *Locks::jit_lock_);
+      info = AddProfilingInfoInternal(self, method, entries);
+    }
   }
   return info;
 }
@@ -1604,7 +1625,8 @@
 bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
                                        Thread* self,
                                        CompilationKind compilation_kind,
-                                       bool prejit) {
+                                       bool prejit,
+                                       JitMemoryRegion* region) {
   const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
   if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) {
     OatQuickMethodHeader* method_header =
@@ -1678,11 +1700,21 @@
     }
     return new_compilation;
   } else {
+    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
     if (CanAllocateProfilingInfo() &&
         (compilation_kind == CompilationKind::kBaseline) &&
-        (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
-      if (ProfilingInfo::Create(self, method) == nullptr) {
-        VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled baseline";
+        (info == nullptr)) {
+      // We can retry allocation here as we're the JIT thread.
+      if (ProfilingInfo::Create(self, method, /* retry_allocation= */ true)) {
+        info = method->GetProfilingInfo(kRuntimePointerSize);
+      }
+    }
+    if (info == nullptr) {
+      // When prejitting, we don't allocate a profiling info.
+      if (!prejit && !IsSharedRegion(*region)) {
+        VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
+        // Because the counter is not atomic, there are some rare cases where we may not hit the
+        // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
         ClearMethodCounter(method, /*was_warm=*/ false);
         return false;
       }
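The AddProfilingInfo change above is the heart of the revert: callers that
must not stall (the interpreter) use a non-blocking TryLock and simply give
up under contention, while the JIT thread takes the lock and, if the first
allocation fails, garbage collects the cache and retries once. A standalone
sketch of the same pattern, with std::mutex standing in for Locks::jit_lock_
and the allocator/GC reduced to stubs:

    #include <mutex>

    static std::mutex jit_lock;                          // stand-in for Locks::jit_lock_
    static void* AllocateInternal() { return nullptr; }  // stub: nullptr when the cache is full
    static void GarbageCollectCache() {}                 // stub: frees space in the cache

    static void* Allocate(bool retry_allocation) {
      void* info = nullptr;
      if (!retry_allocation) {
        // Interpreter path: avoid lock contention with the JIT thread.
        if (jit_lock.try_lock()) {
          info = AllocateInternal();
          jit_lock.unlock();
        }
      } else {
        {
          std::lock_guard<std::mutex> lock(jit_lock);
          info = AllocateInternal();
        }
        if (info == nullptr) {
          GarbageCollectCache();  // collect, then retry once under the lock
          std::lock_guard<std::mutex> lock(jit_lock);
          info = AllocateInternal();
        }
      }
      return info;
    }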
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 7c828ae..e8ab117 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -197,7 +197,8 @@
   bool NotifyCompilationOf(ArtMethod* method,
                            Thread* self,
                            CompilationKind compilation_kind,
-                           bool prejit)
+                           bool prejit,
+                           JitMemoryRegion* region)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
@@ -227,6 +228,10 @@
   // Return true if the code cache contains this pc in the private region (i.e. not from zygote).
   bool PrivateRegionContainsPc(const void* pc) const;
 
+  // Returns true if the method's entrypoint is JIT-compiled code, or if it is the
+  // instrumentation entrypoint and we can jump to JIT code for this method. For testing use only.
+  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
+
   // Return true if the code cache contains this method.
   bool ContainsMethod(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
@@ -309,10 +314,12 @@
       REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Create a 'ProfileInfo' for 'method'.
+  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true, garbage
+  // collect the code cache and retry if the first allocation is unsuccessful.
   ProfilingInfo* AddProfilingInfo(Thread* self,
                                   ArtMethod* method,
-                                  const std::vector<uint32_t>& entries)
+                                  const std::vector<uint32_t>& entries,
+                                  bool retry_allocation)
       REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
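The REQUIRES(!Locks::jit_lock_) annotations in the header above are Clang
thread-safety attributes: callers that may already hold jit_lock_ are
rejected at compile time, which is why AddProfilingInfo needs the explicit
NO_THREAD_SAFETY_ANALYSIS escape hatch for its TryLock/Unlock dance. A
minimal illustration using the raw attributes (ART wraps these in macros;
negative capabilities need -Wthread-safety-negative):

    struct __attribute__((capability("mutex"))) Mutex {
      void Lock() __attribute__((acquire_capability()));
      void Unlock() __attribute__((release_capability()));
    };

    Mutex jit_lock;

    // Callers must NOT already hold jit_lock.
    void AddProfilingInfo() __attribute__((requires_capability(!jit_lock)));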
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 93951ee..a61a30d 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -36,7 +36,7 @@
   }
 }
 
-ProfilingInfo* ProfilingInfo::Create(Thread* self, ArtMethod* method) {
+bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocation) {
   // Walk over the dex instructions of the method and keep track of
   // instructions we are interested in profiling.
   DCHECK(!method->IsNative());
@@ -63,7 +63,7 @@
 
   // Allocate the `ProfilingInfo` object in the JIT's data space.
   jit::JitCodeCache* code_cache = Runtime::Current()->GetJit()->GetCodeCache();
-  return code_cache->AddProfilingInfo(self, method, entries);
+  return code_cache->AddProfilingInfo(self, method, entries, retry_allocation) != nullptr;
 }
 
 InlineCache* ProfilingInfo::GetInlineCache(uint32_t dex_pc) {
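For context, the "walk over the dex instructions" step elided from the hunk
above collects the dex pcs of virtual/interface invokes into 'entries'. A
sketch of that shape (assuming the standard accessor types; the opcode list
in the real code also covers the range/quick variants):

    std::vector<uint32_t> entries;
    for (const DexInstructionPcPair& inst : method->DexInstructions()) {
      switch (inst->Opcode()) {
        case Instruction::INVOKE_VIRTUAL:
        case Instruction::INVOKE_INTERFACE:
          entries.push_back(inst.DexPc());  // an inline cache will profile the receiver here
          break;
        default:
          break;
      }
    }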
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index d136b4c..cbe2445 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -63,8 +63,9 @@
  */
 class ProfilingInfo {
  public:
-  // Create a ProfilingInfo for 'method'.
-  static ProfilingInfo* Create(Thread* self, ArtMethod* method)
+  // Create a ProfilingInfo for 'method'. Returns true if creation succeeded, or if
+  // none is needed because the method has no virtual/interface invocations.
+  static bool Create(Thread* self, ArtMethod* method, bool retry_allocation)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Add information from an executed INVOKE instruction to the profile.
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index d1caf3f..b7365dd 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -109,7 +109,7 @@
       method_name,
       [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
         ArtMethod* m = stack_visitor->GetMethod();
-        ProfilingInfo::Create(Thread::Current(), m);
+        ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation= */ true);
       });
 }
 
diff --git a/test/595-profile-saving/profile-saving.cc b/test/595-profile-saving/profile-saving.cc
index 95aa7e9..d6ca447 100644
--- a/test/595-profile-saving/profile-saving.cc
+++ b/test/595-profile-saving/profile-saving.cc
@@ -39,7 +39,7 @@
   ScopedObjectAccess soa(env);
   ObjPtr<mirror::Executable> exec = soa.Decode<mirror::Executable>(method);
   ArtMethod* art_method = exec->GetArtMethod();
-  if (ProfilingInfo::Create(soa.Self(), art_method) == nullptr) {
+  if (!ProfilingInfo::Create(soa.Self(), art_method, /* retry_allocation= */ true)) {
     LOG(ERROR) << "Failed to create profiling info for method " << art_method->PrettyMethod();
   }
 }
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 09f1a53..0e47782 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -230,9 +230,12 @@
 }
 
 static void ForceJitCompiled(Thread* self, ArtMethod* method) REQUIRES(!Locks::mutator_lock_) {
+  bool native = false;
   {
     ScopedObjectAccess soa(self);
-    if (!Runtime::Current()->GetRuntimeCallbacks()->IsMethodSafeToJit(method)) {
+    if (method->IsNative()) {
+      native = true;
+    } else if (!Runtime::Current()->GetRuntimeCallbacks()->IsMethodSafeToJit(method)) {
       std::string msg(method->PrettyMethod());
       msg += ": is not safe to jit!";
       ThrowIllegalStateException(msg.c_str());
@@ -267,17 +270,24 @@
   // Note: this will apply to all JIT compilations.
   code_cache->SetGarbageCollectCode(false);
   while (true) {
-    if (code_cache->ContainsMethod(method)) {
+    if (native && code_cache->ContainsMethod(method)) {
       break;
     } else {
       // Sleep to yield to the compiler thread.
       usleep(1000);
       ScopedObjectAccess soa(self);
+      if (!native && jit->GetCodeCache()->CanAllocateProfilingInfo()) {
+        // Make sure there is a profiling info, required by the compiler.
+        ProfilingInfo::Create(self, method, /* retry_allocation= */ true);
+      }
       // Will either ensure it's compiled or do the compilation itself. We do
       // this before checking if we will execute JIT code to make sure the
       // method is compiled 'optimized' and not baseline (tests expect optimized
       // compilation).
       jit->CompileMethod(method, self, CompilationKind::kOptimized, /*prejit=*/ false);
+      if (code_cache->WillExecuteJitCode(method)) {
+        break;
+      }
     }
   }
 }
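With the WillExecuteJitCode check restored, ForceJitCompiled in the test
helper above is a polling loop: sleep, make sure the ProfilingInfo the
compiler requires exists, request an optimized compile, and stop once the
method's entrypoint is (or leads to) JIT code. Stripped of the native-method
special case, the loop reduces to this sketch:

    while (!code_cache->WillExecuteJitCode(method)) {
      usleep(1000);  // yield to the JIT compiler thread
      ScopedObjectAccess soa(self);
      if (code_cache->CanAllocateProfilingInfo()) {
        ProfilingInfo::Create(self, method, /* retry_allocation= */ true);
      }
      jit->CompileMethod(method, self, CompilationKind::kOptimized, /*prejit=*/ false);
    }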