Reland "Cleanups around the creation of ProfilingInfo."

This reverts commit a996425197a7946eae02d218f70610a853f2fe9a.

Bug: 112676029

Reason for revert: Fixed JitCodeCache::InvalidateAllCompiledCode and
                   ForceJitCompiled.

Change-Id: Ia87fda1bb40c504d9294e447f899ac1797ae98fc
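
For reference, the net change to the ProfilingInfo allocation API, condensed
from the hunks below (a before/after sketch, not the full declarations):

    // Before: returned a bool; callers opted into a code cache collection
    // and retry when the first allocation failed.
    static bool Create(Thread* self, ArtMethod* method, bool retry_allocation);

    // After: the code cache always collects and retries once on failure,
    // and callers get the new ProfilingInfo (or nullptr) back directly.
    static ProfilingInfo* Create(Thread* self, ArtMethod* method);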
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 4921a99..cdd69e0 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -331,8 +331,7 @@
   // If we get a request to compile a proxy method, we pass the actual Java method
   // of that proxy method, as the compiler does not expect a proxy method.
   ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-  if (!code_cache_->NotifyCompilationOf(
-          method_to_compile, self, compilation_kind, prejit, region)) {
+  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, compilation_kind, prejit)) {
     return false;
   }
 
@@ -758,7 +757,6 @@
 class JitCompileTask final : public Task {
  public:
   enum class TaskKind {
-    kAllocateProfile,
     kCompile,
     kPreCompile,
   };
@@ -797,12 +795,6 @@
               /* prejit= */ (kind_ == TaskKind::kPreCompile));
           break;
         }
-        case TaskKind::kAllocateProfile: {
-          if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
-            VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
-          }
-          break;
-        }
       }
     }
     ProfileSaver::NotifyJitActivity();
@@ -1595,10 +1587,6 @@
   if (UNLIKELY(runtime->UseJitCompilation() && JitAtFirstUse())) {
     ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
     if (np_method->IsCompilable()) {
-      if (!np_method->IsNative() && GetCodeCache()->CanAllocateProfilingInfo()) {
-        // The compiler requires a ProfilingInfo object for non-native methods.
-        ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
-      }
       // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
       // conflicts with jitzygote optimizations.
       JitCompileTask compile_task(
@@ -1813,7 +1801,6 @@
     return;
   }
   if (GetCodeCache()->CanAllocateProfilingInfo()) {
-    ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
     thread_pool_->AddTask(
         self,
         new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kBaseline));
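
The jit.cc hunks above remove every eager allocation site: the
kAllocateProfile task kind, the pre-allocation for JIT at first use, and the
pre-allocation when enqueuing a baseline compile. A condensed sketch of the
resulting flow, using only names from this change (the surrounding guards and
error handling are omitted):

    // Enqueue path (jit.cc): just post the task; no ProfilingInfo needed.
    thread_pool_->AddTask(
        self,
        new JitCompileTask(method, JitCompileTask::TaskKind::kCompile,
                           CompilationKind::kBaseline));

    // JIT thread (NotifyCompilationOf in jit_code_cache.cc, below): allocate
    // lazily for baseline compiles; give up this attempt if that fails.
    if (ProfilingInfo::Create(self, method) == nullptr) {
      ClearMethodCounter(method, /*was_warm=*/ false);
      return false;
    }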
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4b9538d..fbc843e 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -286,15 +286,6 @@
   return PrivateRegionContainsPc(ptr) || shared_region_.IsInExecSpace(ptr);
 }
 
-bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
-  ScopedObjectAccess soa(art::Thread::Current());
-  ScopedAssertNoThreadSuspension sants(__FUNCTION__);
-  if (ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
-    return true;
-  }
-  return false;
-}
-
 bool JitCodeCache::ContainsMethod(ArtMethod* method) {
   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   if (UNLIKELY(method->IsNative())) {
@@ -1469,30 +1460,18 @@
 
 ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                               ArtMethod* method,
-                                              const std::vector<uint32_t>& entries,
-                                              bool retry_allocation)
-    // No thread safety analysis as we are using TryLock/Unlock explicitly.
-    NO_THREAD_SAFETY_ANALYSIS {
+                                              const std::vector<uint32_t>& entries) {
   DCHECK(CanAllocateProfilingInfo());
   ProfilingInfo* info = nullptr;
-  if (!retry_allocation) {
-    // If we are allocating for the interpreter, just try to lock, to avoid
-    // lock contention with the JIT.
-    if (Locks::jit_lock_->ExclusiveTryLock(self)) {
-      info = AddProfilingInfoInternal(self, method, entries);
-      Locks::jit_lock_->ExclusiveUnlock(self);
-    }
-  } else {
-    {
-      MutexLock mu(self, *Locks::jit_lock_);
-      info = AddProfilingInfoInternal(self, method, entries);
-    }
+  {
+    MutexLock mu(self, *Locks::jit_lock_);
+    info = AddProfilingInfoInternal(self, method, entries);
+  }
 
-    if (info == nullptr) {
-      GarbageCollectCache(self);
-      MutexLock mu(self, *Locks::jit_lock_);
-      info = AddProfilingInfoInternal(self, method, entries);
-    }
+  if (info == nullptr) {
+    GarbageCollectCache(self);
+    MutexLock mu(self, *Locks::jit_lock_);
+    info = AddProfilingInfoInternal(self, method, entries);
   }
   return info;
 }
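
Because this hunk interleaves the deleted TryLock branch with its
replacement, the new body is easier to read flat (a restatement of the added
lines, with one explanatory comment). The TryLock fast path only existed to
avoid contention when the interpreter allocated ProfilingInfos; now that
allocation happens on the JIT thread (plus a few tests), blocking on
jit_lock_ is acceptable:

    ProfilingInfo* info = nullptr;
    {
      MutexLock mu(self, *Locks::jit_lock_);
      info = AddProfilingInfoInternal(self, method, entries);
    }
    if (info == nullptr) {
      // The data region was full: collect once, then retry under the lock.
      GarbageCollectCache(self);
      MutexLock mu(self, *Locks::jit_lock_);
      info = AddProfilingInfoInternal(self, method, entries);
    }
    return info;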
@@ -1625,8 +1604,7 @@
 bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
                                        Thread* self,
                                        CompilationKind compilation_kind,
-                                       bool prejit,
-                                       JitMemoryRegion* region) {
+                                       bool prejit) {
   const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
   if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) {
     OatQuickMethodHeader* method_header =
@@ -1700,21 +1678,11 @@
     }
     return new_compilation;
   } else {
-    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
     if (CanAllocateProfilingInfo() &&
         (compilation_kind == CompilationKind::kBaseline) &&
-        (info == nullptr)) {
-      // We can retry allocation here as we're the JIT thread.
-      if (ProfilingInfo::Create(self, method, /* retry_allocation= */ true)) {
-        info = method->GetProfilingInfo(kRuntimePointerSize);
-      }
-    }
-    if (info == nullptr) {
-      // When prejitting, we don't allocate a profiling info.
-      if (!prejit && !IsSharedRegion(*region)) {
-        VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
-        // Because the counter is not atomic, there are some rare cases where we may not hit the
-        // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
+        (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
+      if (ProfilingInfo::Create(self, method) == nullptr) {
+        VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled baseline";
         ClearMethodCounter(method, /*was_warm=*/ false);
         return false;
       }
@@ -1768,23 +1736,20 @@
 
 void JitCodeCache::InvalidateAllCompiledCode() {
   art::MutexLock mu(Thread::Current(), *Locks::jit_lock_);
-  size_t cnt = profiling_infos_.size();
-  size_t osr_size = osr_code_map_.size();
-  for (ProfilingInfo* pi : profiling_infos_) {
-    // NB Due to OSR we might run this on some methods multiple times but this should be fine.
-    ArtMethod* meth = pi->GetMethod();
-    // We had a ProfilingInfo so we must be warm.
+  VLOG(jit) << "Invalidating all compiled code";
+  ClassLinker* linker = Runtime::Current()->GetClassLinker();
+  for (auto it : method_code_map_) {
+    ArtMethod* meth = it.second;
+    // We were compiled, so we must be warm.
     ClearMethodCounter(meth, /*was_warm=*/true);
-    ClassLinker* linker = Runtime::Current()->GetClassLinker();
     if (meth->IsObsolete()) {
       linker->SetEntryPointsForObsoleteMethod(meth);
     } else {
       linker->SetEntryPointsToInterpreter(meth);
     }
   }
+  saved_compiled_methods_map_.clear();
   osr_code_map_.clear();
-  VLOG(jit) << "Invalidated the compiled code of " << (cnt - osr_size) << " methods and "
-            << osr_size << " OSRs.";
 }
 
 void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
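
The InvalidateAllCompiledCode rewrite above is the first of the two fixes
named in the commit message. The old loop walked profiling_infos_, so only
methods owning a ProfilingInfo had their entry points reset; with eager
allocation gone, that would miss compiled methods that never got one.
Walking method_code_map_ covers every method with compiled code. Assuming
the map pairs a compiled-code pointer with its ArtMethod (which is how the
loop reads it), a method with several compiled versions, e.g. regular and
OSR code, is visited more than once; that is harmless because resetting
entry points is idempotent. The new loop, flat:

    ClassLinker* linker = Runtime::Current()->GetClassLinker();
    for (auto it : method_code_map_) {
      ArtMethod* meth = it.second;  // it.first is the compiled code pointer.
      ClearMethodCounter(meth, /*was_warm=*/true);
      if (meth->IsObsolete()) {
        linker->SetEntryPointsForObsoleteMethod(meth);
      } else {
        linker->SetEntryPointsToInterpreter(meth);
      }
    }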
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index e8ab117..7c828ae 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -197,8 +197,7 @@
   bool NotifyCompilationOf(ArtMethod* method,
                            Thread* self,
                            CompilationKind compilation_kind,
-                           bool prejit,
-                           JitMemoryRegion* region)
+                           bool prejit)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
@@ -228,10 +227,6 @@
   // Return true if the code cache contains this pc in the private region (i.e. not from zygote).
   bool PrivateRegionContainsPc(const void* pc) const;
 
-  // Returns true if either the method's entrypoint is JIT compiled code or it is the
-  // instrumentation entrypoint and we can jump to jit code for this method. For testing use only.
-  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
-
   // Return true if the code cache contains this method.
   bool ContainsMethod(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
@@ -314,12 +309,10 @@
       REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true,
-  // will collect and retry if the first allocation is unsuccessful.
+  // Create a 'ProfilingInfo' for 'method'.
   ProfilingInfo* AddProfilingInfo(Thread* self,
                                   ArtMethod* method,
-                                  const std::vector<uint32_t>& entries,
-                                  bool retry_allocation)
+                                  const std::vector<uint32_t>& entries)
       REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index a61a30d..93951ee 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -36,7 +36,7 @@
   }
 }
 
-bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocation) {
+ProfilingInfo* ProfilingInfo::Create(Thread* self, ArtMethod* method) {
   // Walk over the dex instructions of the method and keep track of
   // instructions we are interested in profiling.
   DCHECK(!method->IsNative());
@@ -63,7 +63,7 @@
 
   // Allocate the `ProfilingInfo` object in the JIT's data space.
   jit::JitCodeCache* code_cache = Runtime::Current()->GetJit()->GetCodeCache();
-  return code_cache->AddProfilingInfo(self, method, entries, retry_allocation) != nullptr;
+  return code_cache->AddProfilingInfo(self, method, entries);
 }
 
 InlineCache* ProfilingInfo::GetInlineCache(uint32_t dex_pc) {
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index cbe2445..d136b4c 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -63,9 +63,8 @@
  */
 class ProfilingInfo {
  public:
-  // Create a ProfilingInfo for 'method'. Return whether it succeeded, or if it is
-  // not needed in case the method does not have virtual/interface invocations.
-  static bool Create(Thread* self, ArtMethod* method, bool retry_allocation)
+  // Create a ProfilingInfo for 'method'.
+  static ProfilingInfo* Create(Thread* self, ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Add information from an executed INVOKE instruction to the profile.
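
Call sites now test the returned pointer, as the 595-profile-saving test
below does. A minimal usage sketch (the caller must hold the mutator lock,
per the REQUIRES_SHARED annotation above):

    ProfilingInfo* info = ProfilingInfo::Create(soa.Self(), method);
    if (info == nullptr) {
      // nullptr means the allocation failed even after a code cache
      // collection (see JitCodeCache::AddProfilingInfo above).
      LOG(ERROR) << "Failed to create profiling info for "
                 << method->PrettyMethod();
    }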
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index b7365dd..d1caf3f 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -109,7 +109,7 @@
       method_name,
       [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
         ArtMethod* m = stack_visitor->GetMethod();
-        ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true);
+        ProfilingInfo::Create(Thread::Current(), m);
       });
 }
 
diff --git a/test/595-profile-saving/profile-saving.cc b/test/595-profile-saving/profile-saving.cc
index d6ca447..95aa7e9 100644
--- a/test/595-profile-saving/profile-saving.cc
+++ b/test/595-profile-saving/profile-saving.cc
@@ -39,7 +39,7 @@
   ScopedObjectAccess soa(env);
   ObjPtr<mirror::Executable> exec = soa.Decode<mirror::Executable>(method);
   ArtMethod* art_method = exec->GetArtMethod();
-  if (!ProfilingInfo::Create(soa.Self(), art_method, /* retry_allocation */ true)) {
+  if (ProfilingInfo::Create(soa.Self(), art_method) == nullptr) {
     LOG(ERROR) << "Failed to create profiling info for method " << art_method->PrettyMethod();
   }
 }
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 0e47782..5d68b2c 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -230,12 +230,9 @@
 }
 
 static void ForceJitCompiled(Thread* self, ArtMethod* method) REQUIRES(!Locks::mutator_lock_) {
-  bool native = false;
   {
     ScopedObjectAccess soa(self);
-    if (method->IsNative()) {
-      native = true;
-    } else if (!Runtime::Current()->GetRuntimeCallbacks()->IsMethodSafeToJit(method)) {
+    if (!Runtime::Current()->GetRuntimeCallbacks()->IsMethodSafeToJit(method)) {
       std::string msg(method->PrettyMethod());
       msg += ": is not safe to jit!";
       ThrowIllegalStateException(msg.c_str());
@@ -269,26 +266,15 @@
   // Update the code cache to make sure the JIT code does not get deleted.
   // Note: this will apply to all JIT compilations.
   code_cache->SetGarbageCollectCode(false);
-  while (true) {
-    if (native && code_cache->ContainsMethod(method)) {
-      break;
-    } else {
-      // Sleep to yield to the compiler thread.
-      usleep(1000);
-      ScopedObjectAccess soa(self);
-      if (!native && jit->GetCodeCache()->CanAllocateProfilingInfo()) {
-        // Make sure there is a profiling info, required by the compiler.
-        ProfilingInfo::Create(self, method, /* retry_allocation */ true);
-      }
-      // Will either ensure it's compiled or do the compilation itself. We do
-      // this before checking if we will execute JIT code to make sure the
-      // method is compiled 'optimized' and not baseline (tests expect optimized
-      // compilation).
-      jit->CompileMethod(method, self, CompilationKind::kOptimized, /*prejit=*/ false);
-      if (code_cache->WillExecuteJitCode(method)) {
-        break;
-      }
-    }
+  while (!code_cache->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+    // Sleep to yield to the compiler thread.
+    usleep(1000);
+    ScopedObjectAccess soa(self);
+    // Will either ensure it's compiled or do the compilation itself. We do
+    // this before checking if we will execute JIT code to make sure the
+    // method is compiled 'optimized' and not baseline (tests expect optimized
+    // compilation).
+    jit->CompileMethod(method, self, CompilationKind::kOptimized, /*prejit=*/ false);
   }
 }
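
The simplified wait loop above is the second fix named in the commit
message. The old version special-cased native methods (polling
ContainsMethod) versus managed ones (polling the now-removed
WillExecuteJitCode), and pre-allocated a ProfilingInfo that
NotifyCompilationOf now creates itself. Both cases reduce to one observable
condition: the method's entry point pointing into the code cache. Restated
from the added lines:

    // Keep requesting an optimized compile until the method's entry point
    // is JIT-compiled code; ContainsPc covers native and managed methods.
    while (!code_cache->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
      usleep(1000);  // Yield to the JIT compiler thread.
      ScopedObjectAccess soa(self);
      jit->CompileMethod(method, self, CompilationKind::kOptimized, /*prejit=*/ false);
    }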