Revert "Remove the extra compilation sets in the JIT."

This reverts commit e39ccf2ea19ca241b7f1dbd9761db696f2055e29.

Reason for revert: Issues during class unloading
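
The reverted change had replaced the per-kind tracking of in-flight
compilations with a global Jit::IsActive() check that kept all class
loaders alive whenever the JIT had any work; this revert restores the
ScopedCompilation guard, the per-kind sets in JitCodeCache, and
JitCodeCache::VisitRoots(), which visits the roots of methods that are
currently being compiled. The standalone sketch below illustrates only
the claim/release ownership pattern; Tracker and ScopedSlot are invented
names for the example, not ART code:

    // Standalone sketch of the move-only "compilation slot" pattern
    // restored by this change (illustrative; the real types are
    // JitCodeCache and ScopedCompilation).
    #include <cassert>
    #include <mutex>
    #include <set>
    #include <utility>

    struct Tracker {
      std::mutex lock;
      std::set<int> in_flight;  // stands in for the per-kind method sets
    };

    class ScopedSlot {
     public:
      ScopedSlot(Tracker* tracker, int id)
          : tracker_(tracker), id_(id), owns_(false) {
        std::lock_guard<std::mutex> guard(tracker_->lock);
        // Claim the slot only if nobody else holds it; insert().second is
        // true exactly when the element was newly inserted.
        owns_ = tracker_->in_flight.insert(id_).second;
      }
      ScopedSlot(ScopedSlot&& other) noexcept
          : tracker_(other.tracker_), id_(other.id_), owns_(other.owns_) {
        other.owns_ = false;  // only one destructor may release the slot
      }
      ~ScopedSlot() {
        if (owns_) {
          std::lock_guard<std::mutex> guard(tracker_->lock);
          tracker_->in_flight.erase(id_);
        }
      }
      bool Owns() const { return owns_; }

     private:
      Tracker* const tracker_;
      const int id_;
      bool owns_;
    };

    int main() {
      Tracker tracker;
      ScopedSlot first(&tracker, 42);
      assert(first.Owns());       // first claimant owns the compilation
      ScopedSlot duplicate(&tracker, 42);
      assert(!duplicate.Owns());  // concurrent duplicate is rejected
      ScopedSlot moved(std::move(first));
      assert(moved.Owns());       // ownership follows the move
      return 0;
    }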

Change-Id: I6621ea1c4aa3d01bc0aae7a22b0b8105d61821a3
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 55c8912..6916dec 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2405,10 +2405,6 @@
   // enabling tracing requires the mutator lock, there are no race conditions here.
   const bool tracing_enabled = Trace::IsTracingEnabled();
   Thread* const self = Thread::Current();
-  // For simplicity, if there is JIT activity, we'll trace all class loaders.
-  // This prevents class unloading while a method is being compiled or is going
-  // to be compiled.
-  const bool is_jit_active = jit::Jit::IsActive(self);
   WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
   if (gUseReadBarrier) {
     // We do not track new roots for CC.
@@ -2439,8 +2435,8 @@
     // these objects.
     UnbufferedRootVisitor root_visitor(visitor, RootInfo(kRootStickyClass));
     boot_class_table_->VisitRoots(root_visitor);
-    // If tracing is enabled or jit is active, mark all the class loaders to prevent unloading.
-    if ((flags & kVisitRootFlagClassLoader) != 0 || tracing_enabled || is_jit_active) {
+    // If tracing is enabled, then mark all the class loaders to prevent unloading.
+    if ((flags & kVisitRootFlagClassLoader) != 0 || tracing_enabled) {
       for (const ClassLoaderData& data : class_loaders_) {
         GcRoot<mirror::Object> root(GcRoot<mirror::Object>(self->DecodeJObject(data.weak_root)));
         root.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index c203298..f67ffdd 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -229,6 +229,10 @@
                                 Thread* self,
                                 CompilationKind compilation_kind,
                                 bool prejit) {
+  if (kIsDebugBuild) {
+    MutexLock mu(self, *Locks::jit_lock_);
+    CHECK(GetCodeCache()->IsMethodBeingCompiled(method, compilation_kind));
+  }
   DCHECK(Runtime::Current()->UseJitCompilation());
   DCHECK(!method->IsRuntimeMethod());
 
@@ -724,6 +728,51 @@
   child_mapping_methods.Reset();
 }
 
+class ScopedCompilation {
+ public:
+  ScopedCompilation(ScopedCompilation&& other) noexcept :
+      jit_(other.jit_),
+      method_(other.method_),
+      compilation_kind_(other.compilation_kind_),
+      owns_compilation_(other.owns_compilation_) {
+    other.owns_compilation_ = false;
+  }
+
+  ScopedCompilation(Jit* jit, ArtMethod* method, CompilationKind compilation_kind)
+      : jit_(jit),
+        method_(method),
+        compilation_kind_(compilation_kind),
+        owns_compilation_(true) {
+    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+    // We don't want to enqueue any new tasks when the thread pool has stopped. This
+    // simplifies the implementation of the redefinition feature in jvmti.
+    if (jit_->GetThreadPool() == nullptr ||
+        !jit_->GetThreadPool()->HasStarted(Thread::Current()) ||
+        jit_->GetCodeCache()->IsMethodBeingCompiled(method_, compilation_kind_)) {
+      owns_compilation_ = false;
+      return;
+    }
+    jit_->GetCodeCache()->AddMethodBeingCompiled(method_, compilation_kind_);
+  }
+
+  bool OwnsCompilation() const {
+    return owns_compilation_;
+  }
+
+  ~ScopedCompilation() {
+    if (owns_compilation_) {
+      MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+      jit_->GetCodeCache()->RemoveMethodBeingCompiled(method_, compilation_kind_);
+    }
+  }
+
+ private:
+  Jit* const jit_;
+  ArtMethod* const method_;
+  const CompilationKind compilation_kind_;
+  bool owns_compilation_;
+};
+
 class JitCompileTask final : public Task {
  public:
   enum class TaskKind {
@@ -733,10 +782,16 @@
 
   JitCompileTask(ArtMethod* method,
                  TaskKind task_kind,
-                 CompilationKind compilation_kind)
+                 CompilationKind compilation_kind,
+                 ScopedCompilation&& sc)
       : method_(method),
         kind_(task_kind),
-        compilation_kind_(compilation_kind) {}
+        compilation_kind_(compilation_kind),
+        scoped_compilation_(std::move(sc)) {
+    DCHECK(scoped_compilation_.OwnsCompilation());
+    // NOLINTNEXTLINE - OwnsCompilation is still valid after move constructor
+    DCHECK(!sc.OwnsCompilation());
+  }
 
   void Run(Thread* self) override {
     {
@@ -764,6 +819,7 @@
   ArtMethod* const method_;
   const TaskKind kind_;
   const CompilationKind compilation_kind_;
+  ScopedCompilation scoped_compilation_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
 };
@@ -1258,12 +1314,15 @@
                          ArtMethod* method,
                          CompilationKind compilation_kind,
                          bool precompile) {
+  ScopedCompilation sc(this, method, compilation_kind);
+  if (!sc.OwnsCompilation()) {
+    return;
+  }
   JitCompileTask::TaskKind task_kind = precompile
       ? JitCompileTask::TaskKind::kPreCompile
       : JitCompileTask::TaskKind::kCompile;
-  if (thread_pool_ != nullptr &&  thread_pool_->HasStarted(self)) {
-    thread_pool_->AddTask(self, new JitCompileTask(method, task_kind, compilation_kind));
-  }
+  thread_pool_->AddTask(
+      self, new JitCompileTask(method, task_kind, compilation_kind, std::move(sc)));
 }
 
 bool Jit::CompileMethodFromProfile(Thread* self,
@@ -1298,11 +1357,15 @@
     VLOG(jit) << "JIT Zygote processing method " << ArtMethod::PrettyMethod(method)
               << " from profile";
     method->SetPreCompiled();
+    ScopedCompilation sc(this, method, compilation_kind);
+    if (!sc.OwnsCompilation()) {
+      return false;
+    }
     if (!add_to_queue) {
       CompileMethodInternal(method, self, compilation_kind, /* prejit= */ true);
     } else {
       Task* task = new JitCompileTask(
-          method, JitCompileTask::TaskKind::kPreCompile, compilation_kind);
+          method, JitCompileTask::TaskKind::kPreCompile, compilation_kind, std::move(sc));
       if (compile_after_boot) {
         AddPostBootTask(self, task);
       } else {
@@ -1753,6 +1816,11 @@
                         Thread* self,
                         CompilationKind compilation_kind,
                         bool prejit) {
+  ScopedCompilation sc(this, method, compilation_kind);
+  // TODO: all current users of this method expect us to wait if it is being compiled.
+  if (!sc.OwnsCompilation()) {
+    return false;
+  }
   // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
   ScopedSetRuntimeThread ssrt(self);
   // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
@@ -1760,13 +1828,5 @@
   return CompileMethodInternal(method, self, compilation_kind, prejit);
 }
 
-bool Jit::IsActive(Thread* self) {
-  Jit* jit = Runtime::Current()->GetJit();
-  if (jit == nullptr || jit->GetThreadPool() == nullptr) {
-    return false;
-  }
-  return jit->GetThreadPool()->IsActive(self);
-}
-
 }  // namespace jit
 }  // namespace art
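
Note on the JitCompileTask constructor above: it deliberately reads the
moved-from guard to assert that ownership was transferred, which is what
the NOLINT marker is about. A minimal standalone illustration of that
invariant (Guard and Task are invented names, not ART code):

    #include <cassert>
    #include <utility>

    class Guard {
     public:
      explicit Guard(bool owns) : owns_(owns) {}
      Guard(Guard&& other) noexcept : owns_(other.owns_) {
        other.owns_ = false;  // moved-from guard is left in a defined state
      }
      bool Owns() const { return owns_; }

     private:
      bool owns_;
    };

    struct Task {
      explicit Task(Guard&& g) : guard_(std::move(g)) {
        assert(guard_.Owns());  // the task now holds the slot
        // Reading `g` after the move is deliberate: the move constructor
        // leaves the source owning nothing, so this checks the transfer.
        assert(!g.Owns());
      }
      Guard guard_;
    };

    int main() {
      Task task{Guard(true)};
      (void)task;
      return 0;
    }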
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 28bd99d..d5fcd35 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -242,9 +242,6 @@
   // Create JIT itself.
   static std::unique_ptr<Jit> Create(JitCodeCache* code_cache, JitOptions* options);
 
-  // Return whether any JIT thread is busy compiling, or the task queue is not empty.
-  static bool IsActive(Thread* self);
-
   bool CompileMethod(ArtMethod* method, Thread* self, CompilationKind compilation_kind, bool prejit)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 6a7820b..4f59930 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1197,9 +1197,62 @@
   garbage_collect_code_ = value;
 }
 
+void JitCodeCache::RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
+  ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
+  DCHECK(IsMethodBeingCompiled(method, kind));
+  switch (kind) {
+    case CompilationKind::kOsr:
+      current_osr_compilations_.erase(method);
+      break;
+    case CompilationKind::kBaseline:
+      current_baseline_compilations_.erase(method);
+      break;
+    case CompilationKind::kOptimized:
+      current_optimized_compilations_.erase(method);
+      break;
+  }
+}
+
+void JitCodeCache::AddMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
+  ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
+  DCHECK(!IsMethodBeingCompiled(method, kind));
+  switch (kind) {
+    case CompilationKind::kOsr:
+      current_osr_compilations_.insert(method);
+      break;
+    case CompilationKind::kBaseline:
+      current_baseline_compilations_.insert(method);
+      break;
+    case CompilationKind::kOptimized:
+      current_optimized_compilations_.insert(method);
+      break;
+  }
+}
+
+bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
+  ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
+  switch (kind) {
+    case CompilationKind::kOsr:
+      return ContainsElement(current_osr_compilations_, method);
+    case CompilationKind::kBaseline:
+      return ContainsElement(current_baseline_compilations_, method);
+    case CompilationKind::kOptimized:
+      return ContainsElement(current_optimized_compilations_, method);
+  }
+}
+
+bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method) {
+  ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
+  return ContainsElement(current_optimized_compilations_, method) ||
+      ContainsElement(current_osr_compilations_, method) ||
+      ContainsElement(current_baseline_compilations_, method);
+}
+
 ProfilingInfo* JitCodeCache::GetProfilingInfo(ArtMethod* method, Thread* self) {
   ScopedDebugDisallowReadBarriers sddrb(self);
   MutexLock mu(self, *Locks::jit_lock_);
+  DCHECK(IsMethodBeingCompiled(method))
+      << "GetProfilingInfo should only be called when the method is being compiled";
   auto it = profiling_infos_.find(method);
   if (it == profiling_infos_.end()) {
     return nullptr;
@@ -1519,10 +1572,35 @@
   return osr_code_map_.find(method) != osr_code_map_.end();
 }
 
+void JitCodeCache::VisitRoots(RootVisitor* visitor) {
+  if (Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {
+    // In case of userfaultfd compaction, ArtMethods are updated concurrently
+    // via linear-alloc.
+    return;
+  }
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+  UnbufferedRootVisitor root_visitor(visitor, RootInfo(kRootStickyClass));
+  for (ArtMethod* method : current_optimized_compilations_) {
+    method->VisitRoots(root_visitor, kRuntimePointerSize);
+  }
+  for (ArtMethod* method : current_baseline_compilations_) {
+    method->VisitRoots(root_visitor, kRuntimePointerSize);
+  }
+  for (ArtMethod* method : current_osr_compilations_) {
+    method->VisitRoots(root_visitor, kRuntimePointerSize);
+  }
+}
+
 bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
                                        Thread* self,
                                        CompilationKind compilation_kind,
                                        bool prejit) {
+  if (kIsDebugBuild) {
+    MutexLock mu(self, *Locks::jit_lock_);
+    // Note: the compilation kind may have been adjusted from what was passed initially.
+    // We really just want to check that the method is indeed being compiled.
+    CHECK(IsMethodBeingCompiled(method));
+  }
   const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
   if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) {
     OatQuickMethodHeader* method_header =
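
The three Add/Remove/IsMethodBeingCompiled helpers above share one shape:
an exhaustive switch over CompilationKind selecting one of three sets. A
standalone sketch of that dispatch (names invented for the example;
__builtin_unreachable() is a GCC/Clang builtin):

    #include <set>

    enum class Kind { kOsr, kBaseline, kOptimized };

    static std::set<int> osr_set;
    static std::set<int> baseline_set;
    static std::set<int> optimized_set;

    // Exhaustive switch with no default: if a new Kind is added, the
    // compiler's -Wswitch warning flags every site that must be updated.
    static std::set<int>& SetFor(Kind kind) {
      switch (kind) {
        case Kind::kOsr:       return osr_set;
        case Kind::kBaseline:  return baseline_set;
        case Kind::kOptimized: return optimized_set;
      }
      __builtin_unreachable();  // all enumerators handled above
    }

    int main() {
      SetFor(Kind::kBaseline).insert(42);
      return SetFor(Kind::kBaseline).count(42) == 1 ? 0 : 1;
    }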
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index eb60594..82510fb 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -422,6 +422,20 @@
                               Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void VisitRoots(RootVisitor* visitor);
+
+  // Return whether `method` is being compiled with the given mode.
+  bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+      REQUIRES(Locks::jit_lock_);
+
+  // Remove `method` from the list of methods being compiled with the given mode.
+  void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+      REQUIRES(Locks::jit_lock_);
+
+  // Record that `method` is being compiled with the given mode.
+  void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+      REQUIRES(Locks::jit_lock_);
+
  private:
   JitCodeCache();
 
@@ -507,6 +521,9 @@
       REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Return whether `method` is being compiled in any mode.
+  bool IsMethodBeingCompiled(ArtMethod* method) REQUIRES(Locks::jit_lock_);
+
   class JniStubKey;
   class JniStubData;
 
@@ -552,6 +569,11 @@
   // ProfilingInfo objects we have allocated.
   SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
 
+  // Methods we are currently compiling, one set for each kind of compilation.
+  std::set<ArtMethod*> current_optimized_compilations_ GUARDED_BY(Locks::jit_lock_);
+  std::set<ArtMethod*> current_osr_compilations_ GUARDED_BY(Locks::jit_lock_);
+  std::set<ArtMethod*> current_baseline_compilations_ GUARDED_BY(Locks::jit_lock_);
+
   // Methods that the zygote has compiled and can be shared across processes
   // forked from the zygote.
   ZygoteMap zygote_map_;
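
The REQUIRES(Locks::jit_lock_) / GUARDED_BY(Locks::jit_lock_) annotations
in this header are checked by Clang's thread-safety analysis. A
self-contained sketch of the underlying attributes (the Mutex wrapper and
all names are invented for the example; build with
clang++ -Wthread-safety):

    #include <mutex>
    #include <set>

    #define CAPABILITY(x) __attribute__((capability(x)))
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #define REQUIRES(x) __attribute__((requires_capability(x)))
    #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() { mu_.lock(); }
      void Unlock() RELEASE() { mu_.unlock(); }

     private:
      std::mutex mu_;
    };

    Mutex jit_lock;
    std::set<int> current_compilations GUARDED_BY(jit_lock);

    // Callers must hold jit_lock; the analysis verifies this statically.
    bool IsBeingCompiled(int method_id) REQUIRES(jit_lock) {
      return current_compilations.count(method_id) != 0;
    }

    void Example() {
      jit_lock.Lock();
      IsBeingCompiled(1);    // OK: jit_lock is held
      jit_lock.Unlock();
      // IsBeingCompiled(1); // would warn: called without holding jit_lock
    }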
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a1d2607..f79351c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -2614,6 +2614,9 @@
   }
   jni_id_manager_->VisitRoots(visitor);
   heap_->VisitAllocationRecords(visitor);
+  if (jit_ != nullptr) {
+    jit_->GetCodeCache()->VisitRoots(visitor);
+  }
   if ((flags & kVisitRootFlagNewRoots) == 0) {
     // Guaranteed to have no new roots in the constant roots.
     VisitConstantRoots(visitor);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index fdfed74..de9ccba 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -304,11 +304,6 @@
   return nullptr;
 }
 
-bool ThreadPool::IsActive(Thread* self) {
-  MutexLock mu(self, task_queue_lock_);
-  return waiting_count_ != GetThreadCount() || HasOutstandingTasks();
-}
-
 void ThreadPool::Wait(Thread* self, bool do_work, bool may_hold_locks) {
   if (do_work) {
     CHECK(!create_peers_);
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 7a3acec..5c75733 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -133,9 +133,6 @@
   // Remove all tasks in the queue.
   void RemoveAllTasks(Thread* self) REQUIRES(!task_queue_lock_);
 
-  // Return whether any thread in the pool is busy, or the task queue is not empty.
-  bool IsActive(Thread* self) REQUIRES(!task_queue_lock_);
-
   // Create a named thread pool with the given number of threads.
   //
   // If create_peers is true, all worker threads will have a Java peer object. Note that if the