Pause GC during deoptimization work

When we are deoptimizing code, we should pause the GC so that we do not
deadlock against a concurrent collection while all threads are
suspended. This change also reorganizes the GC pause code a bit to make
this simpler to implement.
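
As a rough usage sketch (not part of the diff below; the wrapper
function name is made up for illustration), the new GCCriticalSection
lets a caller pause the GC around work that must not race with a
collection, e.g. code that suspends all threads between Enter() and
Exit():

  #include "gc/scoped_gc_critical_section.h"

  void PauseGcAroundWork(art::Thread* self) {
    art::gc::GCCriticalSection gcs(self, "Example critical section");
    // Waits for any GC in progress and keeps new collections from
    // starting until Exit().
    const char* old_cause = gcs.Enter(art::gc::kGcCauseInstrumentation,
                                      art::gc::kCollectorTypeCriticalSection);
    // ... work that must not run concurrently with the GC, such as a
    // SuspendAll, goes here ...
    gcs.Exit(old_cause);  // The GC may run again after this.
  }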

Test: ./test.py --host
Bug: 79175795
Change-Id: I00000ab608231a7fd1627d69d58216b9115a47ca
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index 2f24d7e..d20c756 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -40,6 +40,8 @@
 #include "dex/dex_file_annotations.h"
 #include "dex/modifiers.h"
 #include "events-inl.h"
+#include "gc/heap.h"
+#include "gc/scoped_gc_critical_section.h"
 #include "jit/jit.h"
 #include "jni/jni_internal.h"
 #include "mirror/class-inl.h"
@@ -266,29 +268,35 @@
   deoptimization_status_lock_.ExclusiveUnlock(self);
 }
 
+// Users should make sure that only GC-critical-section-safe code is used while a
+// ScopedDeoptimizationContext exists.
 class ScopedDeoptimizationContext : public art::ValueObject {
  public:
   ScopedDeoptimizationContext(art::Thread* self, DeoptManager* deopt)
       RELEASE(deopt->deoptimization_status_lock_)
       ACQUIRE(art::Locks::mutator_lock_)
       ACQUIRE(art::Roles::uninterruptible_)
-      : self_(self), deopt_(deopt), uninterruptible_cause_(nullptr) {
+      : self_(self),
+        deopt_(deopt),
+        critical_section_(self_, "JVMTI Deoptimizing methods"),
+        uninterruptible_cause_(nullptr) {
     deopt_->WaitForDeoptimizationToFinishLocked(self_);
     DCHECK(!deopt->performing_deoptimization_)
         << "Already performing deoptimization on another thread!";
     // Use performing_deoptimization_ to keep track of the lock.
     deopt_->performing_deoptimization_ = true;
     deopt_->deoptimization_status_lock_.Unlock(self_);
+    uninterruptible_cause_ = critical_section_.Enter(art::gc::kGcCauseInstrumentation,
+                                                     art::gc::kCollectorTypeCriticalSection);
     art::Runtime::Current()->GetThreadList()->SuspendAll("JMVTI Deoptimizing methods",
                                                          /*long_suspend*/ false);
-    uninterruptible_cause_ = self_->StartAssertNoThreadSuspension("JVMTI deoptimizing methods");
   }
 
   ~ScopedDeoptimizationContext()
       RELEASE(art::Locks::mutator_lock_)
       RELEASE(art::Roles::uninterruptible_) {
     // Can be suspended again.
-    self_->EndAssertNoThreadSuspension(uninterruptible_cause_);
+    critical_section_.Exit(uninterruptible_cause_);
     // Release the mutator lock.
     art::Runtime::Current()->GetThreadList()->ResumeAll();
     // Let other threads know it's fine to proceed.
@@ -300,6 +308,7 @@
  private:
   art::Thread* self_;
   DeoptManager* deopt_;
+  art::gc::GCCriticalSection critical_section_;
   const char* uninterruptible_cause_;
 };
 
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d014372..5c34c56 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -1425,6 +1425,7 @@
   friend class collector::ConcurrentCopying;
   friend class collector::MarkSweep;
   friend class collector::SemiSpace;
+  friend class GCCriticalSection;
   friend class ReferenceQueue;
   friend class ScopedGCCriticalSection;
   friend class VerifyReferenceCardVisitor;
diff --git a/runtime/gc/scoped_gc_critical_section.cc b/runtime/gc/scoped_gc_critical_section.cc
index 2976dd0..7a0a6e8 100644
--- a/runtime/gc/scoped_gc_critical_section.cc
+++ b/runtime/gc/scoped_gc_critical_section.cc
@@ -24,20 +24,38 @@
 namespace art {
 namespace gc {
 
+const char* GCCriticalSection::Enter(GcCause cause, CollectorType type) {
+  Runtime::Current()->GetHeap()->StartGC(self_, cause, type);
+  if (self_ != nullptr) {
+    return self_->StartAssertNoThreadSuspension(section_name_);
+  } else {
+    // Workaround to avoid having to mark the whole function as NO_THREAD_SAFETY_ANALYSIS.
+    auto kludge = []() ACQUIRE(Roles::uninterruptible_) NO_THREAD_SAFETY_ANALYSIS {};
+    kludge();
+    return nullptr;
+  }
+}
+
+void GCCriticalSection::Exit(const char* old_cause) {
+  if (self_ != nullptr) {
+    self_->EndAssertNoThreadSuspension(old_cause);
+  } else {
+    // Workaround to avoid having to mark the whole function as NO_THREAD_SAFETY_ANALYSIS.
+    auto kludge = []() RELEASE(Roles::uninterruptible_) NO_THREAD_SAFETY_ANALYSIS {};
+    kludge();
+  }
+  Runtime::Current()->GetHeap()->FinishGC(self_, collector::kGcTypeNone);
+}
+
 ScopedGCCriticalSection::ScopedGCCriticalSection(Thread* self,
                                                  GcCause cause,
                                                  CollectorType collector_type)
-    : self_(self) {
-  Runtime::Current()->GetHeap()->StartGC(self, cause, collector_type);
-  if (self != nullptr) {
-    old_cause_ = self->StartAssertNoThreadSuspension("ScopedGCCriticalSection");
-  }
+    : critical_section_(self, "ScopedGCCriticalSection") {
+  old_no_suspend_reason_ = critical_section_.Enter(cause, collector_type);
 }
+
 ScopedGCCriticalSection::~ScopedGCCriticalSection() {
-  if (self_ != nullptr) {
-    self_->EndAssertNoThreadSuspension(old_cause_);
-  }
-  Runtime::Current()->GetHeap()->FinishGC(self_, collector::kGcTypeNone);
+  critical_section_.Exit(old_no_suspend_reason_);
 }
 
 }  // namespace gc
diff --git a/runtime/gc/scoped_gc_critical_section.h b/runtime/gc/scoped_gc_critical_section.h
index 1271ff7..864bf87 100644
--- a/runtime/gc/scoped_gc_critical_section.h
+++ b/runtime/gc/scoped_gc_critical_section.h
@@ -27,6 +27,24 @@
 
 namespace gc {
 
+// Prefer ScopedGCCriticalSection over using this class directly whenever possible.
+class GCCriticalSection {
+ public:
+  GCCriticalSection(Thread* self, const char* name)
+      : self_(self), section_name_(name) {}
+  ~GCCriticalSection() {}
+
+  // Starts a GCCriticalSection. Returns the previous no-suspension reason.
+  const char* Enter(GcCause cause, CollectorType type) ACQUIRE(Roles::uninterruptible_);
+
+  // Ends a GCCriticalSection. Takes the old no-suspension reason.
+  void Exit(const char* old_reason) RELEASE(Roles::uninterruptible_);
+
+ private:
+  Thread* const self_;
+  const char* section_name_;
+};
+
 // Wait until the GC is finished and then prevent the GC from starting until the destructor. Used
 // to prevent deadlocks in places where we call ClassLinker::VisitClass with all the threads
 // suspended.
@@ -37,8 +55,8 @@
   ~ScopedGCCriticalSection() RELEASE(Roles::uninterruptible_);
 
  private:
-  Thread* const self_;
-  const char* old_cause_;
+  GCCriticalSection critical_section_;
+  const char* old_no_suspend_reason_;
 };
 
 }  // namespace gc