Make the mark stack expandable for the CC collector.

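Previously the GC mark stack had a fixed capacity and each of the three
push paths in PushOntoMarkStack() would CHECK-fail once the stack was
full. Replace those checks with ExpandGcMarkStack(), which doubles the
stack's capacity on demand by copying the entries aside, resizing the
stack, and pushing the saved entries back.
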
Bug: 12687968
Change-Id: I5d05df5524f54c6adb964901e5a963eb042cb2e1
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index d2d12af..e433b8d 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -36,13 +36,16 @@
 namespace gc {
 namespace collector {
 
+static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
+
 ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
     : GarbageCollector(heap,
                        name_prefix + (name_prefix.empty() ? "" : " ") +
                        "concurrent copying + mark sweep"),
       region_space_(nullptr), gc_barrier_(new Barrier(0)),
       gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
-                                                     2 * MB, 2 * MB)),
+                                                     kDefaultGcMarkStackSize,
+                                                     kDefaultGcMarkStackSize)),
       mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
       thread_running_gc_(nullptr),
       is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
@@ -577,6 +580,20 @@
   Locks::mutator_lock_->SharedLock(self);
 }
 
+void ConcurrentCopying::ExpandGcMarkStack() {
+  DCHECK(gc_mark_stack_->IsFull());
+  const size_t new_size = gc_mark_stack_->Capacity() * 2;
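+  // Resize() reallocates the stack's backing store and discards its
+  // contents, so copy the entries aside and push them back afterwards.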
+  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
+                                                   gc_mark_stack_->End());
+  gc_mark_stack_->Resize(new_size);
+  for (auto& ref : temp) {
+    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
+  }
+  DCHECK(!gc_mark_stack_->IsFull());
+}
+
 void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
   CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
       << " " << to_ref << " " << PrettyTypeOf(to_ref);
@@ -587,7 +602,9 @@
     if (self == thread_running_gc_) {
       // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
       CHECK(self->GetThreadLocalMarkStack() == nullptr);
-      CHECK(!gc_mark_stack_->IsFull());
+      if (UNLIKELY(gc_mark_stack_->IsFull())) {
+        ExpandGcMarkStack();
+      }
       gc_mark_stack_->PushBack(to_ref);
     } else {
       // Otherwise, use a thread-local mark stack.
@@ -621,7 +638,9 @@
   } else if (mark_stack_mode == kMarkStackModeShared) {
     // Access the shared GC mark stack with a lock.
     MutexLock mu(self, mark_stack_lock_);
-    CHECK(!gc_mark_stack_->IsFull());
+    if (UNLIKELY(gc_mark_stack_->IsFull())) {
+      ExpandGcMarkStack();
+    }
     gc_mark_stack_->PushBack(to_ref);
   } else {
     CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
@@ -633,7 +652,9 @@
         << "Only GC-running thread should access the mark stack "
         << "in the GC exclusive mark stack mode";
     // Access the GC mark stack without a lock.
-    CHECK(!gc_mark_stack_->IsFull());
+    if (UNLIKELY(gc_mark_stack_->IsFull())) {
+      ExpandGcMarkStack();
+    }
     gc_mark_stack_->PushBack(to_ref);
   }
 }
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 8efad73..c32b19e 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -182,6 +182,7 @@
   void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
   void DisableMarking() SHARED_REQUIRES(Locks::mutator_lock_);
   void IssueDisableMarkingCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
+  void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
 
   space::RegionSpace* region_space_;      // The underlying region space.
   std::unique_ptr<Barrier> gc_barrier_;
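
A minimal, self-contained sketch of the idiom, for readers outside ART:
FixedStack, Expand, and the main() driver below are hypothetical
stand-ins, not ART's ObjectStack (a memory-mapped atomic stack), but
they reproduce the contract the patch relies on: Resize() reallocates
the backing store and drops the contents, so expansion must snapshot
the entries, resize, then push them back.

#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

// Hypothetical stand-in for ObjectStack: fixed capacity, and Resize()
// reallocates the backing store, losing the current contents.
template <typename T>
class FixedStack {
 public:
  explicit FixedStack(size_t capacity)
      : capacity_(capacity), size_(0), data_(new T[capacity]) {}

  bool IsFull() const { return size_ == capacity_; }
  size_t Capacity() const { return capacity_; }
  const T* Begin() const { return data_.get(); }
  const T* End() const { return data_.get() + size_; }

  void PushBack(T value) {
    assert(!IsFull());
    data_[size_++] = value;
  }

  // Reallocates the backing store; the old entries are discarded.
  void Resize(size_t new_capacity) {
    data_.reset(new T[new_capacity]);
    capacity_ = new_capacity;
    size_ = 0;
  }

 private:
  size_t capacity_;
  size_t size_;
  std::unique_ptr<T[]> data_;
};

// The expansion idiom from ExpandGcMarkStack(): snapshot the entries,
// double the capacity, then push the snapshot back.
template <typename T>
void Expand(FixedStack<T>* stack) {
  assert(stack->IsFull());
  const size_t new_size = stack->Capacity() * 2;
  std::vector<T> temp(stack->Begin(), stack->End());
  stack->Resize(new_size);  // Drops the old contents.
  for (const T& value : temp) {
    stack->PushBack(value);  // Restore into the larger stack.
  }
  assert(!stack->IsFull());
}

int main() {
  FixedStack<int> stack(2);
  stack.PushBack(1);
  stack.PushBack(2);
  if (stack.IsFull()) {
    Expand(&stack);  // Capacity 2 -> 4; contents {1, 2} preserved.
  }
  stack.PushBack(3);
  assert(stack.Capacity() == 4);
  return 0;
}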