More of the concurrent copying collector.

Add the region space to Heap: create it when the foreground collector is
the concurrent copying (CC) collector, account for it in allocation
failure logging, heap utilization, thread-local buffer revocation, and
zygote compaction, switch CC to the region allocators, and add
VisitObjectsPaused() plus a read barrier table for the table-lookup read
barrier.

Bug: 12687968
Change-Id: I62f70274d47df6d6cab714df95c518b750ce3105
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6ba30c6..ba06e05 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -49,6 +49,7 @@
 #include "gc/space/dlmalloc_space-inl.h"
 #include "gc/space/image_space.h"
 #include "gc/space/large_object_space.h"
+#include "gc/space/region_space.h"
 #include "gc/space/rosalloc_space-inl.h"
 #include "gc/space/space-inl.h"
 #include "gc/space/zygote_space.h"
@@ -176,6 +177,7 @@
       current_non_moving_allocator_(kAllocatorTypeNonMoving),
       bump_pointer_space_(nullptr),
       temp_space_(nullptr),
+      region_space_(nullptr),
       min_free_(min_free),
       max_free_(max_free),
       target_utilization_(target_utilization),
@@ -211,6 +213,12 @@
   mark_bitmap_.reset(new accounting::HeapBitmap(this));
   // Requested begin for the alloc space, to follow the mapped image and oat files
   uint8_t* requested_alloc_space_begin = nullptr;
+  if (foreground_collector_type_ == kCollectorTypeCC) {
+    // Need to use a low address so that we can allocate a contiguous
+    // 2 * Xmx space when there's no image (dex2oat for target).
+    CHECK_GE(300 * MB, non_moving_space_capacity);
+    requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
+  }
   if (!image_file_name.empty()) {
     std::string error_msg;
     space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str(),
@@ -241,8 +249,9 @@
                                      +-main alloc space2 / bump space 2 (capacity_)+-
                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
   */
-  // We don't have hspace compaction enabled with GSS.
-  if (foreground_collector_type_ == kCollectorTypeGSS) {
+  // We don't have hspace compaction enabled with GSS or CC.
+  if (foreground_collector_type_ == kCollectorTypeGSS ||
+      foreground_collector_type_ == kCollectorTypeCC) {
     use_homogeneous_space_compaction_for_oom_ = false;
   }
   bool support_homogeneous_space_compaction =
@@ -280,10 +289,12 @@
     // Try to reserve virtual memory at a lower address if we have a separate non moving space.
     request_begin = reinterpret_cast<uint8_t*>(300 * MB);
   }
-  // Attempt to create 2 mem maps at or after the requested begin.
-  main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
-                                                    &error_str));
-  CHECK(main_mem_map_1.get() != nullptr) << error_str;
+  if (foreground_collector_type_ != kCollectorTypeCC) {
+    // Attempt to create 2 mem maps at or after the requested begin.
+    main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
+                                                      &error_str));
+    CHECK(main_mem_map_1.get() != nullptr) << error_str;
+  }
   if (support_homogeneous_space_compaction ||
       background_collector_type_ == kCollectorTypeSS ||
       foreground_collector_type_ == kCollectorTypeSS) {
@@ -305,7 +316,10 @@
     AddSpace(non_moving_space_);
   }
   // Create other spaces based on whether or not we have a moving GC.
-  if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) {
+  if (foreground_collector_type_ == kCollectorTypeCC) {
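+    // Reserve twice the capacity (2 * Xmx, matching the reservation above) so
+    // that evacuation has room to copy live objects into to-space regions.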
+    region_space_ = space::RegionSpace::Create("Region space", capacity_ * 2, request_begin);
+    AddSpace(region_space_);
+  } else if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) {
     // Create bump pointer spaces.
    // We only need to create the bump pointer spaces if the foreground collector is a compacting GC.
     // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
@@ -379,6 +393,12 @@
   // Allocate the card table.
   card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
   CHECK(card_table_.get() != NULL) << "Failed to create card table";
+
+  if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
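+    // With the table-lookup read barrier, this table records which parts of
+    // the heap currently require a read barrier; it starts with all entries
+    // cleared.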
+    rb_table_.reset(new accounting::ReadBarrierTable());
+    DCHECK(rb_table_->IsAllCleared());
+  }
+
   // Card cache for now since it makes it easier for us to update the references to the copying
   // spaces.
   accounting::ModUnionTable* mod_union_table =
@@ -703,29 +723,64 @@
   }
 }
 
+// Visit objects when threads aren't suspended. If the collector is a
+// concurrent moving GC, disable moving GC, suspend the threads, and then
+// visit the objects.
 void Heap::VisitObjects(ObjectCallback callback, void* arg) {
   Thread* self = Thread::Current();
-  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
-    // Threads are already suspended.
-    VisitObjectsInternal(callback, arg);
-  } else if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
-    // Concurrent moving GC. Suspend all threads and visit objects.
-    DCHECK_EQ(collector_type_, foreground_collector_type_);
-    DCHECK_EQ(foreground_collector_type_, background_collector_type_)
-        << "Assume no transition such that collector_type_ won't change";
+  Locks::mutator_lock_->AssertSharedHeld(self);
+  DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
+  if (IsGcConcurrentAndMoving()) {
+    // Concurrent moving GC. Just suspending the threads isn't sufficient,
+    // because a collection isn't one big pause: the threads could be
+    // suspended in the middle of a concurrent moving collection (between
+    // phases), where it isn't easy to tell which objects in the region
+    // space and the non-moving space are alive, or which copies of objects
+    // should be visited, and the to-space invariant could easily be broken.
+    // Instead, disable the moving GC with IncrementDisableMovingGC(),
+    // suspend the threads, and then visit the objects.
+    IncrementDisableMovingGC(self);
     self->TransitionFromRunnableToSuspended(kWaitingForVisitObjects);
     ThreadList* tl = Runtime::Current()->GetThreadList();
     tl->SuspendAll();
+    VisitObjectsInternalRegionSpace(callback, arg);
     VisitObjectsInternal(callback, arg);
     tl->ResumeAll();
     self->TransitionFromSuspendedToRunnable();
+    DecrementDisableMovingGC(self);
   } else {
     // GCs can move objects, so don't allow this.
     ScopedAssertNoThreadSuspension ants(self, "Visiting objects");
+    DCHECK(region_space_ == nullptr);
     VisitObjectsInternal(callback, arg);
   }
 }
 
+// Visit objects when threads are already suspended.
+void Heap::VisitObjectsPaused(ObjectCallback callback, void* arg) {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertExclusiveHeld(self);
+  VisitObjectsInternalRegionSpace(callback, arg);
+  VisitObjectsInternal(callback, arg);
+}
+
+// Visit objects in the region space.
+void Heap::VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertExclusiveHeld(self);
+  if (region_space_ != nullptr) {
+    DCHECK(IsGcConcurrentAndMoving());
+    if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
+      // The exception is the pre-zygote-fork window, where the semi-space
+      // collector calls VerifyHeapReferences() as part of the zygote
+      // compaction and reaches this point without the moving GC disabled,
+      // which is fine.
+      DCHECK(IsMovingGCDisabled(self));
+    }
+    region_space_->Walk(callback, arg);
+  }
+}
+
+// Visit objects in the other spaces.
 void Heap::VisitObjectsInternal(ObjectCallback callback, void* arg) {
   if (bump_pointer_space_ != nullptr) {
     // Visit objects in bump pointer space.
@@ -956,6 +1011,9 @@
     } else if (allocator_type == kAllocatorTypeBumpPointer ||
                allocator_type == kAllocatorTypeTLAB) {
       space = bump_pointer_space_;
+    } else if (allocator_type == kAllocatorTypeRegion ||
+               allocator_type == kAllocatorTypeRegionTLAB) {
+      space = region_space_;
     }
     if (space != nullptr) {
       space->LogFragmentationAllocFailure(oss, byte_count);
@@ -1062,6 +1120,9 @@
   if (bump_pointer_space_ != nullptr) {
     total_alloc_space_allocated -= bump_pointer_space_->Size();
   }
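+  // As with the bump pointer space, exclude region space allocations from
+  // the managed utilization computation.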
+  if (region_space_ != nullptr) {
+    total_alloc_space_allocated -= region_space_->GetBytesAllocated();
+  }
   const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
       static_cast<float>(total_alloc_space_size);
   uint64_t gc_heap_end_ns = NanoTime();
@@ -1134,6 +1195,9 @@
     // a GC). When a GC isn't running End() - Begin() is 0 which means no objects are contained.
     return temp_space_->Contains(obj);
   }
+  if (region_space_ != nullptr && region_space_->HasAddress(obj)) {
+    return true;
+  }
   space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
   space::DiscontinuousSpace* d_space = nullptr;
   if (c_space != nullptr) {
@@ -1780,7 +1844,15 @@
     collector_type_ = collector_type;
     gc_plan_.clear();
     switch (collector_type_) {
-      case kCollectorTypeCC:  // Fall-through.
+      case kCollectorTypeCC: {
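+        // The concurrent copying collector only does full collections and
+        // allocates out of the region space, via TLABs when they are enabled.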
+        gc_plan_.push_back(collector::kGcTypeFull);
+        if (use_tlab_) {
+          ChangeAllocator(kAllocatorTypeRegionTLAB);
+        } else {
+          ChangeAllocator(kAllocatorTypeRegion);
+        }
+        break;
+      }
       case kCollectorTypeMC:  // Fall-through.
       case kCollectorTypeSS:  // Fall-through.
       case kCollectorTypeGSS: {
@@ -1963,7 +2035,11 @@
     // Compact the bump pointer space to a new zygote bump pointer space.
     bool reset_main_space = false;
     if (IsMovingGc(collector_type_)) {
-      zygote_collector.SetFromSpace(bump_pointer_space_);
+      if (collector_type_ == kCollectorTypeCC) {
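+        // With CC, the pre-zygote-fork compaction copies out of the region
+        // space instead of the bump pointer space.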
+        zygote_collector.SetFromSpace(region_space_);
+      } else {
+        zygote_collector.SetFromSpace(bump_pointer_space_);
+      }
     } else {
       CHECK(main_space_ != nullptr);
       // Copy from the main space.
@@ -1984,7 +2060,11 @@
       delete old_main_space;
       AddSpace(main_space_);
     } else {
-      bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+      if (collector_type_ == kCollectorTypeCC) {
+        region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+      } else {
+        bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+      }
     }
     if (temp_space_ != nullptr) {
       CHECK(temp_space_->IsEmpty());
@@ -2154,7 +2234,9 @@
   // TODO: Clean this up.
   if (compacting_gc) {
     DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
-           current_allocator_ == kAllocatorTypeTLAB);
+           current_allocator_ == kAllocatorTypeTLAB ||
+           current_allocator_ == kAllocatorTypeRegion ||
+           current_allocator_ == kAllocatorTypeRegionTLAB);
     switch (collector_type_) {
       case kCollectorTypeSS:
         // Fall-through.
@@ -2165,6 +2247,7 @@
         collector = semi_space_collector_;
         break;
       case kCollectorTypeCC:
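+        // Hand the region space to the CC collector before it runs.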
+        concurrent_copying_collector_->SetRegionSpace(region_space_);
         collector = concurrent_copying_collector_;
         break;
       case kCollectorTypeMC:
@@ -2174,7 +2257,7 @@
       default:
         LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
     }
-    if (collector != mark_compact_collector_) {
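+    // Neither mark-compact nor CC uses temp_space_, so only the semi-space
+    // collectors need it protected and empty here.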
+    if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
       temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
       CHECK(temp_space_->IsEmpty());
     }
@@ -2491,7 +2574,7 @@
   // 2. Allocated during the GC (pre sweep GC verification).
   // We don't want to verify the objects in the live stack since they themselves may be
   // pointing to dead objects if they are not reachable.
-  VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
+  VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
   // Verify the roots:
   Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRootCallback, &visitor);
   if (visitor.GetFailureCount() > 0) {
@@ -2633,7 +2716,7 @@
 
 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
   // This must be called only during the pause.
-  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
+  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
   MutexLock mu2(self, *Locks::thread_list_lock_);
   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
@@ -3175,6 +3258,9 @@
   if (bump_pointer_space_ != nullptr) {
     bump_pointer_space_->RevokeThreadLocalBuffers(thread);
   }
+  if (region_space_ != nullptr) {
+    region_space_->RevokeThreadLocalBuffers(thread);
+  }
 }
 
 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
@@ -3190,6 +3276,9 @@
   if (bump_pointer_space_ != nullptr) {
     bump_pointer_space_->RevokeAllThreadLocalBuffers();
   }
+  if (region_space_ != nullptr) {
+    region_space_->RevokeAllThreadLocalBuffers();
+  }
 }
 
 bool Heap::IsGCRequestPending() const {