ART: Mark Dbg-initiated GCs with kGcCauseDebugger

Change Heap::CollectGarbage to accept an explicit GcCause parameter,
defaulting to kGcCauseExplicit for existing callers.

Change Dbg functions that run an explicit GC to set the cause to
kGcCauseDebugger.

Test: m test-art-host
Change-Id: I53d4073fca01c1de78d14a58dff33004c7971981
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 2be00f5..f504d86 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -40,6 +40,7 @@
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/allocation_record.h"
+#include "gc/gc_cause.h"
 #include "gc/scoped_gc_critical_section.h"
 #include "gc/space/bump_pointer_space-walk-inl.h"
 #include "gc/space/large_object_space.h"
@@ -947,7 +948,7 @@
 JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                        std::vector<uint64_t>* counts) {
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  heap->CollectGarbage(false);
+  heap->CollectGarbage(false, gc::GcCause::kGcCauseDebugger);
   VariableSizedHandleScope hs(Thread::Current());
   std::vector<Handle<mirror::Class>> classes;
   counts->clear();
@@ -968,7 +969,7 @@
                                   std::vector<JDWP::ObjectId>* instances) {
   gc::Heap* heap = Runtime::Current()->GetHeap();
   // We only want reachable instances, so do a GC.
-  heap->CollectGarbage(false);
+  heap->CollectGarbage(false, gc::GcCause::kGcCauseDebugger);
   JDWP::JdwpError error;
   ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
   if (c == nullptr) {
@@ -990,7 +991,7 @@
 JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                          std::vector<JDWP::ObjectId>* referring_objects) {
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  heap->CollectGarbage(false);
+  heap->CollectGarbage(false, gc::GcCause::kGcCauseDebugger);
   JDWP::JdwpError error;
   ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(object_id, &error);
   if (o == nullptr) {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 3ba12ca..dbaddaf 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1883,10 +1883,10 @@
   VisitObjects(referring_objects_finder);
 }
 
-void Heap::CollectGarbage(bool clear_soft_references) {
+void Heap::CollectGarbage(bool clear_soft_references, GcCause cause) {
   // Even if we waited for a GC we still need to do another GC since weaks allocated during the
   // last GC will not have necessarily been cleared.
-  CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
+  CollectGarbageInternal(gc_plan_.back(), cause, clear_soft_references);
 }
 
 bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 0d11658..7dcf82f 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -333,7 +333,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Initiates an explicit garbage collection.
-  void CollectGarbage(bool clear_soft_references)
+  void CollectGarbage(bool clear_soft_references, GcCause cause = kGcCauseExplicit)
       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
 
   // Does a concurrent GC, should only be called by the GC daemon thread