Fix potential bugs in allocation tracker

Add a missing VisitAllocationRecords() call in the concurrent copying
collector. Handle null class objects in case class unloading is ever
implemented, and fix issues with disallowing and allowing new
allocation records.
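
The removed EnsureNewAllocationRecordsDisallowed() CHECK could fire on a
legal interleaving: the user can enable allocation tracking between the
GC's DisallowNewAllocationRecords() and AllowNewAllocationRecords()
calls, and a freshly created record map allows new records. A minimal
standalone model of just the flag logic (simplified types, not the real
ART classes; the default-true flag is an assumption of this sketch):

    // Stand-in for AllocRecordObjectMap; only allow_new_record_ is
    // modeled, and locking is omitted.
    struct TrackerModel {
      bool allow_new_record_ = true;  // assumed: a new map allows records
    };

    int main() {
      TrackerModel* tracker = nullptr;  // tracking is off

      // GC thread: DisallowNewAllocationRecords() is a no-op here,
      // since no tracker exists yet.

      // Debugger thread: the user enables tracking mid-GC.
      tracker = new TrackerModel;

      // The removed CHECK(!allow_new_record_) would abort at this point
      // even though nothing misbehaved, hence its removal.

      // GC thread: AllowNewAllocationRecords() closes the window.
      tracker->allow_new_record_ = true;

      delete tracker;
      return 0;
    }

Records added in that window are GC-safe because the new objects are
not processed in this GC cycle; at worst the first few records are
swept from the list early (see the comment added in allocation_record.h).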

Bug: 20037135
Change-Id: I59b7321c281e0d79a620501b2f43e36d2a576203
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index de46b35..97d170e 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4836,6 +4836,9 @@
       CHECK(!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
       records = &dummy;
     }
+    // We don't need to wait on the condition variable records->new_record_condition_, because
+    // this function only reads the class objects, which are already marked, so reading them does
+    // not change their reachability.
 
     //
     // Part 1: generate string tables.
@@ -4850,7 +4853,7 @@
          count > 0 && it != end; count--, it++) {
       const gc::AllocRecord* record = it->second;
       std::string temp;
-      class_names.Add(record->GetClass()->GetDescriptor(&temp));
+      class_names.Add(record->GetClassDescriptor(&temp));
       for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) {
         ArtMethod* m = record->StackElement(i).GetMethod();
         class_names.Add(m->GetDeclaringClassDescriptor());
@@ -4902,7 +4905,7 @@
       const gc::AllocRecord* record = it->second;
       size_t stack_depth = record->GetDepth();
       size_t allocated_object_class_name_index =
-          class_names.IndexOf(record->GetClass()->GetDescriptor(&temp));
+          class_names.IndexOf(record->GetClassDescriptor(&temp));
       JDWP::Append4BE(bytes, record->ByteCount());
       JDWP::Append2BE(bytes, static_cast<uint16_t>(record->GetTid()));
       JDWP::Append2BE(bytes, allocated_object_class_name_index);
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 11921f4..6537ed2 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -32,6 +32,15 @@
   return method_->GetLineNumFromDexPC(dex_pc_);
 }
 
+const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
+  // klass_ can only be null if we implement class unloading.
+  if (UNLIKELY(klass_.IsNull())) {
+    return "null";
+  } else {
+    return klass_.Read()->GetDescriptor(storage);
+  }
+}
+
 void AllocRecordObjectMap::SetProperties() {
 #ifdef HAVE_ANDROID_OS
   // Check whether there's a system property overriding the max number of records.
@@ -97,7 +106,8 @@
   // Only visit the last recent_record_max_ number of allocation records in entries_ and mark the
   // klass_ fields as strong roots.
   for (auto it = entries_.rbegin(), end = entries_.rend(); count > 0 && it != end; count--, ++it) {
-    buffered_visitor.VisitRoot(it->second->GetClassGcRoot());
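+    // klass_ can be null only if class unloading is implemented, so skip null roots here.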
+    buffered_visitor.VisitRootIfNonNull(it->second->GetClassGcRoot());
   }
 }
 
@@ -107,6 +116,8 @@
   GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
   // This does not need a read barrier because this is called by GC.
   mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
+  // The class object can become null if we implement class unloading.
+  // In that case we might still want to keep the class name string (not implemented).
   mirror::Object* new_object = UNLIKELY(old_object == nullptr) ?
       nullptr : callback(old_object, arg);
   if (UNLIKELY(old_object != new_object)) {
@@ -163,11 +174,6 @@
   allow_new_record_ = false;
 }
 
-void AllocRecordObjectMap::EnsureNewAllocationRecordsDisallowed() {
-  CHECK(!allow_new_record_);
-}
-
-
 struct AllocRecordStackVisitor : public StackVisitor {
   AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index f567153..06721c8 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -188,7 +188,10 @@
     return klass_.Read();
   }
 
-  GcRoot<mirror::Class>& GetClassGcRoot() {
+  const char* GetClassDescriptor(std::string* storage) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  GcRoot<mirror::Class>& GetClassGcRoot() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return klass_;
   }
 
@@ -262,15 +265,18 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
 
+  // Allocation tracking could be enabled by the user in between DisallowNewAllocationRecords()
+  // and AllowNewAllocationRecords(), in which case new allocation records can be added even
+  // though they should be disallowed. However, this is GC-safe because the new objects are not
+  // processed in this GC cycle. The only downside of not handling this case is that such new
+  // allocation records can be swept from the list. Missing the first few records when the user
+  // presses the button to enable allocation tracking is acceptable.
   void DisallowNewAllocationRecords()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
   void AllowNewAllocationRecords()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
-  void EnsureNewAllocationRecordsDisallowed()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
 
   // TODO: Is there a better way to hide the entries_'s type?
   EntryList::iterator Begin()
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index c7d2e9f..5e69b79 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -334,6 +334,8 @@
       }
     }
   }
+  // TODO: Other garbage collectors use Runtime::VisitConcurrentRoots(); refactor this code to
+  // use the same function.
   {
     TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
     Runtime::Current()->VisitConstantRoots(this);
@@ -351,6 +353,8 @@
     TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
     Runtime::Current()->VisitNonThreadRoots(this);
   }
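+  // Visit the allocation records: marks the klass_ fields of recent records as strong roots.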
+  Runtime::Current()->GetHeap()->VisitAllocationRecords(this);
 
   // Immune spaces.
   for (auto& space : heap_->GetContinuousSpaces()) {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1b45ea1..a782fc8 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3734,17 +3734,6 @@
   }
 }
 
-void Heap::EnsureNewAllocationRecordsDisallowed() const {
-  if (IsAllocTrackingEnabled()) {
-    // Lock and unlock once to ensure that no threads are still in the
-    // middle of adding new allocation records.
-    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
-    if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->EnsureNewAllocationRecordsDisallowed();
-    }
-  }
-}
-
 // Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
 class StackCrawlState {
  public:
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 1c75bd0..cfb6e06 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -721,10 +721,6 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
 
-  void EnsureNewAllocationRecordsDisallowed() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
-
  private:
   class ConcurrentGCTask;
   class CollectorTransitionTask;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 20e4149..5067b0d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1510,7 +1510,6 @@
   monitor_list_->EnsureNewMonitorsDisallowed();
   intern_table_->EnsureNewInternsDisallowed();
   java_vm_->EnsureNewWeakGlobalsDisallowed();
-  heap_->EnsureNewAllocationRecordsDisallowed();
 }
 
 void Runtime::SetInstructionSet(InstructionSet instruction_set) {