Fix potential bugs in allocation tracker
Add a missing call to visit allocation records in the concurrent copying collector.
Handle null class objects in case class unloading is ever implemented, and fix issues
with disallowing and allowing new allocation records.
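
For reference, the core of the null-class handling is just a guard before
dereferencing the class root. Below is a minimal standalone sketch of that
pattern; FakeClass, FakeClassRoot, and the free GetClassDescriptor function are
illustrative stand-ins, not the real mirror::Class / GcRoot<mirror::Class> API:

    #include <iostream>
    #include <string>

    // Hypothetical stand-in for mirror::Class; only a descriptor accessor is
    // sketched, mirroring the GetDescriptor(storage) shape used in ART.
    struct FakeClass {
      const char* GetDescriptor(std::string* storage) const {
        *storage = "Ljava/lang/Object;";
        return storage->c_str();
      }
    };

    // Hypothetical stand-in for a GC root that may be cleared to null once
    // the referenced class is unloaded.
    struct FakeClassRoot {
      const FakeClass* klass = nullptr;
      bool IsNull() const { return klass == nullptr; }
      const FakeClass* Read() const { return klass; }
    };

    // Shape of the fix: return a placeholder instead of dereferencing a
    // null class root.
    const char* GetClassDescriptor(const FakeClassRoot& root, std::string* storage) {
      if (root.IsNull()) {
        return "null";
      }
      return root.Read()->GetDescriptor(storage);
    }

    int main() {
      std::string storage;

      FakeClassRoot unloaded;  // root cleared to null, e.g. after class unloading
      std::cout << GetClassDescriptor(unloaded, &storage) << "\n";  // prints "null"

      FakeClass object_class;
      FakeClassRoot live;
      live.klass = &object_class;  // live class root
      std::cout << GetClassDescriptor(live, &storage) << "\n";  // prints the descriptor

      return 0;
    }
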
Bug: 20037135
Change-Id: I59b7321c281e0d79a620501b2f43e36d2a576203
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 11921f4..6537ed2 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -32,6 +32,15 @@
return method_->GetLineNumFromDexPC(dex_pc_);
}
+const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
+ // klass_ could contain null only if we implement class unloading.
+ if (UNLIKELY(klass_.IsNull())) {
+ return "null";
+ } else {
+ return klass_.Read()->GetDescriptor(storage);
+ }
+}
+
void AllocRecordObjectMap::SetProperties() {
#ifdef HAVE_ANDROID_OS
// Check whether there's a system property overriding the max number of records.
@@ -97,7 +106,7 @@
// Only visit the last recent_record_max_ number of allocation records in entries_ and mark the
// klass_ fields as strong roots.
for (auto it = entries_.rbegin(), end = entries_.rend(); count > 0 && it != end; count--, ++it) {
- buffered_visitor.VisitRoot(it->second->GetClassGcRoot());
+ buffered_visitor.VisitRootIfNonNull(it->second->GetClassGcRoot());
}
}
@@ -107,6 +116,8 @@
GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
// This does not need a read barrier because this is called by GC.
mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
+ // The class object can become null if we implement class unloading.
+ // In that case we might still want to keep the class name string (not implemented).
mirror::Object* new_object = UNLIKELY(old_object == nullptr) ?
nullptr : callback(old_object, arg);
if (UNLIKELY(old_object != new_object)) {
@@ -163,11 +174,6 @@
allow_new_record_ = false;
}
-void AllocRecordObjectMap::EnsureNewAllocationRecordsDisallowed() {
- CHECK(!allow_new_record_);
-}
-
-
struct AllocRecordStackVisitor : public StackVisitor {
AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)