 runtime/debugger.cc                         |  7
 runtime/gc/allocation_record.cc             | 18
 runtime/gc/allocation_record.h              | 14
 runtime/gc/collector/concurrent_copying.cc  |  3
 runtime/gc/heap.cc                          | 11
 runtime/gc/heap.h                           |  4
 runtime/runtime.cc                          |  1
 7 files changed, 30 insertions(+), 28 deletions(-)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index de46b3527c..97d170e39c 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4836,6 +4836,9 @@ jbyteArray Dbg::GetRecentAllocations() {
CHECK(!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
records = &dummy;
}
+ // We don't need to wait on the condition variable records->new_record_condition_, because this
+ // function only reads the class objects, which are already marked, so reading them does not
+ // change their reachability.
//
// Part 1: generate string tables.
@@ -4850,7 +4853,7 @@ jbyteArray Dbg::GetRecentAllocations() {
count > 0 && it != end; count--, it++) {
const gc::AllocRecord* record = it->second;
std::string temp;
- class_names.Add(record->GetClass()->GetDescriptor(&temp));
+ class_names.Add(record->GetClassDescriptor(&temp));
for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) {
ArtMethod* m = record->StackElement(i).GetMethod();
class_names.Add(m->GetDeclaringClassDescriptor());
@@ -4902,7 +4905,7 @@ jbyteArray Dbg::GetRecentAllocations() {
const gc::AllocRecord* record = it->second;
size_t stack_depth = record->GetDepth();
size_t allocated_object_class_name_index =
- class_names.IndexOf(record->GetClass()->GetDescriptor(&temp));
+ class_names.IndexOf(record->GetClassDescriptor(&temp));
JDWP::Append4BE(bytes, record->ByteCount());
JDWP::Append2BE(bytes, static_cast<uint16_t>(record->GetTid()));
JDWP::Append2BE(bytes, allocated_object_class_name_index);
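
For context, GetRecentAllocations works in two passes over the recent records: part 1 interns each record's class descriptor and the declaring-class descriptors of its stack frames into string tables, and part 2 re-walks the records and emits table indices alongside the JDWP::Append*BE fields shown above. Below is a minimal sketch of such a de-duplicating string table; StringTable here is a simplified stand-in keyed on std::string, not ART's actual class.

    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    class StringTable {
     public:
      // Interns a string; repeated Adds of the same string are no-ops.
      void Add(const std::string& s) {
        if (index_.count(s) == 0) {
          index_[s] = static_cast<uint32_t>(strings_.size());
          strings_.push_back(s);
        }
      }
      // Returns the index recorded for a previously added string.
      uint32_t IndexOf(const std::string& s) const { return index_.at(s); }
      size_t Size() const { return strings_.size(); }

     private:
      std::map<std::string, uint32_t> index_;
      std::vector<std::string> strings_;
    };

Because Add is idempotent, a descriptor seen from many records maps to a single index, which is what lets part 2 emit compact per-record indices.
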
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 11921f44d3..6537ed2bb8 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -32,6 +32,15 @@ int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
return method_->GetLineNumFromDexPC(dex_pc_);
}
+const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
+ // klass_ could contain null only if we implement class unloading.
+ if (UNLIKELY(klass_.IsNull())) {
+ return "null";
+ } else {
+ return klass_.Read()->GetDescriptor(storage);
+ }
+}
+
void AllocRecordObjectMap::SetProperties() {
#ifdef HAVE_ANDROID_OS
// Check whether there's a system property overriding the max number of records.
@@ -97,7 +106,7 @@ void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
// Only visit the last recent_record_max_ number of allocation records in entries_ and mark the
// klass_ fields as strong roots.
for (auto it = entries_.rbegin(), end = entries_.rend(); count > 0 && it != end; count--, ++it) {
- buffered_visitor.VisitRoot(it->second->GetClassGcRoot());
+ buffered_visitor.VisitRootIfNonNull(it->second->GetClassGcRoot());
}
}
@@ -107,6 +116,8 @@ static inline void SweepClassObject(AllocRecord* record, IsMarkedCallback* callb
GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
// This does not need a read barrier because this is called by GC.
mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
+ // The class object can become null if we implement class unloading.
+ // In that case we might still want to keep the class name string (not implemented).
mirror::Object* new_object = UNLIKELY(old_object == nullptr) ?
nullptr : callback(old_object, arg);
if (UNLIKELY(old_object != new_object)) {
@@ -163,11 +174,6 @@ void AllocRecordObjectMap::DisallowNewAllocationRecords() {
allow_new_record_ = false;
}
-void AllocRecordObjectMap::EnsureNewAllocationRecordsDisallowed() {
- CHECK(!allow_new_record_);
-}
-
-
struct AllocRecordStackVisitor : public StackVisitor {
AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
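
SweepClassObject above follows the usual weak-root sweeping shape: read the old pointer without a read barrier, ask the collector whether it is still marked, and store back either the forwarded address or null. A simplified sketch of that shape follows; Object, IsMarkedFn, and WeakRoot are illustrative stand-ins, not ART's real types.

    struct Object {};

    // Stand-in for ART's IsMarkedCallback: returns nullptr if the object is
    // unreachable, or the (possibly moved) address if it survives the collection.
    using IsMarkedFn = Object* (*)(Object* obj, void* arg);

    struct WeakRoot {
      Object* ptr = nullptr;
    };

    // Sweep one weak root: clear it if the referent died, or update it if the
    // collector moved the referent (e.g. under a copying collector).
    void SweepWeakRoot(WeakRoot* root, IsMarkedFn is_marked, void* arg) {
      Object* old_object = root->ptr;
      Object* new_object =
          (old_object == nullptr) ? nullptr : is_marked(old_object, arg);
      if (old_object != new_object) {
        root->ptr = new_object;  // nullptr means the referent was collected.
      }
    }
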
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index f5671537b7..06721c86b1 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -188,7 +188,10 @@ class AllocRecord {
return klass_.Read();
}
- GcRoot<mirror::Class>& GetClassGcRoot() {
+ const char* GetClassDescriptor(std::string* storage) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ GcRoot<mirror::Class>& GetClassGcRoot() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return klass_;
}
@@ -262,15 +265,18 @@ class AllocRecordObjectMap {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+ // Allocation tracking could be enabled by the user in between DisallowNewAllocationRecords() and
+ // AllowNewAllocationRecords(), in which case new allocation records can be added although they
+ // should be disallowed. However, this is GC-safe because new objects are not processed in this GC
+ // cycle. The only downside of not handling this case is that such new allocation records can be
+ // swept from the list. Missing the first few records is acceptable when the user enables
+ // allocation tracking via the button.
void DisallowNewAllocationRecords()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
void AllowNewAllocationRecords()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
- void EnsureNewAllocationRecordsDisallowed()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
// TODO: Is there a better way to hide the entries_'s type?
EntryList::iterator Begin()
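
The window described in the comment above exists because allocating threads synchronize with the GC through allow_new_record_ and the new_record_condition_ condition variable (both referenced elsewhere in this change). A minimal sketch of that handshake is below, assuming a plain std::mutex and std::condition_variable in place of ART's Locks::alloc_tracker_lock_ and ConditionVariable; the class name and method names are illustrative.

    #include <condition_variable>
    #include <mutex>

    class AllocRecordGate {
     public:
      // GC side: stop new records from being appended.
      void DisallowNewRecords() {
        std::lock_guard<std::mutex> lock(mu_);
        allow_new_record_ = false;
      }
      // GC side: re-enable recording and wake any waiting allocator threads.
      void AllowNewRecords() {
        std::lock_guard<std::mutex> lock(mu_);
        allow_new_record_ = true;
        new_record_condition_.notify_all();
      }
      // Allocator side: block while new records are disallowed.
      void WaitUntilAllowed() {
        std::unique_lock<std::mutex> lock(mu_);
        new_record_condition_.wait(lock, [this] { return allow_new_record_; });
      }

     private:
      std::mutex mu_;
      std::condition_variable new_record_condition_;
      bool allow_new_record_ = true;
    };
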
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index c7d2e9f2c9..5e69b797e0 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -334,6 +334,8 @@ void ConcurrentCopying::MarkingPhase() {
}
}
}
+ // TODO: Other garbage collectors use Runtime::VisitConcurrentRoots(); refactor this part
+ // to use the same function.
{
TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
Runtime::Current()->VisitConstantRoots(this);
@@ -351,6 +353,7 @@ void ConcurrentCopying::MarkingPhase() {
TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
Runtime::Current()->VisitNonThreadRoots(this);
}
+ Runtime::Current()->GetHeap()->VisitAllocationRecords(this);
// Immune spaces.
for (auto& space : heap_->GetContinuousSpaces()) {
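
The single added call is all this diff shows of Heap::VisitAllocationRecords, so the sketch below is an assumption about its shape, modeled on the check-lock-recheck pattern of the removed Heap::EnsureNewAllocationRecordsDisallowed() in the heap.cc hunk further down. All names besides VisitAllocationRecords, VisitRoots, and the tracker lock are illustrative, and the lock is modeled as a plain std::mutex rather than ART's Locks::alloc_tracker_lock_.

    #include <mutex>

    class RootVisitor;  // Stand-in for ART's RootVisitor.

    class AllocRecordObjectMap {
     public:
      void VisitRoots(RootVisitor* /*visitor*/) {}  // Marks klass_ roots in the real code.
    };

    class Heap {
     public:
      void VisitAllocationRecords(RootVisitor* visitor) {
        // Cheap test first, then re-test under the lock, mirroring the removed
        // Heap::EnsureNewAllocationRecordsDisallowed() wrapper.
        if (alloc_tracking_enabled_) {
          std::lock_guard<std::mutex> lock(alloc_tracker_lock_);
          if (alloc_tracking_enabled_) {
            allocation_records_->VisitRoots(visitor);
          }
        }
      }

     private:
      bool alloc_tracking_enabled_ = false;
      std::mutex alloc_tracker_lock_;
      AllocRecordObjectMap* allocation_records_ = nullptr;
    };
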
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1b45ea1f5a..a782fc803a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3734,17 +3734,6 @@ void Heap::DisallowNewAllocationRecords() const {
}
}
-void Heap::EnsureNewAllocationRecordsDisallowed() const {
- if (IsAllocTrackingEnabled()) {
- // Lock and unlock once to ensure that no threads are still in the
- // middle of adding new allocation records.
- MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
- if (IsAllocTrackingEnabled()) {
- GetAllocationRecords()->EnsureNewAllocationRecordsDisallowed();
- }
- }
-}
-
// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
class StackCrawlState {
public:
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 1c75bd0089..cfb6e06a18 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -721,10 +721,6 @@ class Heap {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
- void EnsureNewAllocationRecordsDisallowed() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
-
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 20e4149feb..5067b0d60c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1510,7 +1510,6 @@ void Runtime::EnsureNewSystemWeaksDisallowed() {
monitor_list_->EnsureNewMonitorsDisallowed();
intern_table_->EnsureNewInternsDisallowed();
java_vm_->EnsureNewWeakGlobalsDisallowed();
- heap_->EnsureNewAllocationRecordsDisallowed();
}
void Runtime::SetInstructionSet(InstructionSet instruction_set) {