 runtime/entrypoints/entrypoint_utils-inl.h | 18
 runtime/gc/allocation_record.cc            | 18
 runtime/gc/allocation_record.h             |  2
 runtime/gc/heap-inl.h                      |  6
 runtime/gc/heap.cc                         |  4
 runtime/gc/heap.h                          |  4
 6 files changed, 31 insertions(+), 21 deletions(-)
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 7e7d904a10..3e6b4537f9 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -173,7 +173,10 @@ inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
if (klass == nullptr) {
return nullptr;
}
- return klass->Alloc<kInstrumented>(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ // CheckObjectAlloc can cause thread suspension which means we may now be instrumented.
+ return klass->Alloc</*kInstrumented*/true>(
+ self,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
DCHECK(klass != nullptr);
return klass->Alloc<kInstrumented>(self, allocator_type);
@@ -194,7 +197,9 @@ inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
}
gc::Heap* heap = Runtime::Current()->GetHeap();
// Pass in false since the object cannot be finalizable.
- return klass->Alloc<kInstrumented, false>(self, heap->GetCurrentAllocator());
+ // CheckClassInitializedForObjectAlloc can cause thread suspension which means we may now be
+ // instrumented.
+ return klass->Alloc</*kInstrumented*/true, false>(self, heap->GetCurrentAllocator());
}
// Pass in false since the object cannot be finalizable.
return klass->Alloc<kInstrumented, false>(self, allocator_type);
@@ -265,9 +270,12 @@ inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
return nullptr;
}
gc::Heap* heap = Runtime::Current()->GetHeap();
- return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
- klass->GetComponentSizeShift(),
- heap->GetCurrentAllocator());
+ // CheckArrayAlloc can cause thread suspension which means we may now be instrumented.
+ return mirror::Array::Alloc</*kInstrumented*/true>(self,
+ klass,
+ component_count,
+ klass->GetComponentSizeShift(),
+ heap->GetCurrentAllocator());
}
return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
klass->GetComponentSizeShift(), allocator_type);
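
The three hunks in this file apply the same fix: the slow path (class resolution, class initialization, or array size checks) can suspend the allocating thread, and instrumentation may be enabled while it is suspended, so the compile-time kInstrumented flag can no longer be trusted there. A minimal, self-contained sketch of the pattern, with hypothetical names standing in for the ART types (not ART code):

#include <cstdio>

struct Klass {
  bool initialized = false;

  template <bool kInstrumented>
  int* Alloc() {
    std::printf("alloc, instrumented=%d\n", kInstrumented ? 1 : 0);
    return new int(0);
  }
};

// Stand-in for CheckObjectAlloc(): resolving/initializing the class is a
// point where the allocating thread can be suspended, and instrumentation
// may be switched on while it is suspended.
bool ResolveAndInitialize(Klass* klass) {
  klass->initialized = true;
  return true;
}

template <bool kInstrumented>
int* AllocObjectFromCodeSketch(Klass* klass) {
  if (!klass->initialized) {                    // slow path
    if (!ResolveAndInitialize(klass)) {
      return nullptr;
    }
    // The thread may have been suspended above, so the compile-time flag can
    // be stale; allocate through the instrumented path unconditionally.
    return klass->Alloc</*kInstrumented*/ true>();
  }
  return klass->Alloc<kInstrumented>();         // fast path keeps the flag
}

int main() {
  Klass k;
  delete AllocObjectFromCodeSketch</*kInstrumented*/ false>(&k);
  return 0;
}

The fast path at the bottom of each function still honors the template parameter, since no suspension point has been crossed on that route.
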
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 4672483308..73190455cf 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -92,7 +92,7 @@ void AllocRecordObjectMap::SetProperties() {
}
AllocRecordObjectMap::~AllocRecordObjectMap() {
- STLDeleteValues(&entries_);
+ Clear();
}
void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
@@ -223,7 +223,11 @@ void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
if (heap->IsAllocTrackingEnabled()) {
return; // Already enabled, bail.
}
- AllocRecordObjectMap* records = new AllocRecordObjectMap();
+ AllocRecordObjectMap* records = heap->GetAllocationRecords();
+ if (records == nullptr) {
+ records = new AllocRecordObjectMap;
+ heap->SetAllocationRecords(records);
+ }
CHECK(records != nullptr);
records->SetProperties();
std::string self_name;
@@ -237,7 +241,6 @@ void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
<< records->max_stack_depth_ << " frames, taking up to "
<< PrettySize(sz * records->alloc_record_max_) << ")";
- heap->SetAllocationRecords(records);
}
Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
{
@@ -247,7 +250,6 @@ void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
} else {
// Delete outside of the critical section to avoid possible lock violations like the runtime
// shutdown lock.
- std::unique_ptr<AllocRecordObjectMap> map;
{
MutexLock mu(self, *Locks::alloc_tracker_lock_);
if (!heap->IsAllocTrackingEnabled()) {
@@ -255,7 +257,8 @@ void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
}
heap->SetAllocTrackingEnabled(false);
LOG(INFO) << "Disabling alloc tracker";
- map = heap->ReleaseAllocationRecords();
+ AllocRecordObjectMap* records = heap->GetAllocationRecords();
+ records->Clear();
}
// If an allocation comes in before we uninstrument, we will safely drop it on the floor.
Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
@@ -303,5 +306,10 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self, mirror::Object* obj, m
DCHECK_LE(records->Size(), records->alloc_record_max_);
}
+void AllocRecordObjectMap::Clear() {
+ STLDeleteValues(&entries_);
+ entries_.clear();
+}
+
} // namespace gc
} // namespace art
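
Taken together, these hunks change the lifecycle of the record map: enabling tracking reuses a map the heap already owns (creating one only on first enable), and disabling clears the map's entries in place instead of releasing and destroying it; the destructor now just delegates to the new Clear(). A minimal sketch of that enable/clear/reuse cycle, using standard containers and hypothetical names rather than ART's lock-annotated types:

#include <list>
#include <memory>
#include <utility>

struct Record {};

class RecordMap {
 public:
  void Add(int tag) { entries_.emplace_back(tag, new Record()); }

  // Mirrors the new AllocRecordObjectMap::Clear(): delete the owned values,
  // then drop the now-dangling pointers from the container.
  void Clear() {
    for (auto& entry : entries_) {
      delete entry.second;
    }
    entries_.clear();
  }

  // The destructor simply delegates to Clear().
  ~RecordMap() { Clear(); }

 private:
  std::list<std::pair<int, Record*>> entries_;
};

// Stand-in for heap->GetAllocationRecords()/SetAllocationRecords().
std::unique_ptr<RecordMap> g_records;

void SetAllocTrackingEnabledSketch(bool enable) {
  if (enable) {
    if (g_records == nullptr) {
      g_records.reset(new RecordMap());  // first enable: create the map once
    }
    // ... instrument allocation entrypoints ...
  } else {
    // Disable: the map stays alive and owned by the heap; only its contents
    // are dropped.
    if (g_records != nullptr) {
      g_records->Clear();
    }
    // ... uninstrument allocation entrypoints ...
  }
}

int main() {
  SetAllocTrackingEnabledSketch(true);
  g_records->Add(42);
  SetAllocTrackingEnabledSketch(false);  // clears, does not destroy
  SetAllocTrackingEnabledSketch(true);   // reuses the same map
  return 0;
}

Presumably the point of keeping the map alive is that an allocation racing with disabling never observes a deleted map; per the comment above, such late records are simply dropped once the entrypoints are uninstrumented.
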
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index ffdfd31ce6..18cce4d72d 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -306,6 +306,8 @@ class AllocRecordObjectMap {
return entries_.rend();
}
+ void Clear() REQUIRES(Locks::alloc_tracker_lock_);
+
private:
static constexpr size_t kDefaultNumAllocRecords = 512 * 1024;
static constexpr size_t kDefaultNumRecentRecords = 64 * 1024 - 1;
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index d7023d8ee0..59fd4a6b44 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -174,9 +174,6 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
} else {
DCHECK(!Runtime::Current()->HasStatsEnabled());
}
- if (AllocatorHasAllocationStack(allocator)) {
- PushOnAllocationStack(self, &obj);
- }
if (kInstrumented) {
if (IsAllocTrackingEnabled()) {
// Use obj->GetClass() instead of klass, because PushOnAllocationStack() could move klass
@@ -185,6 +182,9 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
} else {
DCHECK(!IsAllocTrackingEnabled());
}
+ if (AllocatorHasAllocationStack(allocator)) {
+ PushOnAllocationStack(self, &obj);
+ }
if (kInstrumented) {
if (gc_stress_mode_) {
CheckGcStressMode(self, &obj);
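
This hunk moves the allocation-stack push to after the allocation-tracking block, so the allocation is recorded before the push runs. Note that PushOnAllocationStack and CheckGcStressMode both take &obj: per the comment above, the push can move objects, so these helpers receive the address of the caller's local reference and fix it up if the object relocates. A minimal sketch of that pass-by-pointer-to-local pattern (hypothetical names, with the "GC" simulated):

#include <cassert>
#include <cstdint>

struct Object {
  int32_t field = 0;
};

// Stand-in for PushOnAllocationStack(self, &obj): pushing can trigger a
// collection that moves the object, so the helper takes the address of the
// caller's local and rewrites it to point at the relocated object.
void PushOnAllocationStackSketch(Object** obj, Object* relocated) {
  relocated->field = (*obj)->field;  // pretend a GC copied the object
  *obj = relocated;
}

int main() {
  Object from_space;
  Object to_space;
  from_space.field = 7;

  Object* obj = &from_space;
  PushOnAllocationStackSketch(&obj, &to_space);

  // The caller keeps using 'obj' after the call; because the helper updated
  // it in place, it now refers to the relocated object.
  assert(obj == &to_space);
  assert(obj->field == 7);
  return 0;
}
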
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2e5b599940..f4fccee034 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3940,10 +3940,6 @@ void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
allocation_records_.reset(records);
}
-std::unique_ptr<AllocRecordObjectMap> Heap::ReleaseAllocationRecords() {
- return std::move(allocation_records_);
-}
-
void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
if (IsAllocTrackingEnabled()) {
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index e0a53a0cc8..9eda422902 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -766,10 +766,6 @@ class Heap {
return allocation_records_.get();
}
- // Release ownership of the allocation records.
- std::unique_ptr<AllocRecordObjectMap> ReleaseAllocationRecords()
- REQUIRES(Locks::alloc_tracker_lock_);
-
void SetAllocationRecords(AllocRecordObjectMap* records)
REQUIRES(Locks::alloc_tracker_lock_);
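
With ReleaseAllocationRecords() removed, ownership of the record map never leaves the Heap: SetAllocationRecords() transfers ownership in (via unique_ptr::reset above) and GetAllocationRecords() only hands out a borrowed pointer, which is what the new Clear()-based disable path relies on. A minimal sketch of that ownership model (hypothetical names, no lock annotations):

#include <memory>

struct AllocRecordMapSketch {
  void Clear() {}
};

class HeapSketch {
 public:
  // Takes ownership of 'records', mirroring Heap::SetAllocationRecords(),
  // which does allocation_records_.reset(records).
  void SetAllocationRecords(AllocRecordMapSketch* records) {
    allocation_records_.reset(records);
  }

  // Borrowed pointer only; with ReleaseAllocationRecords() gone, ownership
  // can no longer be transferred back out of the heap.
  AllocRecordMapSketch* GetAllocationRecords() const {
    return allocation_records_.get();
  }

 private:
  std::unique_ptr<AllocRecordMapSketch> allocation_records_;
};

int main() {
  HeapSketch heap;
  heap.SetAllocationRecords(new AllocRecordMapSketch());
  heap.GetAllocationRecords()->Clear();  // disable path clears, never releases
  return 0;
}
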