Include evacuated bytes in heap size trace
Since the ConcurrentCopying collector creates copies of objects that
are in the from-space during evacuation, the extra bytes need to be
reported during the GC cycle using TraceHeapSize().
Test: Flash device, run and analyse perfetto trace.
Bug: 139020078
Change-Id: Iff0da1de4c77da199c1c3b44ea488883e7fa4261
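In effect, the traced value becomes the allocated bytes plus the bytes
currently held by evacuation regions. A minimal sketch of the intended
accounting, using the names introduced by the patch below (an
illustration, not the patched code itself):

    // During a GC cycle the evac regions hold the second copies, so the
    // traced size is allocated + evacuated bytes; outside a cycle
    // EvacBytes() is 0 and this degenerates to the allocated bytes.
    size_t traced = heap->GetBytesAllocated() + region_space->EvacBytes();
    heap->TraceHeapSize(traced);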
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 3b66fbc..08cbea2 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -156,7 +156,15 @@
new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
// Only trace when we get an increase in the number of bytes allocated. This happens when
// obtaining a new TLAB and isn't often enough to hurt performance according to golem.
- TraceHeapSize(new_num_bytes_allocated);
+ if (region_space_) {
+    // With the CC collector, heap usage increases during a GC cycle because
+    // two copies of each evacuated object exist. Therefore, add the
+    // evacuated bytes to the heap size. When no GC cycle is running, the
+    // evacuated bytes are 0, as required.
+ TraceHeapSize(new_num_bytes_allocated + region_space_->EvacBytes());
+ } else {
+ TraceHeapSize(new_num_bytes_allocated);
+ }
}
}
if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
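For context, TraceHeapSize() itself is not modified by this change; a
plausible shape for it (the counter name, the KB constant, and the
ATraceIntegerValue helper are recalled from ART's tree, not taken from
this diff) is:

    void Heap::TraceHeapSize(size_t heap_size) {
      // Emits a counter event that shows up as a track in a perfetto
      // or systrace capture.
      ATraceIntegerValue("Heap size (KB)", heap_size / KB);
    }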
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 15534c9..c54edf5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -902,6 +902,8 @@
void PostForkChildAction(Thread* self);
+ void TraceHeapSize(size_t heap_size);
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -1180,8 +1182,6 @@
ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);
- void TraceHeapSize(size_t heap_size);
-
// Remove a vlog code from heap-inl.h which is transitively included in half the world.
static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);
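The declaration moves from the private section below to the public
section above so that RegionSpace, which now emits the combined size
itself (see region_space.cc further down), can call it.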
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 86a0a6e..33b72ac 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -328,58 +328,53 @@
}
}
+ mirror::Object* region = nullptr;
// Find a large enough set of contiguous free regions.
if (kCyclicRegionAllocation) {
+ size_t next_region = -1;
// Try to find a range of free regions within [cyclic_alloc_region_index_, num_regions_).
- size_t next_region1 = -1;
- mirror::Object* region1 = AllocLargeInRange<kForEvac>(cyclic_alloc_region_index_,
- num_regions_,
- num_regs_in_large_region,
- bytes_allocated,
- usable_size,
- bytes_tl_bulk_allocated,
- &next_region1);
- if (region1 != nullptr) {
- DCHECK_LT(0u, next_region1);
- DCHECK_LE(next_region1, num_regions_);
- // Move the cyclic allocation region marker to the region
- // following the large region that was just allocated.
- cyclic_alloc_region_index_ = next_region1 % num_regions_;
- return region1;
+ region = AllocLargeInRange<kForEvac>(cyclic_alloc_region_index_,
+ num_regions_,
+ num_regs_in_large_region,
+ bytes_allocated,
+ usable_size,
+ bytes_tl_bulk_allocated,
+ &next_region);
+
+ if (region == nullptr) {
+ DCHECK_EQ(next_region, static_cast<size_t>(-1));
+ // If the previous attempt failed, try to find a range of free regions within
+ // [0, min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_)).
+ region = AllocLargeInRange<kForEvac>(
+ 0,
+ std::min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_),
+ num_regs_in_large_region,
+ bytes_allocated,
+ usable_size,
+ bytes_tl_bulk_allocated,
+ &next_region);
}
- // If the previous attempt failed, try to find a range of free regions within
- // [0, min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_)).
- size_t next_region2 = -1;
- mirror::Object* region2 = AllocLargeInRange<kForEvac>(
- 0,
- std::min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_),
- num_regs_in_large_region,
- bytes_allocated,
- usable_size,
- bytes_tl_bulk_allocated,
- &next_region2);
- if (region2 != nullptr) {
- DCHECK_LT(0u, next_region2);
- DCHECK_LE(next_region2, num_regions_);
+ if (region != nullptr) {
+ DCHECK_LT(0u, next_region);
+ DCHECK_LE(next_region, num_regions_);
// Move the cyclic allocation region marker to the region
// following the large region that was just allocated.
- cyclic_alloc_region_index_ = next_region2 % num_regions_;
- return region2;
+ cyclic_alloc_region_index_ = next_region % num_regions_;
}
} else {
// Try to find a range of free regions within [0, num_regions_).
- mirror::Object* region = AllocLargeInRange<kForEvac>(0,
- num_regions_,
- num_regs_in_large_region,
- bytes_allocated,
- usable_size,
- bytes_tl_bulk_allocated);
- if (region != nullptr) {
- return region;
- }
+ region = AllocLargeInRange<kForEvac>(0,
+ num_regions_,
+ num_regs_in_large_region,
+ bytes_allocated,
+ usable_size,
+ bytes_tl_bulk_allocated);
}
- return nullptr;
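+  // A successful large allocation for evacuation grows the evac byte
+  // count, so refresh the traced heap size here as well.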
+ if (kForEvac && region != nullptr) {
+ TraceHeapSize();
+ }
+ return region;
}
template<bool kForEvac>
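The cyclic search itself is unchanged: probe
[cyclic_alloc_region_index_, num_regions_) first, then wrap around to
the prefix. A simplified standalone model of that wrap-around, using a
hypothetical bool free-map in place of the real region table:

    #include <algorithm>
    #include <cstddef>

    // Return the first index of a run of 'need' consecutive free slots
    // in [begin, end), or (size_t)-1 if there is none.
    static size_t FindRun(const bool* free, size_t begin, size_t end,
                          size_t need) {
      size_t run = 0;
      for (size_t i = begin; i < end; ++i) {
        run = free[i] ? run + 1 : 0;
        if (run == need) {
          return i + 1 - need;  // First slot of the run.
        }
      }
      return static_cast<size_t>(-1);
    }

    size_t AllocLargeCyclic(const bool* free, size_t n, size_t start,
                            size_t need) {
      size_t idx = FindRun(free, start, n, need);
      if (idx == static_cast<size_t>(-1)) {
        // Any run the first probe missed starts before 'start' and thus
        // ends by start + need - 1, which bounds the second probe.
        idx = FindRun(free, 0, std::min(start + need - 1, n), need);
      }
      return idx;
    }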
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 4bf5524..7939f0a 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -977,6 +977,11 @@
thread_ = nullptr;
}
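+// Report the current heap size, including bytes held by evacuation
+// regions, as a trace counter.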
+void RegionSpace::TraceHeapSize() {
+ Heap* heap = Runtime::Current()->GetHeap();
+ heap->TraceHeapSize(heap->GetBytesAllocated() + EvacBytes());
+}
+
RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
if (!for_evac && (num_non_free_regions_ + 1) * 2 > num_regions_) {
return nullptr;
@@ -998,6 +1003,7 @@
}
if (for_evac) {
++num_evac_regions_;
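+    // The evac byte count just grew by one region; refresh the trace.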
+ TraceHeapSize();
// Evac doesn't count as newly allocated.
} else {
r->SetNewlyAllocated();
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 6061c25..6d654b3 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -369,6 +369,10 @@
return time_;
}
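+  // Bytes currently held by evacuation regions, at whole-region
+  // granularity; 0 whenever no GC cycle is in progress. For example, 12
+  // evac regions at a 256 KiB kRegionSize (an assumed value) trace as 3 MiB.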
+ size_t EvacBytes() const NO_THREAD_SAFETY_ANALYSIS {
+ return num_evac_regions_ * kRegionSize;
+ }
+
private:
RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc);
@@ -635,6 +639,8 @@
return RefToRegionLocked(ref);
}
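+  // Reports the heap size (allocated + evacuated bytes) to the trace
+  // stream. Reads num_evac_regions_, hence the region_lock_ requirement.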
+ void TraceHeapSize() REQUIRES(region_lock_);
+
Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
// For a performance reason (this is frequently called via
// RegionSpace::IsInFromSpace, etc.) we avoid taking a lock here.