Improve sticky GC ergonomics
Previously, we scheduled a partial GC after 10 sticky GCs had run in order to
prevent excessive heap growth and fragmentation. The issue with this is that
the threshold was just a ballpark estimate, not based on the actual state of
the heap. The new behaviour is that we keep doing sticky GCs until less space
remains after the GC than the minimum free amount. When this occurs, we set
the next GC to be a partial GC. After a partial / full GC we grow the heap and
set the next GC to be a sticky GC. This prevents the heap from persistently
growing past the target utilization, while still ensuring that sticky GCs run
often.
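
For reference, the next-GC decision reduces to the following minimal sketch.
The NextGcType helper and its flattened parameters are hypothetical; the real
logic lives in Heap::GrowForUtilization in the diff below:

    #include <cstddef>

    enum class GcType { kSticky, kPartial };

    // Sketch: after a non-sticky (partial / full) GC the heap is grown and
    // the next collection is sticky; after a sticky GC we stay sticky only
    // while at least min_free bytes of headroom remain below the currently
    // allowed footprint.
    GcType NextGcType(GcType completed, size_t bytes_allocated,
                      size_t max_allowed_footprint, size_t min_free) {
      if (completed != GcType::kSticky) {
        return GcType::kSticky;
      }
      return (bytes_allocated + min_free <= max_allowed_footprint)
                 ? GcType::kSticky
                 : GcType::kPartial;
    }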
dumpsys meminfo: ~450MB -> ~420MB. Slight slowdown in GCBench.
Change-Id: Ifd865123f7d4ae39914fda44c9225a6731d27890
diff --git a/src/gc/collector/mark_sweep.cc b/src/gc/collector/mark_sweep.cc
index d54fec6..279796f 100644
--- a/src/gc/collector/mark_sweep.cc
+++ b/src/gc/collector/mark_sweep.cc
@@ -58,7 +58,7 @@
// Performance options.
static const bool kParallelMarkStack = true;
-static const bool kDisableFinger = kParallelMarkStack;
+static const bool kDisableFinger = true; // TODO: Fix, bit rotten.
static const bool kUseMarkStackPrefetch = true;
// Profiling and information flags.
@@ -1477,7 +1477,7 @@
heap->PostGcVerification(this);
timings_.NewSplit("GrowForUtilization");
- heap->GrowForUtilization(GetDurationNs());
+ heap->GrowForUtilization(GetGcType(), GetDurationNs());
timings_.NewSplit("RequestHeapTrim");
heap->RequestHeapTrim();
diff --git a/src/gc/heap.cc b/src/gc/heap.cc
index 9ec1f21..43ad910 100644
--- a/src/gc/heap.cc
+++ b/src/gc/heap.cc
@@ -163,13 +163,12 @@
reference_queue_lock_(NULL),
is_gc_running_(false),
last_gc_type_(collector::kGcTypeNone),
+ next_gc_type_(collector::kGcTypePartial),
capacity_(capacity),
growth_limit_(growth_limit),
max_allowed_footprint_(initial_size),
concurrent_start_bytes_(concurrent_gc ? initial_size - (kMinConcurrentRemainingBytes)
: std::numeric_limits<size_t>::max()),
- sticky_gc_count_(0),
- sticky_to_partial_gc_ratio_(10),
total_bytes_freed_ever_(0),
total_objects_freed_ever_(0),
large_object_threshold_(3 * kPageSize),
@@ -533,7 +532,7 @@
size_t size = 0;
uint64_t allocation_start = 0;
if (measure_allocation_time_) {
- allocation_start = NanoTime();
+ allocation_start = NanoTime() / kTimeAdjust;
}
// We need to have a zygote space or else our newly allocated large object can end up in the
@@ -577,7 +576,7 @@
VerifyObject(obj);
if (measure_allocation_time_) {
- total_allocation_time_ += (NanoTime() - allocation_start) / kTimeAdjust;
+ total_allocation_time_ += NanoTime() / kTimeAdjust - allocation_start;
}
return obj;
@@ -1166,20 +1165,6 @@
++Thread::Current()->GetStats()->gc_for_alloc_count;
}
- // We need to do partial GCs every now and then to avoid the heap growing too much and
- // fragmenting.
- // TODO: if sticky GCs are failing to free memory then we should lower the
- // sticky_to_partial_gc_ratio_, if they are successful we can increase it.
- if (gc_type == collector::kGcTypeSticky) {
- ++sticky_gc_count_;
- if (sticky_gc_count_ >= sticky_to_partial_gc_ratio_) {
- gc_type = have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
- sticky_gc_count_ = 0;
- }
- } else {
- sticky_gc_count_ = 0;
- }
-
uint64_t gc_start_time_ns = NanoTime();
uint64_t gc_start_size = GetBytesAllocated();
// Approximate allocation rate in bytes / second.
@@ -1192,6 +1177,11 @@
VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
}
+ if (gc_type == collector::kGcTypeSticky &&
+ alloc_space_->Size() < min_alloc_space_size_for_sticky_gc_) {
+ gc_type = collector::kGcTypePartial;
+ }
+
DCHECK_LT(gc_type, collector::kGcTypeMax);
DCHECK_NE(gc_type, collector::kGcTypeNone);
collector::MarkSweep* collector = NULL;
@@ -1697,20 +1687,39 @@
max_allowed_footprint_ = max_allowed_footprint;
}
-void Heap::GrowForUtilization(uint64_t gc_duration) {
+void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
// We know what our utilization is at this moment.
// This doesn't actually resize any memory. It just lets the heap grow more when necessary.
const size_t bytes_allocated = GetBytesAllocated();
last_gc_size_ = bytes_allocated;
last_gc_time_ns_ = NanoTime();
- size_t target_size = bytes_allocated / GetTargetHeapUtilization();
- if (target_size > bytes_allocated + max_free_) {
- target_size = bytes_allocated + max_free_;
- } else if (target_size < bytes_allocated + min_free_) {
- target_size = bytes_allocated + min_free_;
- }
+ size_t target_size;
+ if (gc_type != collector::kGcTypeSticky) {
+ // Grow the heap for non sticky GC.
+ target_size = bytes_allocated / GetTargetHeapUtilization();
+ if (target_size > bytes_allocated + max_free_) {
+ target_size = bytes_allocated + max_free_;
+ } else if (target_size < bytes_allocated + min_free_) {
+ target_size = bytes_allocated + min_free_;
+ }
+ next_gc_type_ = collector::kGcTypeSticky;
+ } else {
+ // Based on how close the current heap size is to the target size, decide
+ // whether or not to do a partial or sticky GC next.
+ if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
+ next_gc_type_ = collector::kGcTypeSticky;
+ } else {
+ next_gc_type_ = collector::kGcTypePartial;
+ }
+ // If we have freed enough memory, shrink the heap back down.
+ if (bytes_allocated + max_free_ < max_allowed_footprint_) {
+ target_size = bytes_allocated + max_free_;
+ } else {
+ target_size = std::max(bytes_allocated, max_allowed_footprint_);
+ }
+ }
SetIdealFootprint(target_size);
// Calculate when to perform the next ConcurrentGC.
@@ -1887,11 +1896,7 @@
// Wait for any GCs currently running to finish.
if (WaitForConcurrentGcToComplete(self) == collector::kGcTypeNone) {
- if (alloc_space_->Size() > min_alloc_space_size_for_sticky_gc_) {
- CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseBackground, false);
- } else {
- CollectGarbageInternal(collector::kGcTypePartial, kGcCauseBackground, false);
- }
+ CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false);
}
}
diff --git a/src/gc/heap.h b/src/gc/heap.h
index d86c7dc..790ab02 100644
--- a/src/gc/heap.h
+++ b/src/gc/heap.h
@@ -424,7 +424,7 @@
// Given the current contents of the alloc space, increase the allowed heap footprint to match
// the target utilization ratio. This should only be called immediately after a full garbage
// collection.
- void GrowForUtilization(uint64_t gc_duration);
+ void GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration);
size_t GetPercentFree();
@@ -488,6 +488,7 @@
// Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
+ collector::GcType next_gc_type_;
// Maximum size that the heap can reach.
const size_t capacity_;
@@ -502,12 +503,6 @@
// it completes ahead of an allocation failing.
size_t concurrent_start_bytes_;
- // Number of back-to-back sticky mark sweep collections.
- size_t sticky_gc_count_;
-
- // After how many sticky GCs we force to do a partial GC instead of sticky mark bits GC.
- const size_t sticky_to_partial_gc_ratio_;
-
// Since the heap was created, how many bytes have been freed.
size_t total_bytes_freed_ever_;