Fix heap trimming logic.
The heap trimming logic introduced in c/84301 did not work well: the
heap trim could get skipped because the daemon thread reached the
trimming code before the scheduled trim time.
The new logic is to do the heap trim if the last heap trim occurred
more than kHeapTrimWait ns ago.
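As an illustration only (not the ART code itself), the throttle amounts
to the check sketched below; kHeapTrimWait and last_trim_time_ mirror
the names in the patch, while NanoTime() and ShouldTrimNow() are
simplified stand-ins:

    // Sketch of the rate-limit check added by this change. NanoTime() is a
    // stand-in for ART's monotonic clock helper.
    #include <cstdint>
    #include <chrono>

    static constexpr uint64_t kHeapTrimWait = 5000ULL * 1000 * 1000;  // 5 s in ns.
    static uint64_t last_trim_time_ = 0;

    static uint64_t NanoTime() {
      return std::chrono::duration_cast<std::chrono::nanoseconds>(
          std::chrono::steady_clock::now().time_since_epoch()).count();
    }

    // Returns true (and records the trim time) only if the previous trim was
    // more than kHeapTrimWait nanoseconds ago, so back-to-back trims are skipped.
    static bool ShouldTrimNow() {
      if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
        return false;  // Trimmed recently; don't trim again yet.
      }
      last_trim_time_ = NanoTime();
      return true;
    }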
Change-Id: I9d0e6766bf0c68e5f7fb15fb059140e1f1264216
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d962f3c..13dd90e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -92,7 +92,7 @@
background_collector_type_(background_collector_type),
desired_collector_type_(collector_type_),
heap_trim_request_lock_(nullptr),
- heap_trim_target_time_(0),
+ last_trim_time_(0),
heap_transition_target_time_(0),
heap_trim_request_pending_(false),
parallel_gc_threads_(parallel_gc_threads),
@@ -484,10 +484,11 @@
process_state_ = process_state;
if (process_state_ == kProcessStateJankPerceptible) {
// Transition back to foreground right away to prevent jank.
- RequestHeapTransition(post_zygote_collector_type_, 0);
+ RequestCollectorTransition(post_zygote_collector_type_, 0);
} else {
// Don't delay for debug builds since we may want to stress test the GC.
- RequestHeapTransition(background_collector_type_, kIsDebugBuild ? 0 : kHeapTransitionWait);
+ RequestCollectorTransition(background_collector_type_, kIsDebugBuild ? 0 :
+ kCollectorTransitionWait);
}
}
}
@@ -903,7 +904,8 @@
ScopedThreadStateChange tsc(self, kSleeping);
usleep(wait_time / 1000); // Usleep takes microseconds.
}
- // Transition the heap if the desired collector type is nto the same as the current collector type.
+ // Transition the collector if the desired collector type is not the same as the current
+ // collector type.
TransitionCollector(desired_collector_type);
// Do a heap trim if it is needed.
Trim();
@@ -913,9 +915,10 @@
Thread* self = Thread::Current();
{
MutexLock mu(self, *heap_trim_request_lock_);
- if (!heap_trim_request_pending_ || NanoTime() < heap_trim_target_time_) {
+ if (!heap_trim_request_pending_ || last_trim_time_ + kHeapTrimWait >= NanoTime()) {
return;
}
+ last_trim_time_ = NanoTime();
heap_trim_request_pending_ = false;
}
{
@@ -1804,7 +1807,7 @@
collector->Run(gc_cause, clear_soft_references);
total_objects_freed_ever_ += collector->GetFreedObjects();
total_bytes_freed_ever_ += collector->GetFreedBytes();
- RequestHeapTrim(Heap::kHeapTrimWait);
+ RequestHeapTrim();
// Enqueue cleared references.
EnqueueClearedReferences();
// Grow the heap so that we know when to perform the next GC.
@@ -2567,7 +2570,7 @@
}
}
-void Heap::RequestHeapTransition(CollectorType desired_collector_type, uint64_t delta_time) {
+void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
Thread* self = Thread::Current();
{
MutexLock mu(self, *heap_trim_request_lock_);
@@ -2580,7 +2583,7 @@
SignalHeapTrimDaemon(self);
}
-void Heap::RequestHeapTrim(uint64_t delta_time) {
+void Heap::RequestHeapTrim() {
// GC completed and now we must decide whether to request a heap trim (advising pages back to the
// kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
// a space it will hold its lock and can become a cause of jank.
@@ -2607,7 +2610,11 @@
if (!CareAboutPauseTimes()) {
{
MutexLock mu(self, *heap_trim_request_lock_);
- heap_trim_target_time_ = std::max(heap_trim_target_time_, NanoTime() + delta_time);
+ if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
+ // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one
+ // just yet.
+ return;
+ }
heap_trim_request_pending_ = true;
}
// Notify the daemon thread which will actually do the heap trim.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 797f44c..12c55c4 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -135,9 +135,10 @@
// Used so that we don't overflow the allocation time atomic integer.
static constexpr size_t kTimeAdjust = 1024;
- // How long we wait after a GC to perform a heap trim (nanoseconds).
+ // How often we allow heap trimming to happen (nanoseconds).
static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
- static constexpr uint64_t kHeapTransitionWait = MsToNs(5000);
+ // How long we wait after a transition request to perform a collector transition (nanoseconds).
+ static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
// Create a heap with the requested sizes. The possible empty
// image_file_names names specify Spaces to load based on
@@ -648,9 +649,9 @@
collector::GcType WaitForGcToCompleteLocked(Thread* self)
EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
- void RequestHeapTransition(CollectorType desired_collector_type, uint64_t delta_time)
+ void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
LOCKS_EXCLUDED(heap_trim_request_lock_);
- void RequestHeapTrim(uint64_t delta_time) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+ void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
bool IsGCRequestPending() const;
@@ -754,7 +755,7 @@
// Lock which guards heap trim requests.
Mutex* heap_trim_request_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// When we want to perform the next heap trim (nano seconds).
- uint64_t heap_trim_target_time_ GUARDED_BY(heap_trim_request_lock_);
+ uint64_t last_trim_time_ GUARDED_BY(heap_trim_request_lock_);
// When we want to perform the next heap transition (nano seconds).
uint64_t heap_transition_target_time_ GUARDED_BY(heap_trim_request_lock_);
// If we have a heap trim request pending.