author Hans Boehm <hboehm@google.com> 2019-01-09 00:39:13 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com> 2019-01-09 00:39:13 +0000
commit e959e5d54e6b979e400dd096f59303391bd00494 (patch)
tree 00b22f05d7ef76c31ad7be54f40fed5dbb73ebdd
parent 87fe485bfaac977c2e5979816b38743f3046a400 (diff)
parent 15752673020e89df2a9353f332bd1409de4cd4b7 (diff)
Merge "Tweak native allocation GC triggering thresholds"
-rw-r--r--  runtime/gc/heap.cc  18
-rw-r--r--  runtime/gc/heap.h   17
2 files changed, 20 insertions, 15 deletions
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8d19cd0efe..d699da0d16 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3798,7 +3798,7 @@ void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full) {
if (!Runtime::Current()->IsShuttingDown(self)) {
// Wait for any GCs currently running to finish.
if (WaitForGcToComplete(cause, self) == collector::kGcTypeNone) {
- // If the we can't run the GC type we wanted to run, find the next appropriate one and try
+ // If we can't run the GC type we wanted to run, find the next appropriate one and try
// that instead. E.g. can't do partial, so do full instead.
collector::GcType next_gc_type = next_gc_type_;
// If forcing full and next gc type is sticky, override with a non-sticky type.
@@ -3977,8 +3977,13 @@ static constexpr size_t kNewNativeDiscountFactor = 2;
// If weighted java + native memory use exceeds our target by kStopForNativeFactor, and
// newly allocated memory exceeds kHugeNativeAlloc, we wait for GC to complete to avoid
// running out of memory.
-static constexpr float kStopForNativeFactor = 2.0;
-static constexpr size_t kHugeNativeAllocs = 200*1024*1024;
+static constexpr float kStopForNativeFactor = 4.0;
+// TODO: Allow this to be tuned. We want this much smaller for some apps, like Calculator.
+// But making it too small can cause jank in apps like launcher that intentionally allocate
+// large amounts of memory in rapid succession. (b/122099093)
+// For now, we punt, and use a value that should be easily large enough to disable this in all
+// questionable settings, but that is clearly too large to be effective for small memory devices.
+static constexpr size_t kHugeNativeAllocs = 1 * GB;
// Return the ratio of the weighted native + java allocated bytes to its target value.
// A return value > 1.0 means we should collect. Significantly larger values mean we're falling
@@ -3998,8 +4003,9 @@ inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes) {
size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes);
size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
+ old_native_bytes / kOldNativeDiscountFactor;
- size_t adj_start_bytes = concurrent_start_bytes_
- + NativeAllocationGcWatermark() / kNewNativeDiscountFactor;
+ size_t add_bytes_allowed = static_cast<size_t>(
+ NativeAllocationGcWatermark() * HeapGrowthMultiplier());
+ size_t adj_start_bytes = concurrent_start_bytes_ + add_bytes_allowed / kNewNativeDiscountFactor;
return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
/ static_cast<float>(adj_start_bytes);
}
@@ -4017,7 +4023,7 @@ inline void Heap::CheckConcurrentGCForNative(Thread* self) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency;
}
- WaitForGcToComplete(kGcCauseForAlloc, self);
+ WaitForGcToComplete(kGcCauseForNativeAlloc, self);
}
} else {
CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
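The retuned arithmetic above is easiest to see with concrete numbers. The standalone sketch below mirrors the shape of NativeMemoryOverTarget() and the blocking check in CheckConcurrentGCForNative(); it is illustrative only, and kOldNativeDiscountFactor, the growth multiplier, and all byte counts are assumed example values, not values taken from this change.

#include <cstddef>
#include <cstdio>

constexpr size_t kNewNativeDiscountFactor = 2;          // matches the diff
constexpr size_t kOldNativeDiscountFactor = 65536;      // assumed example value
constexpr float  kStopForNativeFactor = 4.0f;           // matches the diff
constexpr size_t kHugeNativeAllocs = size_t{1} << 30;   // 1 GB, matches the diff

// Ratio of weighted (java + native) allocated bytes to the adjusted trigger
// point, following the shape of Heap::NativeMemoryOverTarget() above.
float NativeMemoryOverTarget(size_t bytes_allocated,
                             size_t concurrent_start_bytes,
                             size_t watermark,           // NativeAllocationGcWatermark()
                             float growth_multiplier,    // HeapGrowthMultiplier()
                             size_t current_native_bytes,
                             size_t old_native_bytes) {
  size_t new_native_bytes = current_native_bytes > old_native_bytes
      ? current_native_bytes - old_native_bytes : 0;
  // Recently allocated native memory counts for more than long-lived native memory.
  size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
      + old_native_bytes / kOldNativeDiscountFactor;
  // The change above scales the native allowance by the growth multiplier
  // instead of adding the raw watermark to concurrent_start_bytes_.
  size_t add_bytes_allowed = static_cast<size_t>(watermark * growth_multiplier);
  size_t adj_start_bytes = concurrent_start_bytes
      + add_bytes_allowed / kNewNativeDiscountFactor;
  return static_cast<float>(bytes_allocated + weighted_native_bytes)
      / static_cast<float>(adj_start_bytes);
}

int main() {
  constexpr size_t MiB = 1024 * 1024;
  // Assumed example state: 48 MiB Java heap in use, 64 MiB concurrent-start
  // threshold, 24 MiB watermark, 2.0 growth multiplier, 300 MiB of freshly
  // registered native allocation.
  size_t new_native = 300 * MiB;
  float gc_urgency = NativeMemoryOverTarget(48 * MiB, 64 * MiB, 24 * MiB, 2.0f,
                                            new_native, 0);
  std::printf("gc_urgency = %.2f\n", gc_urgency);  // ~2.25: request a concurrent GC
  // Blocking (WaitForGcToComplete) additionally requires both a much larger
  // ratio and a huge amount of new native memory, per the constants above.
  bool block = gc_urgency > kStopForNativeFactor && new_native > kHugeNativeAllocs;
  std::printf("block for GC: %s\n", block ? "yes" : "no");  // no
  return 0;
}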
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 6696cc1f06..9949bf4213 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -157,7 +157,12 @@ class Heap {
// Client should call NotifyNativeAllocation every kNotifyNativeInterval allocations.
// Should be chosen so that time_to_call_mallinfo / kNotifyNativeInterval is on the same order
// as object allocation time. time_to_call_mallinfo seems to be on the order of 1 usec.
+#ifdef __ANDROID__
static constexpr uint32_t kNotifyNativeInterval = 32;
+#else
+ // Some host mallinfo() implementations are slow. And memory is less scarce.
+ static constexpr uint32_t kNotifyNativeInterval = 128;
+#endif
// RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the
// following. kCheckImmediatelyThreshold * kNotifyNativeInterval should be small enough to
@@ -1140,15 +1145,9 @@ class Heap {
// collect. We collect when a weighted sum of Java memory plus native memory exceeds
// the similarly weighted sum of the Java heap size target and this value.
ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
- // It probably makes most sense to use a constant multiple of target_footprint_ .
- // This is a good indication of the live data size, together with the
- // intended space-time trade-off, as expressed by SetTargetHeapUtilization.
- // For a fixed target utilization, the amount of GC effort per native
- // allocated byte remains roughly constant as the Java heap size changes.
- // But we previously triggered on max_free_ native allocation which is often much
- // smaller. To avoid unexpected growth, we partially keep that limit in place for now.
- // TODO: Consider HeapGrowthMultiplier(). Maybe.
- return std::min(target_footprint_.load(std::memory_order_relaxed), 2 * max_free_);
+ // We keep the traditional limit of max_free_ in place for small heaps,
+ // but allow it to be adjusted upward for large heaps to limit GC overhead.
+ return target_footprint_.load(std::memory_order_relaxed) / 8 + max_free_;
}
ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);
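For a rough sense of how the revised watermark behaves, the short standalone comparison below evaluates the removed rule, std::min(target_footprint_, 2 * max_free_), against the new rule, target_footprint_ / 8 + max_free_, for a few footprints; the max_free_ and footprint figures are assumed example values.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <initializer_list>

// Watermark rule from the removed lines.
size_t OldWatermark(size_t target_footprint, size_t max_free) {
  return std::min(target_footprint, 2 * max_free);
}

// Watermark rule introduced by this change.
size_t NewWatermark(size_t target_footprint, size_t max_free) {
  return target_footprint / 8 + max_free;
}

int main() {
  constexpr size_t MiB = 1024 * 1024;
  constexpr size_t max_free = 32 * MiB;  // assumed example value
  for (size_t target : {16 * MiB, 64 * MiB, 512 * MiB}) {
    std::printf("target_footprint %4zu MiB: old %3zu MiB, new %3zu MiB\n",
                target / MiB,
                OldWatermark(target, max_free) / MiB,
                NewWatermark(target, max_free) / MiB);
  }
  // The old rule was capped at 2 * max_free_ regardless of heap size; the new
  // one stays near max_free_ for small heaps but keeps growing with the
  // footprint, so large heaps pay less GC overhead per native allocated byte.
  return 0;
}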