Revert^4 "Handle OOM situation in java-heap more aggressively"
This reverts commit 685c84775f7dfe23197b080e4730435fd80e6d27.
Reason for revert: Retrying with some changes to the failing art-tests
Change-Id: I2b03f17da74c11adba8a27b8e9488bfae3374085
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2accee7..d6c1397 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1395,10 +1395,14 @@
allocator_type == kAllocatorTypeRegionTLAB) {
space = region_space_;
}
- CHECK(space != nullptr) << "allocator_type:" << allocator_type
- << " byte_count:" << byte_count
- << " total_bytes_free:" << total_bytes_free;
- space->LogFragmentationAllocFailure(oss, byte_count);
+
+ // There is no fragmentation info to log for large-object space.
+ if (allocator_type != kAllocatorTypeLOS) {
+ CHECK(space != nullptr) << "allocator_type:" << allocator_type
+ << " byte_count:" << byte_count
+ << " total_bytes_free:" << total_bytes_free;
+ space->LogFragmentationAllocFailure(oss, byte_count);
+ }
}
self->ThrowOutOfMemoryError(oss.str().c_str());
}
@@ -1739,6 +1743,9 @@
size_t* usable_size,
size_t* bytes_tl_bulk_allocated,
ObjPtr<mirror::Class>* klass) {
+ // After a GC (due to allocation failure) we should reclaim at least this
+ // fraction of the current max heap size. Otherwise throw OOME.
+ constexpr double kMinFreeHeapAfterGcForAlloc = 0.01;
bool was_default_allocator = allocator == GetCurrentAllocator();
// Make sure there is no pending exception since we may need to throw an OOME.
self->AssertNoPendingException();
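Note (illustrative, not part of the patch): a minimal standalone sketch of the free-heap check that kMinFreeHeapAfterGcForAlloc gates. The 256 MiB growth limit and the helper name HaveReclaimedEnough are assumptions chosen to mirror growth_limit_ and the have_reclaimed_enough lambda in the next hunk; with a 1% threshold, at least ~2.6 MiB must be free after the GC before the allocation is retried.

    #include <cstddef>
    #include <cstdio>

    // Sketch only: mirrors the have_reclaimed_enough() check added by this patch.
    constexpr double kMinFreeHeapAfterGcForAlloc = 0.01;

    bool HaveReclaimedEnough(size_t bytes_allocated, size_t growth_limit) {
      double free_fraction =
          static_cast<double>(growth_limit - bytes_allocated) / growth_limit;
      return free_fraction >= kMinFreeHeapAfterGcForAlloc;
    }

    int main() {
      const size_t growth_limit = 256u * 1024 * 1024;  // Assumed 256 MiB heap cap.
      // ~6 MiB free (2.3% of the cap): enough to retry the allocation.
      printf("%d\n", HaveReclaimedEnough(250u * 1024 * 1024, growth_limit));
      // ~2 MiB free (0.78% of the cap): below the 1% threshold, so give up.
      printf("%d\n", HaveReclaimedEnough(254u * 1024 * 1024, growth_limit));
      return 0;
    }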
@@ -1783,50 +1790,36 @@
}
}
+ auto have_reclaimed_enough = [&]() {
+ size_t curr_bytes_allocated = GetBytesAllocated();
+ double curr_free_heap =
+ static_cast<double>(growth_limit_ - curr_bytes_allocated) / growth_limit_;
+ return curr_free_heap >= kMinFreeHeapAfterGcForAlloc;
+ };
+ // We perform one GC as per next_gc_type_ (chosen in GrowForUtilization),
+ // if it hasn't already been tried. If that doesn't succeed, fall back to
+ // the most exhaustive option: a full-heap collection, including clearing
+ // SoftReferences. In the case of ConcurrentCopying, it also ensures that
+ // all regions are evacuated. If the allocation still doesn't succeed after
+ // that, there is no hope, so we throw OOME.
collector::GcType tried_type = next_gc_type_;
- const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
- CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
+ if (last_gc < tried_type) {
+ const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
+ CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
- if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
- (!instrumented && EntrypointsInstrumented())) {
- return nullptr;
- }
- if (gc_ran) {
- mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
- usable_size, bytes_tl_bulk_allocated);
- if (ptr != nullptr) {
- return ptr;
- }
- }
-
- // Loop through our different Gc types and try to Gc until we get enough free memory.
- for (collector::GcType gc_type : gc_plan_) {
- if (gc_type == tried_type) {
- continue;
- }
- // Attempt to run the collector, if we succeed, re-try the allocation.
- const bool plan_gc_ran = PERFORM_SUSPENDING_OPERATION(
- CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
}
- if (plan_gc_ran) {
- // Did we free sufficient memory for the allocation to succeed?
- mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+ if (gc_ran && have_reclaimed_enough()) {
+ mirror::Object* ptr = TryToAllocate<true, false>(self, allocator,
+ alloc_size, bytes_allocated,
usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
return ptr;
}
}
}
- // Allocations have failed after GCs; this is an exceptional state.
- // Try harder, growing the heap if necessary.
- mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
- usable_size, bytes_tl_bulk_allocated);
- if (ptr != nullptr) {
- return ptr;
- }
// Most allocations should have succeeded by now, so the heap is really full, really fragmented,
// or the requested size is really big. Do another GC, collecting SoftReferences this time. The
// VM spec requires that all SoftReferences have been collected and cleared before throwing
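Note (illustrative, not part of the patch): a self-contained outline of the retry policy this hunk introduces. HeapSketch, RunGc, and AllocateAfterFailure are hypothetical stand-ins for Heap, CollectGarbageInternal, and the real allocation path; the free fractions are made-up numbers.

    #include <cstdio>

    // Hypothetical stand-ins for the heap state and helpers; sketch only.
    enum GcType { kGcTypeNone = 0, kGcTypeSticky, kGcTypePartial, kGcTypeFull };

    struct HeapSketch {
      double free_fraction_after_gc;  // Pretend fraction of the growth limit freed by a GC.

      static constexpr double kMinFreeHeapAfterGcForAlloc = 0.01;

      // Pretend every requested collection runs to completion.
      bool RunGc(GcType /*type*/) { return true; }

      bool HaveReclaimedEnough() const {
        return free_fraction_after_gc >= kMinFreeHeapAfterGcForAlloc;
      }

      // Shape of the new failure path: one GC of the planned type (if it was
      // not the GC that just ran), then one exhaustive GC that also clears
      // SoftReferences, then OOME. The allocation is only retried when enough
      // of the heap is free afterwards.
      const char* AllocateAfterFailure(GcType last_gc, GcType next_gc_type) {
        if (last_gc < next_gc_type && RunGc(next_gc_type) && HaveReclaimedEnough()) {
          return "retry allocation after the planned GC";
        }
        RunGc(kGcTypeFull);  // Exhaustive: also clears SoftReferences.
        if (HaveReclaimedEnough()) {
          return "retry allocation after the exhaustive GC";
        }
        return "throw OutOfMemoryError";
      }
    };

    int main() {
      HeapSketch roomy{0.05};   // 5% of the growth limit freed.
      HeapSketch tight{0.005};  // Only 0.5% freed: below the 1% threshold.
      printf("%s\n", roomy.AllocateAfterFailure(kGcTypeNone, kGcTypeSticky));
      printf("%s\n", tight.AllocateAfterFailure(kGcTypeNone, kGcTypeSticky));
      return 0;
    }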
@@ -1841,8 +1834,12 @@
(!instrumented && EntrypointsInstrumented())) {
return nullptr;
}
- ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
- bytes_tl_bulk_allocated);
+ mirror::Object* ptr = nullptr;
+ if (have_reclaimed_enough()) {
+ ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
+ usable_size, bytes_tl_bulk_allocated);
+ }
+
if (ptr == nullptr) {
const uint64_t current_time = NanoTime();
switch (allocator) {