diff options
 runtime/gc/heap-inl.h |  2 +-
 runtime/gc/heap.cc    | 21 +++++++++++++--------
 runtime/gc/heap.h     |  5 ++++-
3 files changed, 18 insertions, 10 deletions
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h index e089ef203f..89ded0b27f 100644 --- a/runtime/gc/heap-inl.h +++ b/runtime/gc/heap-inl.h @@ -256,7 +256,7 @@ inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) co // Zygote resulting in it being prematurely freed. // We can only do this for primitive objects since large objects will not be within the card table // range. This also means that we rely on SetClass not dirtying the object's card. - return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray(); + return byte_count >= large_object_threshold_ && c->IsPrimitiveArray(); } template <bool kGrow> diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 8d8cdd6047..2e6d2c29b6 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -98,6 +98,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max long_gc_log_threshold_(long_gc_log_threshold), ignore_max_footprint_(ignore_max_footprint), have_zygote_space_(false), + large_object_threshold_(std::numeric_limits<size_t>::max()), // Starts out disabled. soft_reference_queue_(this), weak_reference_queue_(this), finalizer_reference_queue_(this), @@ -159,11 +160,16 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max } // If we aren't the zygote, switch to the default non zygote allocator. This may update the // entrypoints. - if (!Runtime::Current()->IsZygote() || !kMovingCollector) { + if (!Runtime::Current()->IsZygote()) { ChangeCollector(post_zygote_collector_type_); + large_object_threshold_ = kDefaultLargeObjectThreshold; } else { - // We are the zygote, use bump pointer allocation + semi space collector. - ChangeCollector(kCollectorTypeSS); + if (kMovingCollector) { + // We are the zygote, use bump pointer allocation + semi space collector. 
+ ChangeCollector(kCollectorTypeSS); + } else { + ChangeCollector(post_zygote_collector_type_); + } } live_bitmap_.reset(new accounting::HeapBitmap(this)); @@ -1485,15 +1491,13 @@ void Heap::PreZygoteFork() { main_space_->SetFootprintLimit(main_space_->Capacity()); AddSpace(main_space_); have_zygote_space_ = true; + // Enable large object space allocations. + large_object_threshold_ = kDefaultLargeObjectThreshold; // Create the zygote space mod union table. accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space); CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table"; AddModUnionTable(mod_union_table); - // Reset the cumulative loggers since we now have a few additional timing phases. - for (const auto& collector : garbage_collectors_) { - collector->ResetCumulativeStatistics(); - } // Can't use RosAlloc for non moving space due to thread local buffers. // TODO: Non limited space for non-movable objects? MemMap* mem_map = post_zygote_non_moving_space_mem_map_.release(); @@ -2049,7 +2053,8 @@ void Heap::ProcessCards(TimingLogger& timings) { TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings); // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards // were dirty before the GC started. - // TODO: Don't need to use atomic. + // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread) + // -> clean(cleaning thread). // The races are we either end up with: Aged card, unaged card. Since we have the checkpoint // roots and then we scan / update mod union tables after. We will always scan either card. // If we end up with the non aged card, we scan it it in the pause. diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 5d3232fa3d..2f227d0d37 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -119,7 +119,7 @@ class Heap { // If true, measure the total allocation time. 
static constexpr bool kMeasureAllocationTime = false; // Primitive arrays larger than this size are put in the large object space. - static constexpr size_t kLargeObjectThreshold = 3 * kPageSize; + static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize; static constexpr size_t kDefaultInitialSize = 2 * MB; static constexpr size_t kDefaultMaximumSize = 32 * MB; @@ -743,6 +743,9 @@ class Heap { // If we have a zygote space. bool have_zygote_space_; + // Minimum allocation size of large object. + size_t large_object_threshold_; + // Guards access to the state of GC, associated conditional variable is used to signal when a GC // completes. Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; |