-rw-r--r--  runtime/gc/accounting/read_barrier_table.h  |  2
-rw-r--r--  runtime/gc/heap.cc                           |  7
-rw-r--r--  runtime/gc/space/region_space-inl.h          |  8
-rw-r--r--  runtime/gc/space/region_space.cc             | 24
-rw-r--r--  runtime/gc/space/region_space.h              | 43
-rw-r--r--  test/154-gc-loop/src/Main.java               |  2
6 files changed, 66 insertions, 20 deletions
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 86266e2500..e77a5b8e39 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -80,7 +80,7 @@ class ReadBarrierTable {
   }
 
   // This should match RegionSpace::kRegionSize. static_assert'ed in concurrent_copying.h.
-  static constexpr size_t kRegionSize = 1 * MB;
+  static constexpr size_t kRegionSize = 256 * KB;
 
  private:
   static constexpr uint64_t kHeapCapacity = 4ULL * GB;  // low 4gb.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 12b9701845..b857ea3eef 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3559,11 +3559,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
   collector::GcType gc_type = collector_ran->GetGcType();
   const double multiplier = HeapGrowthMultiplier();  // Use the multiplier to grow more for
   // foreground.
-  // Ensure at least 2.5 MB to temporarily fix excessive GC caused by TLAB ergonomics.
-  const uint64_t adjusted_min_free = std::max(static_cast<uint64_t>(min_free_ * multiplier),
-                                              static_cast<uint64_t>(5 * MB / 2));
-  const uint64_t adjusted_max_free = std::max(static_cast<uint64_t>(max_free_ * multiplier),
-                                              static_cast<uint64_t>(5 * MB / 2));
+  const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
+  const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
   if (gc_type != collector::kGcTypeSticky) {
     // Grow the heap for non sticky GC.
     ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 5d282f1ae9..fffcee64ad 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -78,7 +78,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
     for (size_t i = 0; i < num_regions_; ++i) {
       Region* r = &regions_[i];
       if (r->IsFree()) {
-        r->Unfree(time_);
+        r->Unfree(this, time_);
         r->SetNewlyAllocated();
         ++num_non_free_regions_;
         obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
@@ -91,7 +91,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
     for (size_t i = 0; i < num_regions_; ++i) {
       Region* r = &regions_[i];
       if (r->IsFree()) {
-        r->Unfree(time_);
+        r->Unfree(this, time_);
         ++num_non_free_regions_;
         obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
         CHECK(obj != nullptr);
@@ -314,13 +314,13 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
       DCHECK_EQ(left + num_regs, right);
       Region* first_reg = &regions_[left];
       DCHECK(first_reg->IsFree());
-      first_reg->UnfreeLarge(time_);
+      first_reg->UnfreeLarge(this, time_);
       ++num_non_free_regions_;
       first_reg->SetTop(first_reg->Begin() + num_bytes);
       for (size_t p = left + 1; p < right; ++p) {
         DCHECK_LT(p, num_regions_);
         DCHECK(regions_[p].IsFree());
-        regions_[p].UnfreeLargeTail(time_);
+        regions_[p].UnfreeLargeTail(this, time_);
         ++num_non_free_regions_;
       }
       *bytes_allocated = num_bytes;
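Aside: the `this` now threaded through Unfree()/UnfreeLarge()/UnfreeLargeTail() above lets a region notify its owning space when it becomes non-free. A minimal sketch of that contract, using simplified stand-in types rather than ART's real classes (locking omitted; ART guards all of this with region_lock_):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Simplified stand-in for RegionSpace.
struct RegionSpace {
  size_t non_free_region_index_limit = 0;
  // Mirrors RegionSpace::AdjustNonFreeRegionLimit(idx_): push the limit past
  // the index of any region that just became non-free.
  void AdjustNonFreeRegionLimit(size_t idx) {
    non_free_region_index_limit = std::max(non_free_region_index_limit, idx + 1);
  }
};

// Simplified stand-in for RegionSpace::Region.
struct Region {
  size_t idx = 0;
  bool free = true;
  uint32_t alloc_time = 0;
  // Mirrors the new Unfree(RegionSpace*, uint32_t): becoming non-free now also
  // maintains the space's scan limit, which is why the space pointer is passed.
  void Unfree(RegionSpace* space, uint32_t time) {
    free = false;
    alloc_time = time;
    space->AdjustNonFreeRegionLimit(idx);
  }
};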
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 321524cbbd..560abe121a 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -86,6 +86,7 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
   num_regions_ = mem_map_size / kRegionSize;
   num_non_free_regions_ = 0U;
   DCHECK_GT(num_regions_, 0U);
+  non_free_region_index_limit_ = 0U;
   regions_.reset(new Region[num_regions_]);
   uint8_t* region_addr = mem_map->Begin();
   for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
@@ -192,7 +193,11 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
   MutexLock mu(Thread::Current(), region_lock_);
   size_t num_expected_large_tails = 0;
   bool prev_large_evacuated = false;
-  for (size_t i = 0; i < num_regions_; ++i) {
+  VerifyNonFreeRegionLimit();
+  const size_t iter_limit = kUseTableLookupReadBarrier
+      ? num_regions_
+      : std::min(num_regions_, non_free_region_index_limit_);
+  for (size_t i = 0; i < iter_limit; ++i) {
     Region* r = &regions_[i];
     RegionState state = r->State();
     RegionType type = r->Type();
@@ -236,13 +241,16 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
       }
     }
   }
+  DCHECK_EQ(num_expected_large_tails, 0U);
   current_region_ = &full_region_;
   evac_region_ = &full_region_;
 }
 
 void RegionSpace::ClearFromSpace() {
   MutexLock mu(Thread::Current(), region_lock_);
-  for (size_t i = 0; i < num_regions_; ++i) {
+  VerifyNonFreeRegionLimit();
+  size_t new_non_free_region_index_limit = 0;
+  for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
     Region* r = &regions_[i];
     if (r->IsInFromSpace()) {
       r->Clear();
@@ -255,6 +263,7 @@ void RegionSpace::ClearFromSpace() {
             cur->LiveBytes() != static_cast<size_t>(cur->Top() - cur->Begin())) {
           break;
         }
+        DCHECK(cur->IsInUnevacFromSpace());
         if (full_count != 0) {
           cur->SetUnevacFromSpaceAsToSpace();
         }
@@ -271,7 +280,15 @@ void RegionSpace::ClearFromSpace() {
         i += full_count - 1;
       }
     }
+    // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
+    Region* last_checked_region = &regions_[i];
+    if (!last_checked_region->IsFree()) {
+      new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
+                                                 last_checked_region->Idx() + 1);
+    }
   }
+  // Update non_free_region_index_limit_.
+  SetNonFreeRegionLimit(new_non_free_region_index_limit);
   evac_region_ = nullptr;
 }
 
@@ -324,6 +341,7 @@ void RegionSpace::Clear() {
     }
     r->Clear();
   }
+  SetNonFreeRegionLimit(0);
   current_region_ = &full_region_;
   evac_region_ = &full_region_;
 }
@@ -390,7 +408,7 @@ bool RegionSpace::AllocNewTlab(Thread* self) {
   for (size_t i = 0; i < num_regions_; ++i) {
     Region* r = &regions_[i];
    if (r->IsFree()) {
-      r->Unfree(time_);
+      r->Unfree(this, time_);
       ++num_non_free_regions_;
       r->SetNewlyAllocated();
       r->SetTop(r->End());
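The SetFromSpace() and ClearFromSpace() hunks above no longer scan all num_regions_ entries. A hedged sketch of the bounding idea, with simplified hypothetical types and the evacuation bookkeeping stripped out:

#include <algorithm>
#include <cstddef>
#include <vector>

// Simplified stand-in for RegionSpace::Region.
struct Region {
  bool free = true;
  size_t idx = 0;
};

// Mirrors the shape of ClearFromSpace(): scan only up to the current limit
// (everything at or past it is free, by the invariant), and compute a new,
// possibly tighter limit from the highest non-free index actually seen.
size_t RecomputeLimit(const std::vector<Region>& regions, size_t old_limit) {
  size_t new_limit = 0;
  for (size_t i = 0; i < std::min(regions.size(), old_limit); ++i) {
    // (The real code also clears from-space regions and walks runs of
    // unevacuated regions here; this sketch only tracks the limit.)
    if (!regions[i].free) {
      new_limit = std::max(new_limit, regions[i].idx + 1);
    }
  }
  // Invariant restored: regions[i].free holds for all i >= new_limit.
  return new_limit;
}

Note the asymmetry in the real code: allocation can only grow the limit (AdjustNonFreeRegionLimit), while ClearFromSpace() and Clear() are the points where it shrinks again.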
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index da36f5c55d..95f293bc12 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -167,7 +167,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
   // Object alignment within the space.
   static constexpr size_t kAlignment = kObjectAlignment;
   // The region size.
-  static constexpr size_t kRegionSize = 1 * MB;
+  static constexpr size_t kRegionSize = 256 * KB;
 
   bool IsInFromSpace(mirror::Object* ref) {
     if (HasAddress(ref)) {
@@ -308,25 +308,31 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
     }
 
     // Given a free region, declare it non-free (allocated).
-    void Unfree(uint32_t alloc_time) {
+    void Unfree(RegionSpace* region_space, uint32_t alloc_time)
+        REQUIRES(region_space->region_lock_) {
       DCHECK(IsFree());
       state_ = RegionState::kRegionStateAllocated;
       type_ = RegionType::kRegionTypeToSpace;
       alloc_time_ = alloc_time;
+      region_space->AdjustNonFreeRegionLimit(idx_);
     }
 
-    void UnfreeLarge(uint32_t alloc_time) {
+    void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
+        REQUIRES(region_space->region_lock_) {
       DCHECK(IsFree());
       state_ = RegionState::kRegionStateLarge;
       type_ = RegionType::kRegionTypeToSpace;
       alloc_time_ = alloc_time;
+      region_space->AdjustNonFreeRegionLimit(idx_);
     }
 
-    void UnfreeLargeTail(uint32_t alloc_time) {
+    void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
+        REQUIRES(region_space->region_lock_) {
       DCHECK(IsFree());
       state_ = RegionState::kRegionStateLargeTail;
       type_ = RegionType::kRegionTypeToSpace;
       alloc_time_ = alloc_time;
+      region_space->AdjustNonFreeRegionLimit(idx_);
     }
 
     void SetNewlyAllocated() {
@@ -342,7 +348,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
     bool IsLarge() const {
       bool is_large = state_ == RegionState::kRegionStateLarge;
       if (is_large) {
-        DCHECK_LT(begin_ + 1 * MB, Top());
+        DCHECK_LT(begin_ + kRegionSize, Top());
       }
       return is_large;
     }
@@ -429,7 +435,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
 
     size_t ObjectsAllocated() const {
       if (IsLarge()) {
-        DCHECK_LT(begin_ + 1 * MB, Top());
+        DCHECK_LT(begin_ + kRegionSize, Top());
         DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
         return 1;
       } else if (IsLargeTail()) {
@@ -520,6 +526,27 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
   mirror::Object* GetNextObject(mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) {
+    DCHECK_LT(new_non_free_region_index, num_regions_);
+    non_free_region_index_limit_ = std::max(non_free_region_index_limit_,
+                                            new_non_free_region_index + 1);
+    VerifyNonFreeRegionLimit();
+  }
+
+  void SetNonFreeRegionLimit(size_t new_non_free_region_index_limit) REQUIRES(region_lock_) {
+    DCHECK_LE(new_non_free_region_index_limit, num_regions_);
+    non_free_region_index_limit_ = new_non_free_region_index_limit;
+    VerifyNonFreeRegionLimit();
+  }
+
+  void VerifyNonFreeRegionLimit() REQUIRES(region_lock_) {
+    if (kIsDebugBuild && non_free_region_index_limit_ < num_regions_) {
+      for (size_t i = non_free_region_index_limit_; i < num_regions_; ++i) {
+        CHECK(regions_[i].IsFree());
+      }
+    }
+  }
+
   Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
   uint32_t time_;                  // The time as the number of collections since the startup.
@@ -527,6 +554,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
   size_t num_non_free_regions_;    // The number of non-free regions in this space.
   std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);  // The pointer to the region array.
+  // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
+  // SetFromSpace(). Invariant: for all i >= non_free_region_index_limit_, regions_[i].IsFree() is
+  // true.
+  size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);
 
   Region* current_region_;         // The region that's being allocated currently.
   Region* evac_region_;            // The region that's being evacuated to currently.
   Region full_region_;             // The dummy/sentinel region that looks full.
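Back-of-the-envelope check on why the kRegionSize reduction above makes the scan limit worthwhile (the 512 MB space size is a hypothetical example, not a value from this change):

#include <cstddef>

constexpr size_t KB = 1024;
constexpr size_t MB = 1024 * KB;

// Region sizes before and after this change.
constexpr size_t kOldRegionSize = 1 * MB;
constexpr size_t kNewRegionSize = 256 * KB;

// For a hypothetical 512 MB region space:
constexpr size_t kSpaceSize = 512 * MB;
static_assert(kSpaceSize / kOldRegionSize == 512, "1 MB regions: 512 entries");
static_assert(kSpaceSize / kNewRegionSize == 2048, "256 KB regions: 4x as many");
// With 4x as many regions, per-GC full scans in SetFromSpace()/ClearFromSpace()
// get proportionally more expensive; non_free_region_index_limit_ bounds those
// scans to the prefix of the region array that can actually contain non-free
// regions.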
diff --git a/test/154-gc-loop/src/Main.java b/test/154-gc-loop/src/Main.java
index 3a256c109e..2228ca2783 100644
--- a/test/154-gc-loop/src/Main.java
+++ b/test/154-gc-loop/src/Main.java
@@ -38,7 +38,7 @@ public class Main {
       }
     } catch (Exception e) {}
     System.out.println("Finalize count too large: " +
-                       ((finalizeCounter >= 10) ? Integer.toString(finalizeCounter) : "false"));
+                       ((finalizeCounter >= 12) ? Integer.toString(finalizeCounter) : "false"));
   }
 
   private static native void backgroundProcessState();