| author | 2018-08-21 16:46:41 +0000 |
|---|---|
| committer | 2018-08-21 16:46:41 +0000 |
| commit | 348d10084f59ffe70619e9a25374ead424379669 (patch) |
| tree | f69bf96432a6f9c8078e6c86efc706410e776c56 |
| parent | 7c0c794790f49c417ef42c19676c29f06b09a383 (diff) |
| parent | 4e7511364e21e55078ce8434dd8a7ffacbe0037b (diff) |
Merge "Rework the newly-allocated region logic in RegionSpace."
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | runtime/gc/collector/concurrent_copying.cc | 6 |
| -rw-r--r-- | runtime/gc/space/region_space-inl.h | 8 |
| -rw-r--r-- | runtime/gc/space/region_space.cc | 37 |
| -rw-r--r-- | runtime/gc/space/region_space.h | 33 |
4 files changed, 77 insertions, 7 deletions
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 85b0bb94cb..b1dfd32f0a 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -627,8 +627,6 @@ class ConcurrentCopying::VerifyNoMissingCardMarkVisitor {
   void CheckReference(mirror::Object* ref, int32_t offset = -1) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    // FIXME: This assertion is failing in 004-ThreadStress most of
-    // the time with Sticky-Bit (Generational) CC.
     CHECK(ref == nullptr || !cc_->region_space_->IsInNewlyAllocatedRegion(ref))
         << holder_->PrettyTypeOf() << "(" << holder_.Ptr() << ") references object "
         << ref->PrettyTypeOf() << "(" << ref << ") in newly allocated region at offset=" << offset;
@@ -1618,7 +1616,9 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
   }
   space::RegionSpace::RegionType rtype = region_space_->GetRegionType(to_ref);
   bool add_to_live_bytes = false;
-  DCHECK(!region_space_->IsInNewlyAllocatedRegion(to_ref));
+  // Invariant: There should be no object from a newly-allocated
+  // region (either large or non-large) on the mark stack.
+  DCHECK(!region_space_->IsInNewlyAllocatedRegion(to_ref)) << to_ref;
   if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
     // Mark the bitmap only in the GC thread here so that we don't need a CAS.
     if (!kUseBakerReadBarrier ||
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index e30b63ace8..e04851564d 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -355,6 +355,10 @@ inline mirror::Object* RegionSpace::AllocLargeInRange(size_t begin,
     // We make 'top' all usable bytes, as the caller of this
     // allocation may use all of 'usable_size' (see mirror::Array::Alloc).
     first_reg->SetTop(first_reg->Begin() + allocated);
+    if (!kForEvac) {
+      // Evac doesn't count as newly allocated.
+      first_reg->SetNewlyAllocated();
+    }
     for (size_t p = left + 1; p < right; ++p) {
       DCHECK_LT(p, num_regions_);
       DCHECK(regions_[p].IsFree());
@@ -364,6 +368,10 @@
       } else {
         ++num_non_free_regions_;
       }
+      if (!kForEvac) {
+        // Evac doesn't count as newly allocated.
+        regions_[p].SetNewlyAllocated();
+      }
     }
     *bytes_allocated = allocated;
     if (usable_size != nullptr) {
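To make the `kForEvac` distinction in the `region_space-inl.h` hunks above concrete, here is a minimal, self-contained sketch. The types and names (`Region`, `UnfreeLargeRange`) are hypothetical simplifications, not ART code: a large object spanning several regions tags every covered region as newly allocated, but only for mutator allocations; evacuation copies are deliberately left untagged.

```cpp
#include <cstddef>
#include <vector>

// Simplified region: tracks only whether it is free and whether it was
// allocated since the last GC started.
struct Region {
  bool is_free = true;
  bool is_newly_allocated = false;
};

// Claim regions [left, right) for one large object. Mirrors the shape of
// RegionSpace::AllocLargeInRange: mutator allocations tag every covered
// region as newly allocated; evacuation copies (kForEvac) do not.
template <bool kForEvac>
void UnfreeLargeRange(std::vector<Region>& regions, size_t left, size_t right) {
  for (size_t p = left; p < right; ++p) {
    regions[p].is_free = false;
    if (!kForEvac) {
      // Evac doesn't count as newly allocated.
      regions[p].is_newly_allocated = true;
    }
  }
}

int main() {
  std::vector<Region> regions(8);
  UnfreeLargeRange</*kForEvac=*/false>(regions, 0, 3);  // mutator large alloc: tagged
  UnfreeLargeRange</*kForEvac=*/true>(regions, 3, 5);   // evacuation copy: not tagged
  return 0;
}
```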
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index beda3299d5..a2e2e95540 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -189,7 +189,40 @@ inline bool RegionSpace::Region::ShouldBeEvacuated(EvacMode evac_mode) {
   }
   bool result = false;
   if (is_newly_allocated_) {
-    result = true;
+    // Invariant: newly allocated regions have an undefined live bytes count.
+    DCHECK_EQ(live_bytes_, static_cast<size_t>(-1));
+    if (IsAllocated()) {
+      // We always evacuate newly-allocated non-large regions as we
+      // believe they contain many dead objects (a very simple form of
+      // the generational hypothesis, even before the Sticky-Bit CC
+      // approach).
+      //
+      // TODO: Verify that assertion by collecting statistics on the
+      // number/proportion of live objects in newly allocated regions
+      // in RegionSpace::ClearFromSpace.
+      //
+      // Note that a side effect of evacuating a newly-allocated
+      // non-large region is that the "newly allocated" status will
+      // later be removed, as its live objects will be copied to an
+      // evacuation region, which won't be marked as "newly
+      // allocated" (see RegionSpace::AllocateRegion).
+      result = true;
+    } else {
+      DCHECK(IsLarge());
+      // We never want to evacuate a large region (and the associated
+      // tail regions), except if:
+      // - we are forced to do so (see the `kEvacModeForceAll` case
+      //   above); or
+      // - we know that the (sole) object contained in this region is
+      //   dead (see the corresponding logic below, in the
+      //   `kEvacModeLivePercentNewlyAllocated` case).
+      // For a newly allocated region (i.e. allocated since the
+      // previous GC started), we don't have any liveness information
+      // (the live bytes count is -1 -- also note this region has been
+      // a to-space one between the time of its allocation and now),
+      // so we prefer not to evacuate it.
+      result = false;
+    }
   } else if (evac_mode == kEvacModeLivePercentNewlyAllocated) {
     bool is_live_percent_valid = (live_bytes_ != static_cast<size_t>(-1));
     if (is_live_percent_valid) {
@@ -315,6 +348,8 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table,
         rb_table->Clear(r->Begin(), r->End());
       }
     }
+    // Invariant: There should be no newly-allocated region in the from-space.
+    DCHECK(!r->is_newly_allocated_);
   }
   DCHECK_EQ(num_expected_large_tails, 0U);
   current_region_ = &full_region_;
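The reworked decision in `RegionSpace::Region::ShouldBeEvacuated` above boils down to a two-way split on newly-allocated regions: non-large regions are evacuated on the bet that most of their objects are already dead, while large regions, for which no liveness information exists yet, are left in place. A simplified sketch of that branch, using assumed names (`RegionKind`, `ShouldEvacuateNewlyAllocated`) rather than the real `Region` class:

```cpp
#include <cassert>
#include <cstddef>

// Hypothetical, reduced region kind; the real Region state machine in
// region_space.h distinguishes many more states.
enum class RegionKind { kAllocated, kLarge };

// Decision for a newly-allocated region, per the hunk above: evacuate
// non-large regions (weak generational hypothesis); keep large regions,
// whose live-bytes count (-1) carries no information, where they are.
bool ShouldEvacuateNewlyAllocated(RegionKind kind, size_t live_bytes) {
  // Invariant: newly allocated regions have an undefined live bytes count.
  assert(live_bytes == static_cast<size_t>(-1));
  return kind == RegionKind::kAllocated;
}

int main() {
  constexpr size_t kUndefinedLiveBytes = static_cast<size_t>(-1);
  assert(ShouldEvacuateNewlyAllocated(RegionKind::kAllocated, kUndefinedLiveBytes));
  assert(!ShouldEvacuateNewlyAllocated(RegionKind::kLarge, kUndefinedLiveBytes));
  return 0;
}
```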
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index a943b70a07..8ad26baff1 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -460,6 +460,18 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
     void SetAsFromSpace() {
       DCHECK(!IsFree() && IsInToSpace());
       type_ = RegionType::kRegionTypeFromSpace;
+      if (IsNewlyAllocated()) {
+        // Clear the "newly allocated" status here, as we do not want the
+        // GC to see it when encountering references in the from-space.
+        //
+        // Invariant: There should be no newly-allocated region in the
+        // from-space (when the from-space exists, which is between the calls
+        // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
+        is_newly_allocated_ = false;
+      }
+      // Set live bytes to an invalid value, as we have made an
+      // evacuation decision (possibly based on the percentage of live
+      // bytes).
       live_bytes_ = static_cast<size_t>(-1);
     }
@@ -472,7 +484,25 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
       DCHECK(kEnableGenerationalConcurrentCopyingCollection || clear_live_bytes);
       DCHECK(!IsFree() && IsInToSpace());
       type_ = RegionType::kRegionTypeUnevacFromSpace;
+      if (IsNewlyAllocated()) {
+        // A newly allocated region set as unevac from-space must be
+        // a large or large tail region.
+        DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
+        // Always clear the live bytes of a newly allocated (large or
+        // large tail) region.
+        clear_live_bytes = true;
+        // Clear the "newly allocated" status here, as we do not want the
+        // GC to see it when encountering (and processing) references in the
+        // from-space.
+        //
+        // Invariant: There should be no newly-allocated region in the
+        // from-space (when the from-space exists, which is between the calls
+        // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
+        is_newly_allocated_ = false;
+      }
       if (clear_live_bytes) {
+        // Reset the live bytes, as we have made a non-evacuation
+        // decision (possibly based on the percentage of live bytes).
         live_bytes_ = 0;
       }
     }
@@ -482,9 +512,6 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
     void SetUnevacFromSpaceAsToSpace() {
       DCHECK(!IsFree() && IsInUnevacFromSpace());
       type_ = RegionType::kRegionTypeToSpace;
-      if (kEnableGenerationalConcurrentCopyingCollection) {
-        is_newly_allocated_ = false;
-      }
     }

     // Return whether this region should be evacuated. Used by RegionSpace::SetFromSpace.
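The `region_space.h` hunks enforce a single invariant: while a from-space exists (between `RegionSpace::SetFromSpace` and `RegionSpace::ClearFromSpace`), no region still carries the "newly allocated" bit, which is why `SetUnevacFromSpaceAsToSpace` no longer needs to clear it. A minimal sketch of those transitions, using hypothetical simplified types rather than the real nested `Region` class:

```cpp
#include <cassert>
#include <cstddef>

enum class RegionType { kToSpace, kFromSpace, kUnevacFromSpace };

// Reduced model of RegionSpace::Region: only the fields touched by the
// flip to (unevac) from-space.
struct Region {
  RegionType type = RegionType::kToSpace;
  bool is_newly_allocated = false;
  size_t live_bytes = static_cast<size_t>(-1);

  void SetAsFromSpace() {
    type = RegionType::kFromSpace;
    is_newly_allocated = false;            // GC must never see the bit in from-space.
    live_bytes = static_cast<size_t>(-1);  // Evacuation decision has been made.
  }

  void SetAsUnevacFromSpace(bool clear_live_bytes) {
    type = RegionType::kUnevacFromSpace;
    if (is_newly_allocated) {
      clear_live_bytes = true;             // Newly allocated: always reset live bytes.
      is_newly_allocated = false;          // Same invariant as SetAsFromSpace.
    }
    if (clear_live_bytes) {
      live_bytes = 0;
    }
  }
};

int main() {
  Region r;
  r.is_newly_allocated = true;
  r.SetAsUnevacFromSpace(/*clear_live_bytes=*/false);
  assert(!r.is_newly_allocated);  // Invariant holds while the from-space exists.
  assert(r.live_bytes == 0);
  return 0;
}
```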