diff options
| -rw-r--r-- | runtime/gc/collector/concurrent_copying.cc | 27 | ||||
| -rw-r--r-- | runtime/gc/space/region_space-inl.h | 14 | ||||
| -rw-r--r-- | runtime/gc/space/region_space.cc | 33 | ||||
| -rw-r--r-- | runtime/gc/space/region_space.h | 2 |
4 files changed, 54 insertions, 22 deletions
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index 8f9c187e1d..aea9708ddc 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -1644,10 +1644,10 @@ void ConcurrentCopying::ReclaimPhase() { // Record freed objects. TimingLogger::ScopedTiming split2("RecordFree", GetTimings()); // Don't include thread-locals that are in the to-space. - uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace(); - uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace(); - uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace(); - uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace(); + const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace(); + const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace(); + const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace(); + const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace(); uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent(); cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes); uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent(); @@ -1658,8 +1658,18 @@ void ConcurrentCopying::ReclaimPhase() { } CHECK_LE(to_objects, from_objects); CHECK_LE(to_bytes, from_bytes); - int64_t freed_bytes = from_bytes - to_bytes; - int64_t freed_objects = from_objects - to_objects; + // cleared_bytes and cleared_objects may be greater than the from space equivalents since + // ClearFromSpace may clear empty unevac regions. 
+ uint64_t cleared_bytes; + uint64_t cleared_objects; + { + TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings()); + region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects); + CHECK_GE(cleared_bytes, from_bytes); + CHECK_GE(cleared_objects, from_objects); + } + int64_t freed_bytes = cleared_bytes - to_bytes; + int64_t freed_objects = cleared_objects - to_objects; if (kVerboseMode) { LOG(INFO) << "RecordFree:" << " from_bytes=" << from_bytes << " from_objects=" << from_objects @@ -1678,11 +1688,6 @@ void ConcurrentCopying::ReclaimPhase() { } { - TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings()); - region_space_->ClearFromSpace(); - } - - { WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); Sweep(false); SwapBitmaps(); diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h index fffcee64ad..5809027235 100644 --- a/runtime/gc/space/region_space-inl.h +++ b/runtime/gc/space/region_space-inl.h @@ -233,14 +233,12 @@ void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) { continue; } if (r->IsLarge()) { - if (r->LiveBytes() > 0) { - // Avoid visiting dead large objects since they may contain dangling pointers to the - // from-space. - DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object"; - mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin()); - DCHECK(obj->GetClass() != nullptr); - callback(obj, arg); - } + // Avoid visiting dead large objects since they may contain dangling pointers to the + // from-space. + DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object"; + mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin()); + DCHECK(obj->GetClass() != nullptr); + callback(obj, arg); } else if (r->IsLargeTail()) { // Do nothing. 
} else { diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc index 560abe121a..1ad48438ba 100644 --- a/runtime/gc/space/region_space.cc +++ b/runtime/gc/space/region_space.cc @@ -246,16 +246,45 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc evac_region_ = &full_region_; } -void RegionSpace::ClearFromSpace() { +void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) { + DCHECK(cleared_bytes != nullptr); + DCHECK(cleared_objects != nullptr); + *cleared_bytes = 0; + *cleared_objects = 0; MutexLock mu(Thread::Current(), region_lock_); VerifyNonFreeRegionLimit(); size_t new_non_free_region_index_limit = 0; for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) { Region* r = &regions_[i]; if (r->IsInFromSpace()) { - r->Clear(); + *cleared_bytes += r->BytesAllocated(); + *cleared_objects += r->ObjectsAllocated(); --num_non_free_regions_; + r->Clear(); } else if (r->IsInUnevacFromSpace()) { + if (r->LiveBytes() == 0) { + // Special case for 0 live bytes, this means all of the objects in the region are dead and + // we can clear it. This is important for large objects since we must not visit dead ones in + // RegionSpace::Walk because they may contain dangling references to invalid objects. + // It is also better to clear these regions now instead of at the end of the next GC to + // save RAM. If we don't clear the regions here, they will be cleared next GC by the normal + // live percent evacuation logic. + size_t free_regions = 1; + // Also release RAM for large tails. 
+ while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) { + DCHECK(r->IsLarge()); + regions_[i + free_regions].Clear(); + ++free_regions; + } + *cleared_bytes += r->BytesAllocated(); + *cleared_objects += r->ObjectsAllocated(); + num_non_free_regions_ -= free_regions; + r->Clear(); + GetLiveBitmap()->ClearRange( + reinterpret_cast<mirror::Object*>(r->Begin()), + reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize)); + continue; + } size_t full_count = 0; while (r->IsInUnevacFromSpace()) { Region* const cur = &regions_[i + full_count]; diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h index 95f293bc12..253792993b 100644 @@ -215,7 +215,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { size_t FromSpaceSize() REQUIRES(!region_lock_); size_t UnevacFromSpaceSize() REQUIRES(!region_lock_); size_t ToSpaceSize() REQUIRES(!region_lock_); - void ClearFromSpace() REQUIRES(!region_lock_); + void ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) REQUIRES(!region_lock_); void AddLiveBytes(mirror::Object* ref, size_t alloc_size) { Region* reg = RefToRegionUnlocked(ref); |