author Lokesh Gidra <lokeshgidra@google.com> 2018-01-10 01:42:44 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com> 2018-01-10 01:42:44 +0000
commit d6b7e8c63f8eca25460f56f66dcae15eaa897ff0
tree   eb39f8cf17e2895b72dffcff0d72d878b235511f
parent ea443af9fd561ee3c3649ec98cafe8ecda077371
parent b4f154137c9d170fb8739f8bd3882421a6e8c152
Merge "Fix calculation of non-free region count."
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc   15
-rw-r--r--  runtime/gc/collector/concurrent_copying.h      5
-rw-r--r--  runtime/gc/space/region_space-inl.h           54
-rw-r--r--  runtime/gc/space/region_space.cc              53
-rw-r--r--  runtime/gc/space/region_space.h               20
5 files changed, 86 insertions, 61 deletions
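
Reviewer note, for orientation before the hunks: this merge splits RegionSpace's bookkeeping so that regions handed out as evacuation targets are counted in a new num_evac_regions_ counter instead of num_non_free_regions_, and it moves the peak statistic (max_peak_num_non_free_regions_) out of ConcurrentCopying into RegionSpace, where ClearFromSpace samples it just before reclaiming evacuated regions. The standalone sketch below shows only that bookkeeping; the member names mirror the patch, but the class itself is a simplification for illustration, not ART's implementation.

#include <algorithm>
#include <cstddef>

// Minimal sketch of the split counters introduced by this patch. The real
// RegionSpace guards these fields with region_lock_ and updates them while
// walking Region objects; none of that is reproduced here.
class RegionCountersSketch {
 public:
  // Allocation side: the kForEvac template flag picks the counter at compile
  // time, mirroring AllocLarge<kForEvac> / AllocateRegion(for_evac).
  template <bool kForEvac>
  void OnRegionAllocated() {
    if (kForEvac) {
      ++num_evac_regions_;      // to-space region taken during evacuation
    } else {
      ++num_non_free_regions_;  // ordinary mutator allocation
    }
  }

  // Mirrors the start of ClearFromSpace: sample the peak before any
  // from-space regions are reclaimed.
  void RecordPeak() {
    max_peak_num_non_free_regions_ =
        std::max(max_peak_num_non_free_regions_, num_non_free_regions_);
  }

  // Mirrors the end of ClearFromSpace: evacuation regions that survived the
  // cycle become ordinary non-free regions.
  void FoldEvacIntoNonFree() {
    num_non_free_regions_ += num_evac_regions_;
    num_evac_regions_ = 0;
  }

  // Replacement for the old ConcurrentCopying-side tracking; see the
  // DumpPerformanceInfo hunk below.
  size_t GetMaxPeakNumNonFreeRegions() const {
    return max_peak_num_non_free_regions_;
  }

 private:
  size_t num_non_free_regions_ = 0;
  size_t num_evac_regions_ = 0;
  size_t max_peak_num_non_free_regions_ = 0;
};

With the split, regions that exist only as transient to-space copies during a collection no longer show up in num_non_free_regions_, which is what the peak is computed from.
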
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index cf837161e0..a68227e0eb 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -88,7 +88,6 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
from_space_num_bytes_at_first_pause_(0),
mark_stack_mode_(kMarkStackModeOff),
weak_ref_access_enabled_(true),
- max_peak_num_non_free_regions_(0),
skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
mark_from_read_barrier_measurements_(false),
@@ -1755,8 +1754,6 @@ void ConcurrentCopying::ReclaimPhase() {
cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
cumulative_objects_moved_.FetchAndAddRelaxed(to_objects);
- max_peak_num_non_free_regions_ = std::max(max_peak_num_non_free_regions_,
- region_space_->GetNumNonFreeRegions());
if (kEnableFromSpaceAccountingCheck) {
CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
@@ -2269,7 +2266,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
size_t non_moving_space_bytes_allocated = 0U;
size_t bytes_allocated = 0U;
size_t dummy;
- mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
+ mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac*/ true>(
region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
bytes_allocated = region_space_bytes_allocated;
if (to_ref != nullptr) {
@@ -2341,7 +2338,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
DCHECK(region_space_->IsInToSpace(to_ref));
if (bytes_allocated > space::RegionSpace::kRegionSize) {
// Free the large alloc.
- region_space_->FreeLarge(to_ref, bytes_allocated);
+ region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated);
} else {
// Record the lost copy for later reuse.
heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
@@ -2696,10 +2693,10 @@ void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
os << "Cumulative objects moved " << cumulative_objects_moved_.LoadRelaxed() << "\n";
os << "Peak regions allocated "
- << max_peak_num_non_free_regions_ << " ("
- << PrettySize(max_peak_num_non_free_regions_ * space::RegionSpace::kRegionSize)
- << ") / " << region_space_->GetNumRegions() << " ("
- << PrettySize(region_space_->GetNumRegions() * space::RegionSpace::kRegionSize)
+ << region_space_->GetMaxPeakNumNonFreeRegions() << " ("
+ << PrettySize(region_space_->GetMaxPeakNumNonFreeRegions() * space::RegionSpace::kRegionSize)
+ << ") / " << region_space_->GetNumRegions() / 2 << " ("
+ << PrettySize(region_space_->GetNumRegions() * space::RegionSpace::kRegionSize / 2)
<< ")\n";
}
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 939e7fc8a4..8b4b58e7b1 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -308,11 +308,6 @@ class ConcurrentCopying : public GarbageCollector {
Atomic<uint64_t> cumulative_bytes_moved_;
Atomic<uint64_t> cumulative_objects_moved_;
- // Maintain the maximum of number of non-free regions collected just before
- // reclaim in each GC cycle. At this moment in cycle, highest number of
- // regions are in non-free.
- size_t max_peak_num_non_free_regions_;
-
// The skipped blocks are memory blocks/chucks that were copies of
// objects that were unused due to lost races (cas failures) at
// object copy/forward pointer install. They are reused.
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index ea2168fe9c..e74e9b169f 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -115,7 +115,7 @@ inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* byte
}
template<RegionSpace::RegionType kRegionType>
-uint64_t RegionSpace::GetBytesAllocatedInternal() {
+inline uint64_t RegionSpace::GetBytesAllocatedInternal() {
uint64_t bytes = 0;
MutexLock mu(Thread::Current(), region_lock_);
for (size_t i = 0; i < num_regions_; ++i) {
@@ -150,7 +150,7 @@ uint64_t RegionSpace::GetBytesAllocatedInternal() {
}
template<RegionSpace::RegionType kRegionType>
-uint64_t RegionSpace::GetObjectsAllocatedInternal() {
+inline uint64_t RegionSpace::GetObjectsAllocatedInternal() {
uint64_t bytes = 0;
MutexLock mu(Thread::Current(), region_lock_);
for (size_t i = 0; i < num_regions_; ++i) {
@@ -185,7 +185,7 @@ uint64_t RegionSpace::GetObjectsAllocatedInternal() {
}
template<bool kToSpaceOnly, typename Visitor>
-void RegionSpace::WalkInternal(Visitor&& visitor) {
+inline void RegionSpace::WalkInternal(Visitor&& visitor) {
// TODO: MutexLock on region_lock_ won't work due to lock order
// issues (the classloader classes lock and the monitor lock). We
// call this with threads suspended.
@@ -237,9 +237,10 @@ inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
}
template<bool kForEvac>
-mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size,
- size_t* bytes_tl_bulk_allocated) {
+inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
+ size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
DCHECK_ALIGNED(num_bytes, kAlignment);
DCHECK_GT(num_bytes, kRegionSize);
size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
@@ -274,7 +275,11 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
Region* first_reg = &regions_[left];
DCHECK(first_reg->IsFree());
first_reg->UnfreeLarge(this, time_);
- ++num_non_free_regions_;
+ if (kForEvac) {
+ ++num_evac_regions_;
+ } else {
+ ++num_non_free_regions_;
+ }
size_t allocated = num_regs * kRegionSize;
// We make 'top' all usable bytes, as the caller of this
// allocation may use all of 'usable_size' (see mirror::Array::Alloc).
@@ -283,7 +288,11 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
DCHECK_LT(p, num_regions_);
DCHECK(regions_[p].IsFree());
regions_[p].UnfreeLargeTail(this, time_);
- ++num_non_free_regions_;
+ if (kForEvac) {
+ ++num_evac_regions_;
+ } else {
+ ++num_non_free_regions_;
+ }
}
*bytes_allocated = allocated;
if (usable_size != nullptr) {
@@ -299,6 +308,35 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
return nullptr;
}
+template<bool kForEvac>
+inline void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
+ DCHECK(Contains(large_obj));
+ DCHECK_ALIGNED(large_obj, kRegionSize);
+ MutexLock mu(Thread::Current(), region_lock_);
+ uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
+ uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
+ CHECK_LT(begin_addr, end_addr);
+ for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
+ Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
+ if (addr == begin_addr) {
+ DCHECK(reg->IsLarge());
+ } else {
+ DCHECK(reg->IsLargeTail());
+ }
+ reg->Clear(/*zero_and_release_pages*/true);
+ if (kForEvac) {
+ --num_evac_regions_;
+ } else {
+ --num_non_free_regions_;
+ }
+ }
+ if (end_addr < Limit()) {
+ // If we aren't at the end of the space, check that the next region is not a large tail.
+ Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
+ DCHECK(!following_reg->IsLargeTail());
+ }
+}
+
inline size_t RegionSpace::Region::BytesAllocated() const {
if (IsLarge()) {
DCHECK_LT(begin_ + kRegionSize, Top());
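
A note on the FreeLarge body that was just moved into this header: a large allocation occupies one head region marked "large" plus zero or more "large tail" regions, and the free path walks every kRegionSize-aligned slice from the object's start address to its rounded-up end, clearing each region and decrementing whichever counter the kForEvac instantiation selects. The helper below only sketches that address walk; the constant value and helper names are assumptions made for the sketch, not taken from ART.

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch of the region walk in FreeLarge above. Offset 0 corresponds to the
// head ("large") region; every later offset is a "large tail" region.
constexpr size_t kRegionSizeSketch = 256 * 1024;  // assumed region size

inline uintptr_t AlignUpTo(uintptr_t addr, size_t alignment) {
  return (addr + alignment - 1) & ~(static_cast<uintptr_t>(alignment) - 1);
}

// obj_begin is assumed to be region-aligned, mirroring the
// DCHECK_ALIGNED(large_obj, kRegionSize) in the real code.
std::vector<uintptr_t> CoveredRegionOffsets(uintptr_t obj_begin, size_t bytes_allocated) {
  std::vector<uintptr_t> offsets;
  const uintptr_t end = AlignUpTo(obj_begin + bytes_allocated, kRegionSizeSketch);
  for (uintptr_t addr = obj_begin; addr < end; addr += kRegionSizeSketch) {
    offsets.push_back(addr - obj_begin);  // one entry per region to clear
  }
  return offsets;
}

For example, a large object a bit over two regions long yields offsets {0, kRegionSizeSketch, 2 * kRegionSizeSketch}: one IsLarge() head and two IsLargeTail() followers, matching the DCHECKs in the loop above.
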
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index a51df7c783..45cfff90cc 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -84,14 +84,18 @@ RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
: ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
kGcRetentionPolicyAlwaysCollect),
- region_lock_("Region lock", kRegionSpaceRegionLock), time_(1U) {
- size_t mem_map_size = mem_map->Size();
- CHECK_ALIGNED(mem_map_size, kRegionSize);
+ region_lock_("Region lock", kRegionSpaceRegionLock),
+ time_(1U),
+ num_regions_(mem_map->Size() / kRegionSize),
+ num_non_free_regions_(0U),
+ num_evac_regions_(0U),
+ max_peak_num_non_free_regions_(0U),
+ non_free_region_index_limit_(0U),
+ current_region_(&full_region_),
+ evac_region_(nullptr) {
+ CHECK_ALIGNED(mem_map->Size(), kRegionSize);
CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
- num_regions_ = mem_map_size / kRegionSize;
- num_non_free_regions_ = 0U;
DCHECK_GT(num_regions_, 0U);
- non_free_region_index_limit_ = 0U;
regions_.reset(new Region[num_regions_]);
uint8_t* region_addr = mem_map->Begin();
for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
@@ -112,8 +116,6 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
}
DCHECK(!full_region_.IsFree());
DCHECK(full_region_.IsAllocated());
- current_region_ = &full_region_;
- evac_region_ = nullptr;
size_t ignored;
DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}
@@ -267,6 +269,9 @@ void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_obje
VerifyNonFreeRegionLimit();
size_t new_non_free_region_index_limit = 0;
+ // Update max of peak non free region count before reclaiming evacuated regions.
+ max_peak_num_non_free_regions_ = std::max(max_peak_num_non_free_regions_,
+ num_non_free_regions_);
// Combine zeroing and releasing pages to reduce how often madvise is called. This helps
// reduce contention on the mmap semaphore. b/62194020
// clear_region adds a region to the current block. If the region is not adjacent, the
@@ -350,6 +355,8 @@ void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_obje
// Update non_free_region_index_limit_.
SetNonFreeRegionLimit(new_non_free_region_index_limit);
evac_region_ = nullptr;
+ num_non_free_regions_ += num_evac_regions_;
+ num_evac_regions_ = 0;
}
void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
@@ -411,30 +418,6 @@ void RegionSpace::Dump(std::ostream& os) const {
<< reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}
-void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
- DCHECK(Contains(large_obj));
- DCHECK_ALIGNED(large_obj, kRegionSize);
- MutexLock mu(Thread::Current(), region_lock_);
- uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
- uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
- CHECK_LT(begin_addr, end_addr);
- for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
- Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
- if (addr == begin_addr) {
- DCHECK(reg->IsLarge());
- } else {
- DCHECK(reg->IsLargeTail());
- }
- reg->Clear(/*zero_and_release_pages*/true);
- --num_non_free_regions_;
- }
- if (end_addr < Limit()) {
- // If we aren't at the end of the space, check that the next region is not a large tail.
- Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
- DCHECK(!following_reg->IsLargeTail());
- }
-}
-
void RegionSpace::DumpRegions(std::ostream& os) {
MutexLock mu(Thread::Current(), region_lock_);
for (size_t i = 0; i < num_regions_; ++i) {
@@ -572,10 +555,12 @@ RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
Region* r = &regions_[i];
if (r->IsFree()) {
r->Unfree(this, time_);
- ++num_non_free_regions_;
- if (!for_evac) {
+ if (for_evac) {
+ ++num_evac_regions_;
// Evac doesn't count as newly allocated.
+ } else {
r->SetNewlyAllocated();
+ ++num_non_free_regions_;
}
return r;
}
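
Putting the region_space.cc changes together, the two counters move through a collection in a fixed order: evacuation allocations bump num_evac_regions_, ClearFromSpace samples the peak from num_non_free_regions_ alone, reclaims from-space regions, and finally folds the evacuation count back in. The walk-through below uses plain variables and made-up numbers (40 live regions, 25 evacuation regions, 30 reclaimed) purely to illustrate that ordering; it is not a measurement or ART code.

#include <algorithm>
#include <cassert>
#include <cstddef>

int main() {
  size_t num_non_free_regions = 40;           // live regions before this GC
  size_t num_evac_regions = 0;
  size_t max_peak_num_non_free_regions = 0;

  // During evacuation, to-space regions come from AllocateRegion(for_evac) or
  // AllocLarge</*kForEvac*/ true> and are counted separately.
  num_evac_regions += 25;

  // ClearFromSpace, step 1: record the peak before reclaiming anything.
  max_peak_num_non_free_regions =
      std::max(max_peak_num_non_free_regions, num_non_free_regions);

  // ClearFromSpace, step 2: reclaim from-space regions (say 30 of the 40),
  // then fold the surviving evacuation regions into the non-free count.
  num_non_free_regions -= 30;
  num_non_free_regions += num_evac_regions;
  num_evac_regions = 0;

  assert(num_non_free_regions == 35);
  assert(max_peak_num_non_free_regions == 40);
  return 0;
}

Because the peak is sampled from num_non_free_regions_ only, the transient to-space copies still sitting in num_evac_regions_ at that point no longer inflate the "Peak regions allocated" line that DumpPerformanceInfo prints.
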
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index c9c9136c27..ef8aa52a03 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -64,6 +64,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
template<bool kForEvac>
mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
+ template<bool kForEvac>
void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
// Return the storage space required by obj.
@@ -138,9 +139,8 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
}
- // It is OK to do a racy read here as it's only for performance dump.
- size_t GetNumNonFreeRegions() const {
- return num_non_free_regions_;
+ size_t GetMaxPeakNumNonFreeRegions() const {
+ return max_peak_num_non_free_regions_;
}
size_t GetNumRegions() const {
return num_regions_;
@@ -530,8 +530,18 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
uint32_t time_; // The time as the number of collections since the startup.
- size_t num_regions_; // The number of regions in this space.
- size_t num_non_free_regions_; // The number of non-free regions in this space.
+ const size_t num_regions_; // The number of regions in this space.
+ // The number of non-free regions in this space.
+ size_t num_non_free_regions_ GUARDED_BY(region_lock_);
+
+ // The number of evac regions allocated during collection. 0 when GC not running.
+ size_t num_evac_regions_ GUARDED_BY(region_lock_);
+
+ // Maintain the maximum of number of non-free regions collected just before
+ // reclaim in each GC cycle. At this moment in cycle, highest number of
+ // regions are in non-free.
+ size_t max_peak_num_non_free_regions_;
+
std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
// The pointer to the region array.
// The upper-bound index of the non-free regions. Used to avoid scanning all regions in