author Hiroshi Yamauchi <yamauchi@google.com> 2017-02-23 15:11:56 -0800
committer Hiroshi Yamauchi <yamauchi@google.com> 2017-02-27 16:53:58 -0800
commit 6711cd8bc9f9053d653a52676177f8a29c1c36eb
tree 9a3558d3b074bfe1e03b6f2242ef28273a53067d
parent 02c488068f4793204b00b40d76eb3a891a332def
Change the region space region size to 256k.

Also add RegionSpace::non_free_region_index_limit_ to avoid the need
to scan the free end of the region table in SetFromSpace(), which
more than compensates for the pause time increase due to the larger
number of regions.

Ritz EAAC avg pause time (angler little core / -Xmx512m):
  Before: 186us
  After:  436us (without non_free_region_index_limit_)
  After:  103us (with non_free_region_index_limit_)

Partially revert aog/327342 and remove the temporary adjustment of
max/min-free. Changing the region size to 256k was enough to avoid
the GCE boot issue (b/34576638), but 154-gc-loop still barely fails,
so make the 154-gc-loop failure check less strict.

Allocation performance (angler little core / -Xmx512m):
  Ritz EAAC:  Before 939.6   After 937.8
  BinaryTree: Before 603350  After 620200 (-3%)

Bug: 12687968
Test: test-art-host
Test: GCE boot

Change-Id: I1495ab4ced806e1c4d779d49b56cea618817a0d6
-rw-r--r--  runtime/gc/accounting/read_barrier_table.h |  2
-rw-r--r--  runtime/gc/heap.cc                          |  7
-rw-r--r--  runtime/gc/space/region_space-inl.h         |  8
-rw-r--r--  runtime/gc/space/region_space.cc            | 24
-rw-r--r--  runtime/gc/space/region_space.h             | 43
-rw-r--r--  test/154-gc-loop/src/Main.java              |  2
6 files changed, 66 insertions(+), 20 deletions(-)
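
The core of the pause-time win is the new non_free_region_index_limit_: a
high-water mark over the region table, so scans can stop at the last non-free
region instead of walking all num_regions_ entries. A minimal standalone
sketch of the idea follows; the names Region, RegionTable, and limit_ are
illustrative, not ART's actual types.

// Standalone sketch (not ART's code) of the high-water-mark bookkeeping.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

struct Region {
  bool free = true;
};

class RegionTable {
 public:
  explicit RegionTable(size_t n) : regions_(n) {}

  void Unfree(size_t idx) {
    assert(regions_[idx].free);
    regions_[idx].free = false;
    // Maintain the invariant: regions_[i].free for all i >= limit_.
    limit_ = std::max(limit_, idx + 1);
  }

  // Scans only up to limit_ rather than regions_.size().
  size_t CountNonFree() const {
    size_t count = 0;
    for (size_t i = 0; i < limit_; ++i) {
      if (!regions_[i].free) ++count;
    }
    return count;
  }

 private:
  std::vector<Region> regions_;
  size_t limit_ = 0;  // One past the highest non-free region index.
};

With 256k regions there are 4x as many regions as with 1 MB regions at the
same heap size, which is why capping the scan matters for pause time.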
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 86266e2500..e77a5b8e39 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -80,7 +80,7 @@ class ReadBarrierTable {
}
// This should match RegionSpace::kRegionSize. static_assert'ed in concurrent_copying.h.
- static constexpr size_t kRegionSize = 1 * MB;
+ static constexpr size_t kRegionSize = 256 * KB;
private:
static constexpr uint64_t kHeapCapacity = 4ULL * GB; // low 4gb.
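
As the comment above notes, the two kRegionSize constants are kept in sync by
a static_assert in concurrent_copying.h. A sketch of what such a compile-time
check looks like; the exact form in ART may differ, and it assumes both
headers are included:

// Sketch only; the real assert lives in concurrent_copying.h.
static_assert(space::RegionSpace::kRegionSize ==
                  accounting::ReadBarrierTable::kRegionSize,
              "RegionSpace and ReadBarrierTable must agree on the region size");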
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 53be30eafc..61cf9f1448 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3551,11 +3551,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
collector::GcType gc_type = collector_ran->GetGcType();
const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
// foreground.
- // Ensure at least 2.5 MB to temporarily fix excessive GC caused by TLAB ergonomics.
- const uint64_t adjusted_min_free = std::max(static_cast<uint64_t>(min_free_ * multiplier),
- static_cast<uint64_t>(5 * MB / 2));
- const uint64_t adjusted_max_free = std::max(static_cast<uint64_t>(max_free_ * multiplier),
- static_cast<uint64_t>(5 * MB / 2));
+ const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
+ const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 3e79223498..31d8c00252 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -78,7 +78,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = &regions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
r->SetNewlyAllocated();
++num_non_free_regions_;
obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
@@ -91,7 +91,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = &regions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
++num_non_free_regions_;
obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
CHECK(obj != nullptr);
@@ -310,13 +310,13 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
DCHECK_EQ(left + num_regs, right);
Region* first_reg = &regions_[left];
DCHECK(first_reg->IsFree());
- first_reg->UnfreeLarge(time_);
+ first_reg->UnfreeLarge(this, time_);
++num_non_free_regions_;
first_reg->SetTop(first_reg->Begin() + num_bytes);
for (size_t p = left + 1; p < right; ++p) {
DCHECK_LT(p, num_regions_);
DCHECK(regions_[p].IsFree());
- regions_[p].UnfreeLargeTail(time_);
+ regions_[p].UnfreeLargeTail(this, time_);
++num_non_free_regions_;
}
*bytes_allocated = num_bytes;
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 8077319ec7..ed93f6242a 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -54,6 +54,7 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
num_regions_ = mem_map_size / kRegionSize;
num_non_free_regions_ = 0U;
DCHECK_GT(num_regions_, 0U);
+ non_free_region_index_limit_ = 0U;
regions_.reset(new Region[num_regions_]);
uint8_t* region_addr = mem_map->Begin();
for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
@@ -160,7 +161,11 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
MutexLock mu(Thread::Current(), region_lock_);
size_t num_expected_large_tails = 0;
bool prev_large_evacuated = false;
- for (size_t i = 0; i < num_regions_; ++i) {
+ VerifyNonFreeRegionLimit();
+ const size_t iter_limit = kUseTableLookupReadBarrier
+ ? num_regions_
+ : std::min(num_regions_, non_free_region_index_limit_);
+ for (size_t i = 0; i < iter_limit; ++i) {
Region* r = &regions_[i];
RegionState state = r->State();
RegionType type = r->Type();
@@ -204,13 +209,16 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
}
}
}
+ DCHECK_EQ(num_expected_large_tails, 0U);
current_region_ = &full_region_;
evac_region_ = &full_region_;
}
void RegionSpace::ClearFromSpace() {
MutexLock mu(Thread::Current(), region_lock_);
- for (size_t i = 0; i < num_regions_; ++i) {
+ VerifyNonFreeRegionLimit();
+ size_t new_non_free_region_index_limit = 0;
+ for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
Region* r = &regions_[i];
if (r->IsInFromSpace()) {
r->Clear();
@@ -223,6 +231,7 @@ void RegionSpace::ClearFromSpace() {
cur->LiveBytes() != static_cast<size_t>(cur->Top() - cur->Begin())) {
break;
}
+ DCHECK(cur->IsInUnevacFromSpace());
if (full_count != 0) {
cur->SetUnevacFromSpaceAsToSpace();
}
@@ -239,7 +248,15 @@ void RegionSpace::ClearFromSpace() {
i += full_count - 1;
}
}
+ // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
+ Region* last_checked_region = &regions_[i];
+ if (!last_checked_region->IsFree()) {
+ new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
+ last_checked_region->Idx() + 1);
+ }
}
+ // Update non_free_region_index_limit_.
+ SetNonFreeRegionLimit(new_non_free_region_index_limit);
evac_region_ = nullptr;
}
@@ -292,6 +309,7 @@ void RegionSpace::Clear() {
}
r->Clear();
}
+ SetNonFreeRegionLimit(0);
current_region_ = &full_region_;
evac_region_ = &full_region_;
}
@@ -358,7 +376,7 @@ bool RegionSpace::AllocNewTlab(Thread* self) {
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = &regions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
++num_non_free_regions_;
r->SetNewlyAllocated();
r->SetTop(r->End());
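
ClearFromSpace() above both consumes and recomputes the limit: it frees
from-space regions while recording the highest index that remains non-free. A
simplified, self-contained sketch of that recomputation follows; the real
code also coalesces runs of unevacuated regions, and the free/in_from_space
flags here are illustrative:

#include <algorithm>
#include <cstddef>
#include <vector>

struct Region {
  bool free = true;
  bool in_from_space = false;
};

// Free evacuated from-space regions and return the new limit: one past the
// highest index that is still non-free. Scans only up to the old limit.
size_t ClearFromSpaceAndShrinkLimit(std::vector<Region>& regions, size_t limit) {
  size_t new_limit = 0;
  for (size_t i = 0; i < std::min(regions.size(), limit); ++i) {
    if (regions[i].in_from_space) {
      regions[i].free = true;  // Reclaim the evacuated region.
      regions[i].in_from_space = false;
    }
    if (!regions[i].free) {
      new_limit = i + 1;  // Track the highest non-free index, plus one.
    }
  }
  return new_limit;  // Caller stores this as non_free_region_index_limit_.
}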
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index feab9b0fe9..39893129eb 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -166,7 +166,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
// Object alignment within the space.
static constexpr size_t kAlignment = kObjectAlignment;
// The region size.
- static constexpr size_t kRegionSize = 1 * MB;
+ static constexpr size_t kRegionSize = 256 * KB;
bool IsInFromSpace(mirror::Object* ref) {
if (HasAddress(ref)) {
@@ -307,25 +307,31 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
}
// Given a free region, declare it non-free (allocated).
- void Unfree(uint32_t alloc_time) {
+ void Unfree(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateAllocated;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
- void UnfreeLarge(uint32_t alloc_time) {
+ void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateLarge;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
- void UnfreeLargeTail(uint32_t alloc_time) {
+ void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateLargeTail;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
void SetNewlyAllocated() {
@@ -341,7 +347,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
bool IsLarge() const {
bool is_large = state_ == RegionState::kRegionStateLarge;
if (is_large) {
- DCHECK_LT(begin_ + 1 * MB, Top());
+ DCHECK_LT(begin_ + kRegionSize, Top());
}
return is_large;
}
@@ -428,7 +434,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
size_t ObjectsAllocated() const {
if (IsLarge()) {
- DCHECK_LT(begin_ + 1 * MB, Top());
+ DCHECK_LT(begin_ + kRegionSize, Top());
DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
return 1;
} else if (IsLargeTail()) {
@@ -519,6 +525,27 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
mirror::Object* GetNextObject(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) {
+ DCHECK_LT(new_non_free_region_index, num_regions_);
+ non_free_region_index_limit_ = std::max(non_free_region_index_limit_,
+ new_non_free_region_index + 1);
+ VerifyNonFreeRegionLimit();
+ }
+
+ void SetNonFreeRegionLimit(size_t new_non_free_region_index_limit) REQUIRES(region_lock_) {
+ DCHECK_LE(new_non_free_region_index_limit, num_regions_);
+ non_free_region_index_limit_ = new_non_free_region_index_limit;
+ VerifyNonFreeRegionLimit();
+ }
+
+ void VerifyNonFreeRegionLimit() REQUIRES(region_lock_) {
+ if (kIsDebugBuild && non_free_region_index_limit_ < num_regions_) {
+ for (size_t i = non_free_region_index_limit_; i < num_regions_; ++i) {
+ CHECK(regions_[i].IsFree());
+ }
+ }
+ }
+
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
uint32_t time_; // The time as the number of collections since the startup.
@@ -526,6 +553,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
size_t num_non_free_regions_; // The number of non-free regions in this space.
std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
// The pointer to the region array.
+ // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
+ // SetFromSpace(). Invariant: for all i >= non_free_region_index_limit_, regions_[i].IsFree() is
+ // true.
+ size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);
Region* current_region_; // The region that's being allocated currently.
Region* evac_region_; // The region that's being evacuated to currently.
Region full_region_; // The dummy/sentinel region that looks full.
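
The new Unfree*() signatures take the owning RegionSpace so that the
REQUIRES(region_space->region_lock_) annotation can name the lock, and
GUARDED_BY ties non_free_region_index_limit_ to the same lock. These macros
wrap Clang's thread-safety attributes; a minimal sketch of the underlying
pattern, with illustrative class names, compiled with clang -Wthread-safety:

// Simplified model of ART's REQUIRES/GUARDED_BY macros over Clang's
// thread-safety attributes (ART defines the macros in base/mutex.h).
#include <cstddef>

class __attribute__((capability("mutex"))) Mutex {
 public:
  void Lock() __attribute__((acquire_capability())) {}
  void Unlock() __attribute__((release_capability())) {}
};

class RegionTable {
 public:
  // -Wthread-safety rejects callers that do not hold lock_.
  void AdjustLimit(size_t idx) __attribute__((requires_capability(lock_))) {
    limit_ = idx + 1;
  }

 private:
  Mutex lock_;
  size_t limit_ __attribute__((guarded_by(lock_))) = 0;  // Guarded state.
};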
diff --git a/test/154-gc-loop/src/Main.java b/test/154-gc-loop/src/Main.java
index 3a256c109e..2228ca2783 100644
--- a/test/154-gc-loop/src/Main.java
+++ b/test/154-gc-loop/src/Main.java
@@ -38,7 +38,7 @@ public class Main {
}
} catch (Exception e) {}
System.out.println("Finalize count too large: " +
- ((finalizeCounter >= 10) ? Integer.toString(finalizeCounter) : "false"));
+ ((finalizeCounter >= 12) ? Integer.toString(finalizeCounter) : "false"));
}
private static native void backgroundProcessState();