Revert "Revert "Refeactor region clearing / allocation code""
Test: test-art-host
This reverts commit b645c3069f0e8950f3207778d1dcaaeff810287b.
Change-Id: If5e632c39438d735fb9563af28029b9571dc33fa
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index fc24fc2..82e8f20 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -48,58 +48,32 @@
mirror::Object* obj;
if (LIKELY(num_bytes <= kRegionSize)) {
// Non-large object.
- if (!kForEvac) {
- obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
- bytes_tl_bulk_allocated);
- } else {
- DCHECK(evac_region_ != nullptr);
- obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
- bytes_tl_bulk_allocated);
- }
+ obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
+ bytes_allocated,
+ usable_size,
+ bytes_tl_bulk_allocated);
if (LIKELY(obj != nullptr)) {
return obj;
}
MutexLock mu(Thread::Current(), region_lock_);
// Retry with current region since another thread may have updated it.
- if (!kForEvac) {
- obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
- bytes_tl_bulk_allocated);
- } else {
- obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
- bytes_tl_bulk_allocated);
- }
+ obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
+ bytes_allocated,
+ usable_size,
+ bytes_tl_bulk_allocated);
if (LIKELY(obj != nullptr)) {
return obj;
}
- if (!kForEvac) {
- // Retain sufficient free regions for full evacuation.
- if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
- return nullptr;
+ Region* r = AllocateRegion(kForEvac);
+ if (LIKELY(r != nullptr)) {
+ if (kForEvac) {
+ evac_region_ = r;
+ } else {
+ current_region_ = r;
}
- for (size_t i = 0; i < num_regions_; ++i) {
- Region* r = &regions_[i];
- if (r->IsFree()) {
- r->Unfree(this, time_);
- r->SetNewlyAllocated();
- ++num_non_free_regions_;
- obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
- CHECK(obj != nullptr);
- current_region_ = r;
- return obj;
- }
- }
- } else {
- for (size_t i = 0; i < num_regions_; ++i) {
- Region* r = &regions_[i];
- if (r->IsFree()) {
- r->Unfree(this, time_);
- ++num_non_free_regions_;
- obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
- CHECK(obj != nullptr);
- evac_region_ = r;
- return obj;
- }
- }
+ obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
+ CHECK(obj != nullptr);
+ return obj;
}
} else {
// Large object.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 8d8c488..dba252d 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -449,21 +449,14 @@
MutexLock mu(self, region_lock_);
RevokeThreadLocalBuffersLocked(self);
// Retain sufficient free regions for full evacuation.
- if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
- return false;
- }
- for (size_t i = 0; i < num_regions_; ++i) {
- Region* r = &regions_[i];
- if (r->IsFree()) {
- r->Unfree(this, time_);
- ++num_non_free_regions_;
- r->SetNewlyAllocated();
- r->SetTop(r->End());
- r->is_a_tlab_ = true;
- r->thread_ = self;
- self->SetTlab(r->Begin(), r->Begin() + min_bytes, r->End());
- return true;
- }
+
+ Region* r = AllocateRegion(/*for_evac*/ false);
+ if (r != nullptr) {
+ r->is_a_tlab_ = true;
+ r->thread_ = self;
+ r->SetTop(r->End());
+ self->SetTlab(r->Begin(), r->Begin() + min_bytes, r->End());
+ return true;
}
return false;
}
@@ -543,6 +536,62 @@
return num_bytes;
}
+void RegionSpace::Region::Clear(bool zero_and_release_pages) {
+ top_.StoreRelaxed(begin_);
+ state_ = RegionState::kRegionStateFree;
+ type_ = RegionType::kRegionTypeNone;
+ objects_allocated_.StoreRelaxed(0);
+ alloc_time_ = 0;
+ live_bytes_ = static_cast<size_t>(-1);
+ if (zero_and_release_pages) {
+ ZeroAndReleasePages(begin_, end_ - begin_);
+ }
+ is_newly_allocated_ = false;
+ is_a_tlab_ = false;
+ thread_ = nullptr;
+}
+
+RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
+ if (!for_evac && (num_non_free_regions_ + 1) * 2 > num_regions_) {
+ return nullptr;
+ }
+ for (size_t i = 0; i < num_regions_; ++i) {
+ Region* r = &regions_[i];
+ if (r->IsFree()) {
+ r->Unfree(this, time_);
+ ++num_non_free_regions_;
+ if (!for_evac) {
+ // Evac doesn't count as newly allocated.
+ r->SetNewlyAllocated();
+ }
+ return r;
+ }
+ }
+ return nullptr;
+}
+
+void RegionSpace::Region::MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time) {
+ DCHECK(IsFree());
+ alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
+ type_ = RegionType::kRegionTypeToSpace;
+}
+
+void RegionSpace::Region::Unfree(RegionSpace* region_space, uint32_t alloc_time) {
+ MarkAsAllocated(region_space, alloc_time);
+ state_ = RegionState::kRegionStateAllocated;
+}
+
+void RegionSpace::Region::UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time) {
+ MarkAsAllocated(region_space, alloc_time);
+ state_ = RegionState::kRegionStateLarge;
+}
+
+void RegionSpace::Region::UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time) {
+ MarkAsAllocated(region_space, alloc_time);
+ state_ = RegionState::kRegionStateLargeTail;
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 323ccdb..8907b07 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -284,20 +284,7 @@
return type_;
}
- void Clear(bool zero_and_release_pages) {
- top_.StoreRelaxed(begin_);
- state_ = RegionState::kRegionStateFree;
- type_ = RegionType::kRegionTypeNone;
- objects_allocated_.StoreRelaxed(0);
- alloc_time_ = 0;
- live_bytes_ = static_cast<size_t>(-1);
- if (zero_and_release_pages) {
- ZeroAndReleasePages(begin_, end_ - begin_);
- }
- is_newly_allocated_ = false;
- is_a_tlab_ = false;
- thread_ = nullptr;
- }
+ void Clear(bool zero_and_release_pages);
ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
@@ -315,31 +302,16 @@
// Given a free region, declare it non-free (allocated).
void Unfree(RegionSpace* region_space, uint32_t alloc_time)
- REQUIRES(region_space->region_lock_) {
- DCHECK(IsFree());
- state_ = RegionState::kRegionStateAllocated;
- type_ = RegionType::kRegionTypeToSpace;
- alloc_time_ = alloc_time;
- region_space->AdjustNonFreeRegionLimit(idx_);
- }
+ REQUIRES(region_space->region_lock_);
void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
- REQUIRES(region_space->region_lock_) {
- DCHECK(IsFree());
- state_ = RegionState::kRegionStateLarge;
- type_ = RegionType::kRegionTypeToSpace;
- alloc_time_ = alloc_time;
- region_space->AdjustNonFreeRegionLimit(idx_);
- }
+ REQUIRES(region_space->region_lock_);
void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
- REQUIRES(region_space->region_lock_) {
- DCHECK(IsFree());
- state_ = RegionState::kRegionStateLargeTail;
- type_ = RegionType::kRegionTypeToSpace;
- alloc_time_ = alloc_time;
- region_space->AdjustNonFreeRegionLimit(idx_);
- }
+ REQUIRES(region_space->region_lock_);
+
+ void MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_);
void SetNewlyAllocated() {
is_newly_allocated_ = true;
@@ -539,6 +511,8 @@
}
}
+ Region* AllocateRegion(bool for_evac) REQUIRES(region_lock_);
+
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
uint32_t time_; // The time as the number of collections since the startup.