Revert "Refeactor region clearing / allocation code"
This reverts commit 2347b393892cebc0e43e7f60a7a4a5b495147e90.
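
Reverting restores the open-coded scans over regions_ in the
allocation paths in region_space-inl.h and in the TLAB allocation path
in region_space.cc, moves the Region::Clear() and Unfree() /
UnfreeLarge() / UnfreeLargeTail() definitions back inline into
region_space.h, and drops the AllocateRegion() and MarkAsAllocated()
helpers added by the reverted change.
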
Change-Id: If49e2676689ad2dd42009f693a1fab79b9dc2cff
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 51c57ab..fc24fc2 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -48,25 +48,59 @@
mirror::Object* obj;
if (LIKELY(num_bytes <= kRegionSize)) {
// Non-large object.
- obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
- bytes_allocated,
- usable_size,
- bytes_tl_bulk_allocated);
+ if (!kForEvac) {
+ obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
+ } else {
+ DCHECK(evac_region_ != nullptr);
+ obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
+ }
if (LIKELY(obj != nullptr)) {
return obj;
}
MutexLock mu(Thread::Current(), region_lock_);
// Retry with current region since another thread may have updated it.
- obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
- bytes_allocated,
- usable_size,
- bytes_tl_bulk_allocated);
- Region* r = AllocateRegion(kForEvac);
- if (LIKELY(r != nullptr)) {
- obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
- CHECK(obj != nullptr);
+ if (!kForEvac) {
+ obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
+ } else {
+ obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
+ }
+ if (LIKELY(obj != nullptr)) {
return obj;
}
+ if (!kForEvac) {
+ // Retain sufficient free regions for full evacuation.
+ if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
+ return nullptr;
+ }
+ for (size_t i = 0; i < num_regions_; ++i) {
+ Region* r = &regions_[i];
+ if (r->IsFree()) {
+ r->Unfree(this, time_);
+ r->SetNewlyAllocated();
+ ++num_non_free_regions_;
+ obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
+ CHECK(obj != nullptr);
+ current_region_ = r;
+ return obj;
+ }
+ }
+ } else {
+ for (size_t i = 0; i < num_regions_; ++i) {
+ Region* r = &regions_[i];
+ if (r->IsFree()) {
+ r->Unfree(this, time_);
+ ++num_non_free_regions_;
+ obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
+ CHECK(obj != nullptr);
+ evac_region_ = r;
+ return obj;
+ }
+ }
+ }
} else {
// Large object.
obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size,
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 32ba6a5..8d8c488 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -449,14 +449,21 @@
MutexLock mu(self, region_lock_);
RevokeThreadLocalBuffersLocked(self);
// Retain sufficient free regions for full evacuation.
-
- Region* r = AllocateRegion(/*for_evac*/ false);
- if (r != nullptr) {
- r->is_a_tlab_ = true;
- r->thread_ = self;
- r->SetTop(r->End());
- self->SetTlab(r->Begin(), r->Begin() + min_bytes, r->End());
- return true;
+ if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
+ return false;
+ }
+ for (size_t i = 0; i < num_regions_; ++i) {
+ Region* r = &regions_[i];
+ if (r->IsFree()) {
+ r->Unfree(this, time_);
+ ++num_non_free_regions_;
+ r->SetNewlyAllocated();
+ r->SetTop(r->End());
+ r->is_a_tlab_ = true;
+ r->thread_ = self;
+ self->SetTlab(r->Begin(), r->Begin() + min_bytes, r->End());
+ return true;
+ }
}
return false;
}
@@ -536,65 +543,6 @@
return num_bytes;
}

-void RegionSpace::Region::Clear(bool zero_and_release_pages) {
- top_.StoreRelaxed(begin_);
- state_ = RegionState::kRegionStateFree;
- type_ = RegionType::kRegionTypeNone;
- objects_allocated_.StoreRelaxed(0);
- alloc_time_ = 0;
- live_bytes_ = static_cast<size_t>(-1);
- if (zero_and_release_pages) {
- ZeroAndReleasePages(begin_, end_ - begin_);
- }
- is_newly_allocated_ = false;
- is_a_tlab_ = false;
- thread_ = nullptr;
-}
-
-RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
- if (!for_evac && (num_non_free_regions_ + 1) * 2 > num_regions_) {
- return nullptr;
- }
- for (size_t i = 0; i < num_regions_; ++i) {
- Region* r = &regions_[i];
- if (r->IsFree()) {
- r->Unfree(this, time_);
- ++num_non_free_regions_;
- if (for_evac) {
- // Evac doesn't count as newly allocated.
- evac_region_ = r;
- } else {
- current_region_ = r;
- r->SetNewlyAllocated();
- }
- return r;
- }
- }
- return nullptr;
-}
-
-void RegionSpace::Region::MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time) {
- DCHECK(IsFree());
- alloc_time_ = alloc_time;
- region_space->AdjustNonFreeRegionLimit(idx_);
- type_ = RegionType::kRegionTypeToSpace;
-}
-
-void RegionSpace::Region::Unfree(RegionSpace* region_space, uint32_t alloc_time) {
- MarkAsAllocated(region_space, alloc_time);
- state_ = RegionState::kRegionStateAllocated;
-}
-
-void RegionSpace::Region::UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time) {
- MarkAsAllocated(region_space, alloc_time);
- state_ = RegionState::kRegionStateLarge;
-}
-
-void RegionSpace::Region::UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time) {
- MarkAsAllocated(region_space, alloc_time);
- state_ = RegionState::kRegionStateLargeTail;
-}
-
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 8907b07..323ccdb 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -284,7 +284,20 @@
return type_;
}

- void Clear(bool zero_and_release_pages);
+ void Clear(bool zero_and_release_pages) {
+ top_.StoreRelaxed(begin_);
+ state_ = RegionState::kRegionStateFree;
+ type_ = RegionType::kRegionTypeNone;
+ objects_allocated_.StoreRelaxed(0);
+ alloc_time_ = 0;
+ live_bytes_ = static_cast<size_t>(-1);
+ if (zero_and_release_pages) {
+ ZeroAndReleasePages(begin_, end_ - begin_);
+ }
+ is_newly_allocated_ = false;
+ is_a_tlab_ = false;
+ thread_ = nullptr;
+ }

ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size,
@@ -302,16 +315,31 @@

// Given a free region, declare it non-free (allocated).
void Unfree(RegionSpace* region_space, uint32_t alloc_time)
- REQUIRES(region_space->region_lock_);
+ REQUIRES(region_space->region_lock_) {
+ DCHECK(IsFree());
+ state_ = RegionState::kRegionStateAllocated;
+ type_ = RegionType::kRegionTypeToSpace;
+ alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
+ }

void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
- REQUIRES(region_space->region_lock_);
+ REQUIRES(region_space->region_lock_) {
+ DCHECK(IsFree());
+ state_ = RegionState::kRegionStateLarge;
+ type_ = RegionType::kRegionTypeToSpace;
+ alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
+ }

void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
- REQUIRES(region_space->region_lock_);
-
- void MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time)
- REQUIRES(region_space->region_lock_);
+ REQUIRES(region_space->region_lock_) {
+ DCHECK(IsFree());
+ state_ = RegionState::kRegionStateLargeTail;
+ type_ = RegionType::kRegionTypeToSpace;
+ alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
+ }

void SetNewlyAllocated() {
is_newly_allocated_ = true;
@@ -511,8 +539,6 @@
}
}

- Region* AllocateRegion(bool for_evac) REQUIRES(region_lock_);
-
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

uint32_t time_; // The time as the number of collections since the startup.
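
Background on the restored guard: (num_non_free_regions_ + 1) * 2 >
num_regions_ refuses a non-evacuation region claim whenever taking one
more region would leave fewer than half of the regions free, so that a
full evacuation still has free to-space regions to copy into. Below is
a minimal standalone sketch of that invariant; the type and member
names are hypothetical stand-ins, not the real ART identifiers.

#include <cstddef>

// Illustrative stand-in for RegionSpace's region bookkeeping.
struct ToyRegionSpace {
  size_t num_regions;           // total regions in the space
  size_t num_non_free_regions;  // regions currently holding data

  // Mirrors the restored guard: claiming one more region is allowed
  // only while non-free regions stay at or below half of the space.
  bool CanClaimMutatorRegion() const {
    return (num_non_free_regions + 1) * 2 <= num_regions;
  }
};

int main() {
  ToyRegionSpace space{/*num_regions=*/8, /*num_non_free_regions=*/3};
  bool ok = space.CanClaimMutatorRegion();        // 4th of 8 regions: allowed
  space.num_non_free_regions = 4;
  bool blocked = !space.CanClaimMutatorRegion();  // 5th of 8: refused
  return (ok && blocked) ? 0 : 1;
}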