 runtime/gc/space/region_space-inl.h |  2 +-
 runtime/gc/space/region_space.cc    | 44 +++++++++++++++++++++++++++++++++++---------
 runtime/gc/space/region_space.h     |  7 +------
 3 files changed, 37 insertions(+), 16 deletions(-)
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 436eb2c09b..e30b63ace8 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -407,7 +407,7 @@ inline void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_alloc
       --num_non_free_regions_;
     }
   }
-  if (end_addr < Limit()) {
+  if (kIsDebugBuild && end_addr < Limit()) {
     // If we aren't at the end of the space, check that the next region is not a large tail.
     Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
     DCHECK(!following_reg->IsLargeTail());
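
Why the new kIsDebugBuild guard: the DCHECK on the following region is debug-only, but the RefToRegionLocked() call feeding it sits outside the DCHECK and would still execute in release builds. Folding the whole block under the compile-time constant lets the compiler drop it entirely when DCHECKs are disabled. A minimal standalone sketch of the idiom (not ART code; kIsDebugBuild, ExpensiveLookup and the assert are stand-ins for the real constants and checks):

#include <cassert>

constexpr bool kIsDebugBuild =
#ifdef NDEBUG
    false;
#else
    true;
#endif

// Stand-in for a lookup such as RefToRegionLocked() that is only needed to
// feed a debug assertion.
int ExpensiveLookup(int addr) { return addr + 1; }

void CheckFollowingRegion(int end_addr, int limit) {
  if (kIsDebugBuild && end_addr < limit) {
    // Without the kIsDebugBuild guard, this lookup would run even in release
    // builds, where the assertion below compiles away.
    int following = ExpensiveLookup(end_addr);
    assert(following != 0);
  }
}
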
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 1ed81d053c..b2a0a97d3b 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -209,6 +209,41 @@ inline bool RegionSpace::Region::ShouldBeEvacuated(EvacMode evac_mode) {
   return result;
 }
 
+void RegionSpace::ZeroLiveBytesForLargeObject(mirror::Object* obj) {
+  // This method is only used when Generational CC collection is enabled.
+  DCHECK(kEnableGenerationalConcurrentCopyingCollection);
+
+  // This code uses a logic similar to the one used in RegionSpace::FreeLarge
+  // to traverse the regions supporting `obj`.
+  // TODO: Refactor.
+  DCHECK(IsLargeObject(obj));
+  DCHECK_ALIGNED(obj, kRegionSize);
+  size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
+  DCHECK_GT(obj_size, space::RegionSpace::kRegionSize);
+  // Size of the memory area allocated for `obj`.
+  size_t obj_alloc_size = RoundUp(obj_size, space::RegionSpace::kRegionSize);
+  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(obj);
+  uint8_t* end_addr = begin_addr + obj_alloc_size;
+  DCHECK_ALIGNED(end_addr, kRegionSize);
+
+  // Zero the live bytes of the large region and large tail regions containing the object.
+  MutexLock mu(Thread::Current(), region_lock_);
+  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
+    Region* region = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
+    if (addr == begin_addr) {
+      DCHECK(region->IsLarge());
+    } else {
+      DCHECK(region->IsLargeTail());
+    }
+    region->ZeroLiveBytes();
+  }
+  if (kIsDebugBuild && end_addr < Limit()) {
+    // If we aren't at the end of the space, check that the next region is not a large tail.
+    Region* following_region = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
+    DCHECK(!following_region->IsLargeTail());
+  }
+}
+
 // Determine which regions to evacuate and mark them as
 // from-space. Mark the rest as unevacuated from-space.
 void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table,
@@ -371,16 +406,7 @@ void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
       while (i + regions_to_clear_bitmap < num_regions_) {
         Region* const cur = &regions_[i + regions_to_clear_bitmap];
         if (!cur->AllAllocatedBytesAreLive()) {
-#if 0
-          // FIXME: These tests fail the following assertion with Sticky-Bit (Generational) CC:
-          //
-          // 004-ThreadStress
-          // 061-out-of-memory
-          // 080-oom-throw
-          // 134-reg-promotion
-          // 617-clinit-oome
           DCHECK(!cur->IsLargeTail());
-#endif
           break;
         }
         CHECK(cur->IsInUnevacFromSpace());
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index d86304af48..3f9644dcf7 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -271,12 +271,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
 
   // Zero live bytes for a large object, used by young gen CC for marking newly allocated large
   // objects.
-  void ZeroLiveBytesForLargeObject(mirror::Object* ref) {
-    // This method is only used when Generational CC collection is enabled.
-    DCHECK(kEnableGenerationalConcurrentCopyingCollection);
-    DCHECK(IsLargeObject(ref));
-    RefToRegionUnlocked(ref)->ZeroLiveBytes();
-  }
+  void ZeroLiveBytesForLargeObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Determine which regions to evacuate and tag them as
   // from-space. Tag the rest as unevacuated from-space.
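
The header change replaces the old inline body, which zeroed live bytes only on the single region returned by RefToRegionUnlocked(), with an out-of-line definition that takes region_lock_ and walks every region backing the large object: the head (Large) region and all of its LargeTail regions. Keeping the tail regions' live-byte accounting consistent with the head is what lines up with re-enabling the DCHECK(!cur->IsLargeTail()) in ClearFromSpace above. A simplified standalone model of that traversal (illustrative only; the Region struct, RegionKind and the 256 KiB region size are stand-ins, not the ART types):

#include <cstddef>
#include <vector>

constexpr size_t kRegionSize = 256 * 1024;  // stand-in region size

enum class RegionKind { kLarge, kLargeTail };

struct Region {
  RegionKind kind;
  size_t live_bytes;
};

// Zero live bytes for a large object of obj_size bytes whose head region is
// regions[first]: the head is a "large" region and the remaining regions are
// "large tail" regions, mirroring the Large/LargeTail DCHECKs in the real method.
void ZeroLiveBytesForLargeObject(std::vector<Region>& regions,
                                 size_t first,
                                 size_t obj_size) {
  size_t num_regions = (obj_size + kRegionSize - 1) / kRegionSize;  // RoundUp
  for (size_t i = 0; i < num_regions; ++i) {
    regions[first + i].live_bytes = 0;
  }
}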