Revert "Delete GSS"
This reverts commit 1397ea10a8e0409c74efe251721a2ee9ffaa3127.
Reason for revert: Tests failing
Change-Id: Ie25f7342f67685e2b4f80f8344a8a4d7e988f09f
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index a757c91..dd9221d 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -412,6 +412,8 @@
return gc::kCollectorTypeCMS;
} else if (option == "SS") {
return gc::kCollectorTypeSS;
+ } else if (option == "GSS") {
+ return gc::kCollectorTypeGSS;
} else if (option == "CC") {
return gc::kCollectorTypeCC;
} else {
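
For reference, the hunk above re-adds "GSS" to the -Xgc option parser. A minimal standalone sketch of the same string-to-enum mapping, using a toy enum rather than ART's gc::CollectorType:

    #include <optional>
    #include <string>

    // Toy stand-in for the gc::CollectorType values involved in this revert.
    enum class ToyCollectorType { kCMS, kSS, kGSS, kCC };

    // Mirrors the option handling restored above: "GSS" selects the
    // generational semi-space collector, parsed alongside "SS" and "CC".
    std::optional<ToyCollectorType> ParseXgcOption(const std::string& option) {
      if (option == "CMS") return ToyCollectorType::kCMS;
      if (option == "SS")  return ToyCollectorType::kSS;
      if (option == "GSS") return ToyCollectorType::kGSS;
      if (option == "CC")  return ToyCollectorType::kCC;
      return std::nullopt;  // Unknown -Xgc value; the real parser reports a usage error.
    }
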
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index ec41b54..33f8c7f 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -174,6 +174,8 @@
MemMap::Init();
// Ensure a chunk of memory is reserved for the image space.
+ // The reservation_end includes room for the main space that has to come
+ // right after the image in case of the GSS collector.
uint64_t reservation_start = ART_BASE_ADDRESS;
uint64_t reservation_end = ART_BASE_ADDRESS + 384 * MB;
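
The restored comment explains why the test reserves extra address space: under GSS the main space must sit directly after the image, so the 384 MB window has to cover both. A rough, self-contained sketch of that arithmetic; kArtBaseAddress here is a made-up stand-in for the build-time ART_BASE_ADDRESS constant:

    #include <cstdint>
    #include <cstdio>

    int main() {
      constexpr uint64_t MB = 1024 * 1024;
      constexpr uint64_t kArtBaseAddress = 0x70000000;  // Hypothetical; the real value is set at build time.
      // The reservation spans the image plus the main space that GSS needs
      // immediately after it (no gap allowed between the two).
      const uint64_t reservation_start = kArtBaseAddress;
      const uint64_t reservation_end = kArtBaseAddress + 384 * MB;
      std::printf("reserving [%#llx, %#llx), %llu MB\n",
                  static_cast<unsigned long long>(reservation_start),
                  static_cast<unsigned long long>(reservation_end),
                  static_cast<unsigned long long>((reservation_end - reservation_start) / MB));
      return 0;
    }
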
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 065a125..7db5d2c 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -58,7 +58,7 @@
MarkStackPush(forward_address);
}
obj_ptr->Assign(forward_address);
- } else if (!immune_spaces_.IsInImmuneRegion(obj)) {
+ } else if (!collect_from_space_only_ && !immune_spaces_.IsInImmuneRegion(obj)) {
DCHECK(!to_space_->HasAddress(obj)) << "Tried to mark " << obj << " in to-space";
auto slow_path = [this](const mirror::Object* ref) {
CHECK(!to_space_->HasAddress(ref)) << "Marking " << ref << " in to_space_";
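
The one-line change above is the minor-collection fast path: when only the bump pointer (from) space is collected, anything outside it is treated as live without consulting mark bitmaps. A toy predicate capturing that rule, with a simplified address-range type in place of ART's space classes:

    #include <cstdint>

    // Simplified address range standing in for an ART space.
    struct ToySpaceRange {
      const uint8_t* begin;
      const uint8_t* end;
      bool HasAddress(const void* obj) const {
        const uint8_t* p = static_cast<const uint8_t*>(obj);
        return begin <= p && p < end;
      }
    };

    // Sketch of the marking decision restored above: from-space objects are
    // forwarded elsewhere; outside the from-space, the mark-bitmap slow path
    // only runs for full collections on non-immune objects.
    bool NeedsMarkBitmapSlowPath(const void* obj,
                                 const ToySpaceRange& from_space,
                                 bool collect_from_space_only,
                                 bool in_immune_region) {
      if (from_space.HasAddress(obj)) {
        return false;  // Handled by copying/forwarding, not by bitmap marking.
      }
      return !collect_from_space_only && !in_immune_region;
    }
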
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index d0030b4..15e0711 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -58,6 +58,8 @@
static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
+static constexpr size_t kBytesPromotedThreshold = 4 * MB;
+static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;
void SemiSpace::BindBitmaps() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
@@ -69,23 +71,41 @@
immune_spaces_.AddSpace(space);
} else if (space->GetLiveBitmap() != nullptr) {
// TODO: We can probably also add this space to the immune region.
- if (space == to_space_) {
+ if (space == to_space_ || collect_from_space_only_) {
+ if (collect_from_space_only_) {
+ // Bind the bitmaps of the main free list space and the non-moving space when we are doing a
+ // bump pointer space only collection.
+ CHECK(space == GetHeap()->GetPrimaryFreeListSpace() ||
+ space == GetHeap()->GetNonMovingSpace());
+ }
CHECK(space->IsContinuousMemMapAllocSpace());
space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
}
}
}
+ if (collect_from_space_only_) {
+ // We won't collect the large object space if this is a bump pointer space only collection.
+ is_large_object_space_immune_ = true;
+ }
}
-SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
+SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
: GarbageCollector(heap,
name_prefix + (name_prefix.empty() ? "" : " ") + "semispace"),
mark_stack_(nullptr),
+ is_large_object_space_immune_(false),
to_space_(nullptr),
to_space_live_bitmap_(nullptr),
from_space_(nullptr),
mark_bitmap_(nullptr),
self_(nullptr),
+ generational_(generational),
+ last_gc_to_space_end_(nullptr),
+ bytes_promoted_(0),
+ bytes_promoted_since_last_whole_heap_collection_(0),
+ large_object_bytes_allocated_at_last_whole_heap_collection_(0),
+ collect_from_space_only_(generational),
+ promo_dest_space_(nullptr),
fallback_space_(nullptr),
bytes_moved_(0U),
objects_moved_(0U),
@@ -128,6 +148,7 @@
mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_spaces_.Reset();
+ is_large_object_space_immune_ = false;
saved_bytes_ = 0;
bytes_moved_ = 0;
objects_moved_ = 0;
@@ -140,6 +161,9 @@
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
mark_bitmap_ = heap_->GetMarkBitmap();
}
+ if (generational_) {
+ promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
+ }
fallback_space_ = GetHeap()->GetNonMovingSpace();
}
@@ -167,14 +191,44 @@
// Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
// to prevent fragmentation.
RevokeAllThreadLocalBuffers();
+ if (generational_) {
+ if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
+ GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
+ GetCurrentIteration()->GetClearSoftReferences()) {
+ // If an explicit, native allocation-triggered, or last attempt
+ // collection, collect the whole heap.
+ collect_from_space_only_ = false;
+ }
+ if (!collect_from_space_only_) {
+ VLOG(heap) << "Whole heap collection";
+ name_ = collector_name_ + " whole";
+ } else {
+ VLOG(heap) << "Bump pointer space only collection";
+ name_ = collector_name_ + " bps";
+ }
+ }
- // Always clear soft references.
- GetCurrentIteration()->SetClearSoftReferences(true);
+ if (!collect_from_space_only_) {
+ // If non-generational, always clear soft references.
+ // If generational, clear soft references if a whole heap collection.
+ GetCurrentIteration()->SetClearSoftReferences(true);
+ }
Locks::mutator_lock_->AssertExclusiveHeld(self_);
+ if (generational_) {
+ // If last_gc_to_space_end_ is out of the bounds of the from-space
+ // (the to-space from last GC), then point it to the beginning of
+ // the from-space. For example, the very first GC or the
+ // pre-zygote compaction.
+ if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
+ last_gc_to_space_end_ = from_space_->Begin();
+ }
+ // Reset this before the marking starts below.
+ bytes_promoted_ = 0;
+ }
// Assume the cleared space is already empty.
BindBitmaps();
// Process dirty cards and add dirty cards to mod-union tables.
- heap_->ProcessCards(GetTimings(), /*use_rem_sets=*/false, false, true);
+ heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_, false, true);
// Clear the whole card table since we cannot get any additional dirty cards during the
// paused GC. This saves memory but only works for pause the world collectors.
t.NewTiming("ClearCardTable");
@@ -202,7 +256,7 @@
// Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
// before they are properly counted.
RevokeAllThreadLocalBuffers();
- GetHeap()->RecordFreeRevoke(); // This is for the non-moving rosalloc space.
+ GetHeap()->RecordFreeRevoke(); // This is for the non-moving rosalloc space used by GSS.
// Record freed memory.
const int64_t from_bytes = from_space_->GetBytesAllocated();
const int64_t to_bytes = bytes_moved_;
@@ -295,7 +349,8 @@
GetTimings());
table->UpdateAndMarkReferences(this);
DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
- } else if (space->IsImageSpace() && space->GetLiveBitmap() != nullptr) {
+ } else if ((space->IsImageSpace() || collect_from_space_only_) &&
+ space->GetLiveBitmap() != nullptr) {
// If the space has no mod union table (the non-moving space, app image spaces, main spaces
// when the bump pointer space only collection is enabled,) then we need to scan its live
// bitmap or dirty cards as roots (including the objects on the live stack which have just
@@ -303,8 +358,11 @@
accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
if (!space->IsImageSpace()) {
DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
- << "Space " << space->GetName();
+ << "Space " << space->GetName() << " "
+ << "generational_=" << generational_ << " "
+ << "collect_from_space_only_=" << collect_from_space_only_;
// App images currently do not have remembered sets.
+ DCHECK_EQ(kUseRememberedSet, rem_set != nullptr);
} else {
DCHECK(rem_set == nullptr);
}
@@ -337,6 +395,30 @@
}
}
}
+
+ CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
+ space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
+ if (is_large_object_space_immune_ && los != nullptr) {
+ TimingLogger::ScopedTiming t2("VisitLargeObjects", GetTimings());
+ DCHECK(collect_from_space_only_);
+ // Delay copying the live set to the marked set until here from
+ // BindBitmaps() as the large objects on the allocation stack may
+ // be newly added to the live set above in MarkAllocStackAsLive().
+ los->CopyLiveToMarked();
+
+ // When the large object space is immune, we need to scan the
+ // large object space as roots as they contain references to their
+ // classes (primitive array classes) that could move though they
+ // don't contain any other references.
+ accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
+ std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
+ large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
+ reinterpret_cast<uintptr_t>(range.second),
+ [this](mirror::Object* obj)
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ ScanObject(obj);
+ });
+ }
// Recursively process the mark stack.
ProcessMarkStack();
}
@@ -355,6 +437,12 @@
if (saved_bytes_ > 0) {
VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
}
+ if (generational_) {
+ // Record the end (top) of the to space so we can distinguish
+ // between objects that were allocated since the last GC and the
+ // older objects.
+ last_gc_to_space_end_ = to_space_->End();
+ }
}
void SemiSpace::ResizeMarkStack(size_t new_size) {
@@ -427,15 +515,66 @@
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
const size_t object_size = obj->SizeOf();
size_t bytes_allocated, dummy;
- // Copy it to the to-space.
- mirror::Object* forward_address = to_space_->AllocThreadUnsafe(self_,
- object_size,
- &bytes_allocated,
- nullptr,
- &dummy);
+ mirror::Object* forward_address = nullptr;
+ if (generational_ && reinterpret_cast<uint8_t*>(obj) < last_gc_to_space_end_) {
+ // If it's allocated before the last GC (older), move
+ // (pseudo-promote) it to the main free list space (as sort
+ // of an old generation.)
+ forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
+ nullptr, &dummy);
+ if (UNLIKELY(forward_address == nullptr)) {
+ // If out of space, fall back to the to-space.
+ forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
+ &dummy);
+ // No logic for marking the bitmap, so it must be null.
+ DCHECK(to_space_live_bitmap_ == nullptr);
+ } else {
+ bytes_promoted_ += bytes_allocated;
+ // Dirty the card at the destination as it may contain
+ // references (including the class pointer) to the bump pointer
+ // space.
+ WriteBarrier::ForEveryFieldWrite(forward_address);
+ // Handle the bitmaps marking.
+ accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
+ DCHECK(live_bitmap != nullptr);
+ accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
+ DCHECK(mark_bitmap != nullptr);
+ DCHECK(!live_bitmap->Test(forward_address));
+ if (collect_from_space_only_) {
+ // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
+ DCHECK_EQ(live_bitmap, mark_bitmap);
- if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
- to_space_live_bitmap_->Set(forward_address);
+ // If a bump pointer space only collection, delay the live
+ // bitmap marking of the promoted object until it's popped off
+ // the mark stack (ProcessMarkStack()). The rationale: we may
+ // be in the middle of scanning the objects in the promo
+ // destination space for
+ // non-moving-space-to-bump-pointer-space references by
+ // iterating over the marked bits of the live bitmap
+ // (MarkReachableObjects()). If we don't delay it (and instead
+ // mark the promoted object here), the above promo destination
+ // space scan could encounter the just-promoted object and
+ // forward the references in the promoted object's fields even
+ // though it is pushed onto the mark stack. If this happens,
+ // the promoted object would be in an inconsistent state, that
+ // is, it's on the mark stack (gray) but its fields are
+ // already forwarded (black), which would cause a
+ // DCHECK(!to_space_->HasAddress(obj)) failure below.
+ } else {
+ // Mark forward_address on the live bit map.
+ live_bitmap->Set(forward_address);
+ // Mark forward_address on the mark bit map.
+ DCHECK(!mark_bitmap->Test(forward_address));
+ mark_bitmap->Set(forward_address);
+ }
+ }
+ } else {
+ // If it's allocated after the last GC (younger), copy it to the to-space.
+ forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
+ &dummy);
+ if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
+ to_space_live_bitmap_->Set(forward_address);
+ }
}
// If it's still null, attempt to use the fallback space.
if (UNLIKELY(forward_address == nullptr)) {
@@ -457,7 +596,9 @@
obj->AssertReadBarrierState();
forward_address->AssertReadBarrierState();
}
- DCHECK(to_space_->HasAddress(forward_address) || fallback_space_->HasAddress(forward_address))
+ DCHECK(to_space_->HasAddress(forward_address) ||
+ fallback_space_->HasAddress(forward_address) ||
+ (generational_ && promo_dest_space_->HasAddress(forward_address)))
<< forward_address << "\n" << GetHeap()->DumpSpaces();
return forward_address;
}
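
The MarkNonForwardedObject hunk above restores the promotion policy: survivors allocated before the previous GC's to-space top (last_gc_to_space_end_) are pseudo-promoted into the main free-list space, younger objects are copied within the semi-space pair, and the non-moving space is the last resort. A compressed sketch of just that routing, with a toy allocator interface instead of ART's AllocThreadUnsafe signature:

    #include <cstddef>
    #include <cstdint>

    // Toy allocation interface; the real AllocThreadUnsafe also reports
    // bytes allocated, usable size, and TLAB bytes.
    struct ToySpace {
      virtual ~ToySpace() = default;
      virtual void* Alloc(size_t bytes) = 0;  // Returns nullptr when full.
    };

    // Routing restored above: "old" survivors are promoted, "young" objects
    // are copied to the to-space, and the non-moving space catches overflow.
    void* ForwardObject(const void* obj,
                        size_t object_size,
                        const uint8_t* last_gc_to_space_end,
                        bool generational,
                        ToySpace* promo_dest_space,
                        ToySpace* to_space,
                        ToySpace* fallback_space) {
      void* forward_address = nullptr;
      const bool is_old = generational &&
          reinterpret_cast<const uint8_t*>(obj) < last_gc_to_space_end;
      if (is_old) {
        forward_address = promo_dest_space->Alloc(object_size);  // Promotion.
      }
      if (forward_address == nullptr) {
        forward_address = to_space->Alloc(object_size);  // Young copy or promotion overflow.
      }
      if (forward_address == nullptr) {
        forward_address = fallback_space->Alloc(object_size);  // Non-moving fallback.
      }
      return forward_address;
    }
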
@@ -523,9 +664,13 @@
RecordFree(alloc_space->Sweep(swap_bitmaps));
}
}
+ if (!is_large_object_space_immune_) {
+ SweepLargeObjects(swap_bitmaps);
+ }
}
void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
+ DCHECK(!is_large_object_space_immune_);
space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
if (los != nullptr) {
TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
@@ -590,8 +735,26 @@
// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
+ const bool collect_from_space_only = collect_from_space_only_;
+ if (collect_from_space_only) {
+ // If a bump pointer space only collection (and the promotion is
+ // enabled,) we delay the live-bitmap marking of promoted objects
+ // from MarkObject() until this function.
+ live_bitmap = promo_dest_space_->GetLiveBitmap();
+ DCHECK(live_bitmap != nullptr);
+ accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
+ DCHECK(mark_bitmap != nullptr);
+ DCHECK_EQ(live_bitmap, mark_bitmap);
+ }
while (!mark_stack_->IsEmpty()) {
Object* obj = mark_stack_->PopBack();
+ if (collect_from_space_only && promo_dest_space_->HasAddress(obj)) {
+ // obj has just been promoted. Mark the live bitmap for it,
+ // which is delayed from MarkObject().
+ DCHECK(!live_bitmap->Test(obj));
+ live_bitmap->Set(obj);
+ }
ScanObject(obj);
}
}
@@ -601,7 +764,9 @@
if (from_space_->HasAddress(obj)) {
// Returns either the forwarding address or null.
return GetForwardingAddressInFromSpace(obj);
- } else if (immune_spaces_.IsInImmuneRegion(obj) || to_space_->HasAddress(obj)) {
+ } else if (collect_from_space_only_ ||
+ immune_spaces_.IsInImmuneRegion(obj) ||
+ to_space_->HasAddress(obj)) {
return obj; // Already forwarded, must be marked.
}
return mark_bitmap_->Test(obj) ? obj : nullptr;
@@ -652,6 +817,35 @@
from_space_ = nullptr;
CHECK(mark_stack_->IsEmpty());
mark_stack_->Reset();
+ space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
+ if (generational_) {
+ // Decide whether to do a whole heap collection or a bump pointer
+ // only space collection at the next collection by updating
+ // collect_from_space_only_.
+ if (collect_from_space_only_) {
+ // Disable collect_from_space_only_ if the bytes promoted since the
+ // last whole heap collection or the large object bytes
+ // allocated exceeds a threshold.
+ bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
+ bool bytes_promoted_threshold_exceeded =
+ bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
+ uint64_t current_los_bytes_allocated = los != nullptr ? los->GetBytesAllocated() : 0U;
+ uint64_t last_los_bytes_allocated =
+ large_object_bytes_allocated_at_last_whole_heap_collection_;
+ bool large_object_bytes_threshold_exceeded =
+ current_los_bytes_allocated >=
+ last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
+ if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
+ collect_from_space_only_ = false;
+ }
+ } else {
+ // Reset the counters.
+ bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
+ large_object_bytes_allocated_at_last_whole_heap_collection_ =
+ los != nullptr ? los->GetBytesAllocated() : 0U;
+ collect_from_space_only_ = true;
+ }
+ }
// Clear all of the spaces' mark bitmaps.
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
heap_->ClearMarkedObjects();
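
The constants restored near the top of the file (kBytesPromotedThreshold = 4 MB, kLargeObjectBytesAllocatedThreshold = 16 MB) and the FinishPhase hunk above together pace how often a whole-heap collection interrupts the bump-pointer-space-only cycles. A standalone model of that rule, with the relevant bookkeeping gathered into one struct whose member names follow semi_space.h:

    #include <cstdint>

    struct GssPacing {
      static constexpr uint64_t MB = 1024 * 1024;
      static constexpr uint64_t kBytesPromotedThreshold = 4 * MB;
      static constexpr uint64_t kLargeObjectBytesAllocatedThreshold = 16 * MB;

      uint64_t bytes_promoted_since_last_whole_heap_collection = 0;
      uint64_t large_object_bytes_allocated_at_last_whole_heap_collection = 0;
      bool collect_from_space_only = true;  // Start with minor (bps) collections.

      // Called at the end of each GC to decide the scope of the next one.
      void Update(uint64_t bytes_promoted_this_gc, uint64_t current_los_bytes_allocated) {
        if (collect_from_space_only) {
          bytes_promoted_since_last_whole_heap_collection += bytes_promoted_this_gc;
          const bool promoted_too_much =
              bytes_promoted_since_last_whole_heap_collection >= kBytesPromotedThreshold;
          const bool los_grew_too_much =
              current_los_bytes_allocated >=
                  large_object_bytes_allocated_at_last_whole_heap_collection +
                      kLargeObjectBytesAllocatedThreshold;
          if (promoted_too_much || los_grew_too_much) {
            collect_from_space_only = false;  // Next GC collects the whole heap.
          }
        } else {
          // A whole-heap collection just ran: reset the counters and resume
          // bump-pointer-space-only collections.
          bytes_promoted_since_last_whole_heap_collection = bytes_promoted_this_gc;
          large_object_bytes_allocated_at_last_whole_heap_collection =
              current_los_bytes_allocated;
          collect_from_space_only = true;
        }
      }
    };
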
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 9f2939f..f23d416 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -59,7 +59,7 @@
// If true, use remembered sets in the generational mode.
static constexpr bool kUseRememberedSet = true;
- explicit SemiSpace(Heap* heap, const std::string& name_prefix = "");
+ explicit SemiSpace(Heap* heap, bool generational = false, const std::string& name_prefix = "");
~SemiSpace() {}
@@ -76,7 +76,7 @@
return kGcTypePartial;
}
CollectorType GetCollectorType() const override {
- return kCollectorTypeSS;
+ return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
}
// Sets which space we will be copying objects to.
@@ -208,6 +208,9 @@
// Every object inside the immune spaces is assumed to be marked.
ImmuneSpaces immune_spaces_;
+ // If true, the large object space is immune.
+ bool is_large_object_space_immune_;
+
// Destination and source spaces (can be any type of ContinuousMemMapAllocSpace which either has
// a live bitmap or doesn't).
space::ContinuousMemMapAllocSpace* to_space_;
@@ -219,6 +222,35 @@
Thread* self_;
+ // When true, the generational mode (promotion and the bump pointer
+ // space only collection) is enabled. TODO: move these to a new file
+ // as a new garbage collector?
+ const bool generational_;
+
+ // Used for the generational mode. The end/top of the bump
+ // pointer space at the end of the last collection.
+ uint8_t* last_gc_to_space_end_;
+
+ // Used for the generational mode. During a collection, keeps track
+ // of how many bytes of objects have been copied so far from the
+ // bump pointer space to the non-moving space.
+ uint64_t bytes_promoted_;
+
+ // Used for the generational mode. Keeps track of how many bytes of
+ // objects have been copied so far from the bump pointer space to
+ // the non-moving space, since the last whole heap collection.
+ uint64_t bytes_promoted_since_last_whole_heap_collection_;
+
+ // Used for the generational mode. Keeps track of how many bytes of
+ // large objects were allocated at the last whole heap collection.
+ uint64_t large_object_bytes_allocated_at_last_whole_heap_collection_;
+
+ // Used for generational mode. When true, we only collect the from_space_.
+ bool collect_from_space_only_;
+
+ // The space which we are promoting into, only used for GSS.
+ space::ContinuousMemMapAllocSpace* promo_dest_space_;
+
// The space which we copy to if the to_space_ is full.
space::ContinuousMemMapAllocSpace* fallback_space_;
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 62527e2..4759fca 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -32,6 +32,8 @@
kCollectorTypeCMS,
// Semi-space / mark-sweep hybrid, enables compaction.
kCollectorTypeSS,
+ // A generational variant of kCollectorTypeSS.
+ kCollectorTypeGSS,
// Heap trimming collector, doesn't do any actual collecting.
kCollectorTypeHeapTrim,
// A (mostly) concurrent copying collector.
@@ -67,6 +69,8 @@
kCollectorTypeCMS
#elif ART_DEFAULT_GC_TYPE_IS_SS
kCollectorTypeSS
+#elif ART_DEFAULT_GC_TYPE_IS_GSS
+ kCollectorTypeGSS
#else
kCollectorTypeCMS
#error "ART default GC type must be set"
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 3b66fbc..1c09b5c 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -143,9 +143,15 @@
obj->AssertReadBarrierState();
}
if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
- // (Note this if statement will be constant folded away for the fast-path quick entry
- // points.) Because SetClass() has no write barrier, the GC may need a write barrier in the
- // case the object is non movable and points to a recently allocated movable class.
+ // (Note this if statement will be constant folded away for the
+ // fast-path quick entry points.) Because SetClass() has no write
+ // barrier, if a non-moving space allocation, we need a write
+ // barrier as the class pointer may point to the bump pointer
+ // space (where the class pointer is an "old-to-young" reference,
+ // though rare) under the GSS collector with the remembered set
+ // enabled. We don't need this for kAllocatorTypeRosAlloc/DlMalloc
+ // cases because we don't directly allocate into the main alloc
+ // space (besides promotions) under the SS/GSS collector.
WriteBarrier::ForFieldWrite(obj, mirror::Object::ClassOffset(), klass);
}
pre_fence_visitor(obj, usable_size);
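
The rewritten comment above gives the reasoning: under GSS with remembered sets, a non-moving-space allocation must dirty its card because the freshly installed class pointer may be an old-to-young reference into the bump pointer space. A toy card-table write barrier illustrating that bookkeeping; the class and the card size are made up, not ART's accounting::CardTable:

    #include <cstdint>
    #include <unordered_set>

    // Toy card table: the heap is split into fixed-size cards and a write to
    // an object dirties the card containing it, so a later card/remembered-set
    // scan can find old-to-young references. The card size here is arbitrary;
    // ART's real value lives in accounting::CardTable.
    class ToyCardTable {
     public:
      static constexpr std::uintptr_t kCardSize = 256;

      void MarkCard(const void* addr) {
        dirty_cards_.insert(reinterpret_cast<std::uintptr_t>(addr) / kCardSize);
      }
      bool IsDirty(const void* addr) const {
        return dirty_cards_.count(reinterpret_cast<std::uintptr_t>(addr) / kCardSize) != 0;
      }

     private:
      std::unordered_set<std::uintptr_t> dirty_cards_;
    };

    // The rule described above: when an object is allocated in the non-moving
    // space and its class pointer is installed, dirty its card so a class
    // living in the (young) bump pointer space is not missed by the next
    // minor collection. This plays the role of WriteBarrier::ForFieldWrite.
    void OnNonMovingSpaceAllocation(ToyCardTable& cards, const void* new_object) {
      cards.MarkCard(new_object);
    }
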
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bbcb93c..987b239 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -123,6 +123,7 @@
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static const char* kNonMovingSpaceName = "non moving space";
static const char* kZygoteSpaceName = "zygote space";
+static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
static constexpr bool kGCALotMode = false;
// GC alot mode uses a small allocation stack to stress test a lot of GC.
static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
@@ -334,8 +335,9 @@
live_bitmap_.reset(new accounting::HeapBitmap(this));
mark_bitmap_.reset(new accounting::HeapBitmap(this));
- // We don't have hspace compaction enabled with CC.
- if (foreground_collector_type_ == kCollectorTypeCC) {
+ // We don't have hspace compaction enabled with GSS or CC.
+ if (foreground_collector_type_ == kCollectorTypeGSS ||
+ foreground_collector_type_ == kCollectorTypeCC) {
use_homogeneous_space_compaction_for_oom_ = false;
}
bool support_homogeneous_space_compaction =
@@ -348,6 +350,9 @@
bool separate_non_moving_space = is_zygote ||
support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
IsMovingGc(background_collector_type_);
+ if (foreground_collector_type_ == kCollectorTypeGSS) {
+ separate_non_moving_space = false;
+ }
// Requested begin for the alloc space, to follow the mapped image and oat files
uint8_t* request_begin = nullptr;
@@ -355,7 +360,8 @@
size_t heap_reservation_size = 0u;
if (separate_non_moving_space) {
heap_reservation_size = non_moving_space_capacity;
- } else if (foreground_collector_type_ != kCollectorTypeCC && is_zygote) {
+ } else if ((foreground_collector_type_ != kCollectorTypeCC) &&
+ (is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
heap_reservation_size = capacity_;
}
heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
@@ -440,13 +446,14 @@
// Attempt to create 2 mem maps at or after the requested begin.
if (foreground_collector_type_ != kCollectorTypeCC) {
ScopedTrace trace2("Create main mem map");
- if (separate_non_moving_space || !is_zygote) {
+ if (separate_non_moving_space ||
+ !(is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
main_mem_map_1 = MapAnonymousPreferredAddress(
kMemMapSpaceName[0], request_begin, capacity_, &error_str);
} else {
- // If no separate non-moving space and we are the zygote, the main space must come right after
- // the image space to avoid a gap. This is required since we want the zygote space to be
- // adjacent to the image space.
+ // If no separate non-moving space and we are the zygote or the collector type is GSS,
+ // the main space must come right after the image space to avoid a gap.
+ // This is required since we want the zygote space to be adjacent to the image space.
DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
main_mem_map_1 = MemMap::MapAnonymous(
kMemMapSpaceName[0],
@@ -499,7 +506,8 @@
region_space_ = space::RegionSpace::Create(
kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_);
AddSpace(region_space_);
- } else if (IsMovingGc(foreground_collector_type_)) {
+ } else if (IsMovingGc(foreground_collector_type_) &&
+ foreground_collector_type_ != kCollectorTypeGSS) {
// Create bump pointer spaces.
// We only need to create the bump pointer spaces if the foreground collector is a compacting GC.
// TODO: Place bump-pointer spaces somewhere to minimize size of card table.
@@ -520,7 +528,19 @@
non_moving_space_ = main_space_;
CHECK(!non_moving_space_->CanMoveObjects());
}
- if (main_mem_map_2.IsValid()) {
+ if (foreground_collector_type_ == kCollectorTypeGSS) {
+ CHECK_EQ(foreground_collector_type_, background_collector_type_);
+ // Create bump pointer spaces instead of a backup space.
+ main_mem_map_2.Reset();
+ bump_pointer_space_ = space::BumpPointerSpace::Create(
+ "Bump pointer space 1", kGSSBumpPointerSpaceCapacity);
+ CHECK(bump_pointer_space_ != nullptr);
+ AddSpace(bump_pointer_space_);
+ temp_space_ = space::BumpPointerSpace::Create(
+ "Bump pointer space 2", kGSSBumpPointerSpaceCapacity);
+ CHECK(temp_space_ != nullptr);
+ AddSpace(temp_space_);
+ } else if (main_mem_map_2.IsValid()) {
const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
initial_size,
@@ -630,10 +650,13 @@
}
}
if (kMovingCollector) {
- if (MayUseCollector(kCollectorTypeSS) ||
+ if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
use_homogeneous_space_compaction_for_oom_) {
- semi_space_collector_ = new collector::SemiSpace(this);
+ // TODO: Clean this up.
+ const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
+ semi_space_collector_ = new collector::SemiSpace(this, generational,
+ generational ? "generational" : "");
garbage_collectors_.push_back(semi_space_collector_);
}
if (MayUseCollector(kCollectorTypeCC)) {
@@ -666,10 +689,10 @@
}
}
if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
- (is_zygote || separate_non_moving_space)) {
+ (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
// Check that there's no gap between the image space and the non moving space so that the
// immune region won't break (eg. due to a large object allocated in the gap). This is only
- // required when we're the zygote.
+ // required when we're the zygote or using GSS.
// Space with smallest Begin().
space::ImageSpace* first_space = nullptr;
for (space::ImageSpace* space : boot_image_spaces_) {
@@ -772,7 +795,8 @@
if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
// After the zygote we want this to be false if we don't have background compaction enabled so
// that getting primitive array elements is faster.
- can_move_objects = !HasZygoteSpace();
+ // We never have homogeneous compaction with GSS and don't need a space with movable objects.
+ can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
}
if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
RemoveRememberedSet(main_space_);
@@ -2256,7 +2280,8 @@
}
break;
}
- case kCollectorTypeSS: {
+ case kCollectorTypeSS: // Fall-through.
+ case kCollectorTypeGSS: {
gc_plan_.push_back(collector::kGcTypeFull);
if (use_tlab_) {
ChangeAllocator(kAllocatorTypeTLAB);
@@ -2298,7 +2323,7 @@
class ZygoteCompactingCollector final : public collector::SemiSpace {
public:
ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
- : SemiSpace(heap, "zygote collector"),
+ : SemiSpace(heap, false, "zygote collector"),
bin_live_bitmap_(nullptr),
bin_mark_bitmap_(nullptr),
is_running_on_memory_tool_(is_running_on_memory_tool) {}
@@ -2713,6 +2738,8 @@
current_allocator_ == kAllocatorTypeRegionTLAB);
switch (collector_type_) {
case kCollectorTypeSS:
+ // Fall-through.
+ case kCollectorTypeGSS:
semi_space_collector_->SetFromSpace(bump_pointer_space_);
semi_space_collector_->SetToSpace(temp_space_);
semi_space_collector_->SetSwapSemiSpaces(true);
@@ -3338,7 +3365,8 @@
TimingLogger::ScopedTiming t2(name, timings);
table->ProcessCards();
} else if (use_rem_sets && rem_set != nullptr) {
- DCHECK(collector::SemiSpace::kUseRememberedSet) << static_cast<int>(collector_type_);
+ DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
+ << static_cast<int>(collector_type_);
TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
rem_set->ClearCards();
} else if (process_alloc_space_cards) {
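
The heap.cc hunks above restore several GSS-specific layout decisions: homogeneous space compaction is disabled, there is no separate non-moving space, the main space must follow the image directly, the second main map is replaced by a pair of 32 MB bump pointer spaces, and the background collector equals the foreground one. A condensed restatement as plain data (toy struct, not ART's Heap constructor):

    #include <cstddef>

    struct GssHeapLayout {
      bool use_homogeneous_space_compaction_for_oom;
      bool separate_non_moving_space;
      bool main_space_must_follow_image;
      bool create_backup_main_space;
      size_t bump_pointer_space_capacity;  // Per semi-space.
      bool background_equals_foreground;
    };

    // Values implied by the restored Heap::Heap() code when the foreground
    // collector type is kCollectorTypeGSS.
    constexpr GssHeapLayout ConfigureForGss() {
      constexpr size_t MB = 1024 * 1024;
      return GssHeapLayout{
          /*use_homogeneous_space_compaction_for_oom=*/false,
          /*separate_non_moving_space=*/false,    // The main space doubles as the non-moving space.
          /*main_space_must_follow_image=*/true,  // Keeps the immune region contiguous.
          /*create_backup_main_space=*/false,     // main_mem_map_2 is dropped in favor of...
          /*bump_pointer_space_capacity=*/32 * MB,  // ...two kGSSBumpPointerSpaceCapacity spaces.
          /*background_equals_foreground=*/true,
      };
    }
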
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 07f6a19..5cf1978 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -956,6 +956,7 @@
return
collector_type == kCollectorTypeCC ||
collector_type == kCollectorTypeSS ||
+ collector_type == kCollectorTypeGSS ||
collector_type == kCollectorTypeCCBackground ||
collector_type == kCollectorTypeHomogeneousSpaceCompact;
}
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index ca11297..4fe8027 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -113,6 +113,7 @@
case CollectorType::kCollectorTypeCMS:
case CollectorType::kCollectorTypeCC:
case CollectorType::kCollectorTypeSS:
+ case CollectorType::kCollectorTypeGSS:
return true;
default:
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 413355c..8172e1d 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -569,6 +569,7 @@
{
// If not set, background collector type defaults to homogeneous compaction.
+ // If foreground is GSS, use GSS as background collector.
// In low memory mode, use semispace instead.
gc::CollectorType background_collector_type_;
@@ -584,8 +585,12 @@
}
if (background_collector_type_ == gc::kCollectorTypeNone) {
- background_collector_type_ = low_memory_mode_ ?
- gc::kCollectorTypeSS : gc::kCollectorTypeHomogeneousSpaceCompact;
+ if (collector_type_ != gc::kCollectorTypeGSS) {
+ background_collector_type_ = low_memory_mode_ ?
+ gc::kCollectorTypeSS : gc::kCollectorTypeHomogeneousSpaceCompact;
+ } else {
+ background_collector_type_ = collector_type_;
+ }
}
args.Set(M::BackgroundGc, BackgroundGcOption { background_collector_type_ });
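
The parsed_options.cc hunk restores the rule that a GSS foreground collector forces the background collector to GSS as well; otherwise the old default stands (homogeneous space compaction, or semi-space in low-memory mode). A toy version of that selection, with a simplified enum in place of gc::CollectorType:

    enum class ToyCollector { kNone, kSS, kGSS, kHomogeneousSpaceCompact };

    // Mirrors the background-collector defaulting restored above.
    ToyCollector DefaultBackgroundCollector(ToyCollector foreground,
                                            ToyCollector requested_background,
                                            bool low_memory_mode) {
      if (requested_background != ToyCollector::kNone) {
        return requested_background;  // Explicitly chosen on the command line.
      }
      if (foreground == ToyCollector::kGSS) {
        return ToyCollector::kGSS;  // GSS in the foreground forces GSS in the background.
      }
      return low_memory_mode ? ToyCollector::kSS
                             : ToyCollector::kHomogeneousSpaceCompact;
    }
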
diff --git a/test/004-ThreadStress/run b/test/004-ThreadStress/run
index 8004036..067e0d0 100755
--- a/test/004-ThreadStress/run
+++ b/test/004-ThreadStress/run
@@ -15,7 +15,29 @@
# limitations under the License.
# Enable lock contention logging.
-${RUN} --runtime-option -Xlockprofthreshold:10 "${@}"
+if [[ "x$ART_DEFAULT_GC_TYPE" = xGSS ]]; then
+ # NonMovingAlloc operations fail an assertion with the Generational
+ # Semi-Space (GSS) collector (see b/72738921); disable them for now
+ # by explicitly assigning frequencies to operations when the GSS
+ # collector is used.
+ #
+ # Note: The trick to use command substitution to have comments within
+ # a multi-line command is from https://stackoverflow.com/a/12797512.
+ ${RUN} --runtime-option -Xlockprofthreshold:10 "${@}" Main \
+ -oom:0.005 `# 1/200` \
+ -sigquit:0.095 `# 19/200` \
+ -alloc:0.225 `# 45/200` \
+ -largealloc:0.05 `# 10/200` \
+ -nonmovingalloc:0.0 `# 0/200` \
+ -stacktrace:0.1 `# 20/200` \
+ -exit:0.225 `# 45/200` \
+ -sleep:0.125 `# 25/200` \
+ -timedwait:0.05 `# 10/200` \
+ -wait:0.075 `# 15/200` \
+ -queuedwait:0.05 `# 10/200`
+else
+ ${RUN} --runtime-option -Xlockprofthreshold:10 "${@}"
+fi
return_status1=$?
# Run locks-only mode with stack-dump lock profiling. Reduce the number of total operations from
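
The frequencies passed above are expressed as fractions of 200 in the inline comments and are intended to sum to 1.0 even with -nonmovingalloc set to zero. A quick standalone check of that arithmetic:

    #include <cstdio>

    int main() {
      // Numerators of the x/200 frequencies from the command line above, in
      // order: oom, sigquit, alloc, largealloc, nonmovingalloc, stacktrace,
      // exit, sleep, timedwait, wait, queuedwait.
      const int per_200[] = {1, 19, 45, 10, 0, 20, 45, 25, 10, 15, 10};
      int total = 0;
      for (int n : per_200) total += n;
      std::printf("sum = %d/200 = %.3f\n", total, total / 200.0);  // 200/200 = 1.000.
      return 0;
    }
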
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 29074dd..b76bd5c 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -19,6 +19,13 @@
},
{
"tests": "080-oom-fragmentation",
+ "description": ["Disable 080-oom-fragmentation for GSS GC due to lack of",
+ "support for allocations larger than 32MB."],
+ "env_vars": {"ART_DEFAULT_GC_TYPE": "GSS"},
+ "bug": "http://b/33795328"
+ },
+ {
+ "tests": "080-oom-fragmentation",
"description": ["Disable 080-oom-fragmentation for CC collector in debug mode",
"because of potential fragmentation caused by the region space's",
"cyclic region allocation (which is enabled in debug mode)."],
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index fa5dfed..6e299bd 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -124,6 +124,16 @@
'ART_USE_READ_BARRIER' : 'false'
}
},
+ # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
+ 'art-gss-gc' : {
+ 'run-test' : ['--interpreter',
+ '--optimizing',
+ '--jit'],
+ 'env' : {
+ 'ART_DEFAULT_GC_TYPE' : 'GSS',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
# TODO: Consider removing this configuration when it is no longer used by
# any continuous testing target (b/62611253), as the SS collector overlaps
# with the CC collector, since both move objects.
@@ -137,6 +147,17 @@
'ART_USE_READ_BARRIER' : 'false'
}
},
+ # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
+ 'art-gss-gc-tlab' : {
+ 'run-test' : ['--interpreter',
+ '--optimizing',
+ '--jit'],
+ 'env' : {
+ 'ART_DEFAULT_GC_TYPE' : 'GSS',
+ 'ART_USE_TLAB' : 'true',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
'art-tracing' : {
'run-test' : ['--trace']
},
@@ -208,6 +229,14 @@
'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'none'
}
},
+ # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
+ 'art-gtest-gss-gc': {
+ 'make' : 'test-art-host-gtest',
+ 'env' : {
+ 'ART_DEFAULT_GC_TYPE' : 'GSS',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
# TODO: Consider removing this configuration when it is no longer used by
# any continuous testing target (b/62611253), as the SS collector overlaps
# with the CC collector, since both move objects.
@@ -219,6 +248,15 @@
'ART_USE_READ_BARRIER' : 'false',
}
},
+ # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
+ 'art-gtest-gss-gc-tlab': {
+ 'make' : 'test-art-host-gtest',
+ 'env': {
+ 'ART_DEFAULT_GC_TYPE' : 'GSS',
+ 'ART_USE_TLAB' : 'true',
+ 'ART_USE_READ_BARRIER' : 'false'
+ }
+ },
'art-gtest-debug-gc' : {
'make' : 'test-art-host-gtest',
'env' : {